rk: temp revert rk change

Huang, Tao 2015-11-11 15:54:30 +08:00
commit 240e7f3ebf
359 changed files with 4450 additions and 12784 deletions

View file

@ -70,10 +70,6 @@ the operations defined in clk.h:
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long,
unsigned long *);
long (*determine_rate)(struct clk_hw *hw,
unsigned long rate,
unsigned long *best_parent_rate,
struct clk **best_parent_clk);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long);
@ -183,28 +179,26 @@ mandatory, a cell marked as "n" implies that either including that
callback is invalid or otherwise unnecessary. Empty cells are either
optional or must be evaluated on a case-by-case basis.
                           clock hardware characteristics
                -----------------------------------------------------------
                | gate | change rate | single parent | multiplexer | root |
                |------|-------------|---------------|-------------|------|
.prepare        |      |             |               |             |      |
.unprepare      |      |             |               |             |      |
                |      |             |               |             |      |
.enable         | y    |             |               |             |      |
.disable        | y    |             |               |             |      |
.is_enabled     | y    |             |               |             |      |
                |      |             |               |             |      |
.recalc_rate    |      | y           |               |             |      |
.round_rate     |      | y [1]       |               |             |      |
.determine_rate |      | y [1]       |               |             |      |
.set_rate       |      | y           |               |             |      |
                |      |             |               |             |      |
.set_parent     |      |             | n             | y           | n    |
.get_parent     |      |             | n             | y           | n    |
                |      |             |               |             |      |
.init           |      |             |               |             |      |
                -----------------------------------------------------------
[1] either one of round_rate or determine_rate is required.
                        clock hardware characteristics
             -----------------------------------------------------------
             | gate | change rate | single parent | multiplexer | root |
             |------|-------------|---------------|-------------|------|
.prepare     |      |             |               |             |      |
.unprepare   |      |             |               |             |      |
             |      |             |               |             |      |
.enable      | y    |             |               |             |      |
.disable     | y    |             |               |             |      |
.is_enabled  | y    |             |               |             |      |
             |      |             |               |             |      |
.recalc_rate |      | y           |               |             |      |
.round_rate  |      | y           |               |             |      |
.set_rate    |      | y           |               |             |      |
             |      |             |               |             |      |
.set_parent  |      |             | n             | y           | n    |
.get_parent  |      |             | n             | y           | n    |
             |      |             |               |             |      |
.init        |      |             |               |             |      |
             -----------------------------------------------------------
Finally, register your clock at run-time with a hardware-specific
registration function. This function simply populates struct clk_foo's
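
To make the table and the registration step concrete, here is a minimal sketch of a gate-only provider in the style this document describes. The clk_foo naming, the single gate register, and the bit-0 enable assumption are all illustrative, not part of this commit:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct clk_foo {
	struct clk_hw hw;	/* embedded hw handle, registered below */
	void __iomem *reg;	/* hypothetical gate register */
};

#define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)

static int clk_foo_enable(struct clk_hw *hw)
{
	writel(1, to_clk_foo(hw)->reg);		/* assume bit 0 is the gate */
	return 0;
}

static void clk_foo_disable(struct clk_hw *hw)
{
	writel(0, to_clk_foo(hw)->reg);
}

static int clk_foo_is_enabled(struct clk_hw *hw)
{
	return readl(to_clk_foo(hw)->reg) & 1;
}

/* Per the table, a pure gate needs only .enable/.disable/.is_enabled. */
static const struct clk_ops clk_foo_ops = {
	.enable		= clk_foo_enable,
	.disable	= clk_foo_disable,
	.is_enabled	= clk_foo_is_enabled,
};

struct clk *clk_foo_register(struct device *dev, const char *name,
			     const char *parent_name, void __iomem *reg)
{
	struct clk_init_data init = {
		.name		= name,
		.ops		= &clk_foo_ops,
		.parent_names	= &parent_name,
		.num_parents	= 1,
	};
	struct clk_foo *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	foo->reg = reg;
	foo->hw.init = &init;	/* consumed by clk_register() */
	return clk_register(dev, &foo->hw);
}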

View file

@ -8,7 +8,6 @@ Required properties:
- compatible : should be one of
"arm,cortex-a15-pmu"
"arm,cortex-a12-pmu"
"arm,cortex-a9-pmu"
"arm,cortex-a8-pmu"
"arm,cortex-a7-pmu"

View file

@ -8,11 +8,6 @@ Required properties:
- reg : SRAM iomem address range
Optional properties:
- map-exec: Map range to allow code execution
- map-cacheable: Map range as cacheable
Example:
sram: sram@5c000000 {

View file

@ -19,9 +19,6 @@ Optional properties:
"bus-width = <1>" property.
- sdhci,auto-cmd12: specifies that a controller can only handle auto
CMD12.
- voltage-ranges : two cells are required, first cell specifies minimum
slot voltage (mV), second cell specifies maximum slot voltage (mV).
Several ranges could be specified.
Example:
@ -32,5 +29,4 @@ sdhci@2e000 {
interrupt-parent = <&ipic>;
/* Filled in by U-Boot */
clock-frequency = <0>;
voltage-ranges = <3300 3300>;
};
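
As a rough sketch of how a host driver could consume this property (the function name is hypothetical; the property holds <min max> pairs in mV):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Sketch only: count and walk the <min max> voltage pairs. */
static int foo_parse_voltage_ranges(struct device_node *np)
{
	const __be32 *ranges;
	int len, i, num_ranges;

	ranges = of_get_property(np, "voltage-ranges", &len);
	if (!ranges || len % (2 * sizeof(u32)))
		return -EINVAL;		/* absent, or not whole pairs */

	num_ranges = len / (2 * sizeof(u32));
	for (i = 0; i < num_ranges; i++) {
		u32 min_mv = be32_to_cpu(ranges[2 * i]);
		u32 max_mv = be32_to_cpu(ranges[2 * i + 1]);

		/* a real driver would fold [min_mv, max_mv] into an OCR mask */
		pr_debug("voltage range %d: %u..%u mV\n", i, min_mv, max_mv);
	}
	return num_ranges;
}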

View file

@ -28,7 +28,6 @@ Optional properties:
- cap-mmc-highspeed: MMC high-speed timing is supported
- cap-power-off-card: powering off the card is safe
- cap-sdio-irq: enable SDIO IRQ signalling on this interface
- full-pwr-cycle: full power cycle of the card is supported
*NOTE* on CD and WP polarity. To use line polarity properties common to all
SD/MMC host controllers, we have to fix the meaning of the "normal" and "inverted"

View file

@ -1,14 +1,14 @@
* Synopsys Designware Mobile Storage Host Controller
* Synopsis Designware Mobile Storage Host Controller
The Synopsys designware mobile storage host controller is used to interface
The Synopsis designware mobile storage host controller is used to interface
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
differences between the core mmc properties described by mmc.txt and the
properties used by the Synopsys Designware Mobile Storage Host Controller.
properties used by the Synopsis Designware Mobile Storage Host Controller.
Required Properties:
* compatible: should be
- snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
- snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
* #address-cells: should be 1.
* #size-cells: should be 0.
@ -39,22 +39,6 @@ Required Properties:
Optional properties:
* clocks: from common clock binding: handle to biu and ciu clocks for the
bus interface unit clock and the card interface unit clock.
* clock-names: from common clock binding: Shall be "biu" and "ciu".
If the biu clock is missing we'll simply skip enabling it. If the
ciu clock is missing we'll just assume that the clock is running at
clock-frequency. It is an error to omit both the ciu clock and the
clock-frequency.
* clock-frequency: should be the frequency (in Hz) of the ciu clock. If this
is specified and the ciu clock is specified then we'll try to set the ciu
clock to this at probe time.
* clock-freq-min-max: Minimum and maximum clock frequency for the card output
clock (cclk_out). If it's not specified, max is 200MHz and min is 400kHz by default.
* num-slots: specifies the number of slots supported by the controller.
The number of physical slots actually used could be equal or less than the
value specified by num-slots. If this property is not specified, the value
@ -67,17 +51,10 @@ Optional properties:
* card-detect-delay: Delay in milli-seconds before detecting card after card
insert event. The default value is 0.
* supports-highspeed: Enables support for high speed cards (up to 50MHz)
* caps2-mmc-hs200-1_8v: Supports mmc HS200 SDR 1.8V mode
* caps2-mmc-hs200-1_2v: Supports mmc HS200 SDR 1.2V mode
* supports-highspeed: Enables support for high speed cards (upto 50MHz)
* broken-cd: as documented in mmc core bindings.
* vmmc-supply: The phandle to the regulator to use for vmmc. If this is
specified we'll defer probe until we can find this regulator.
Aliases:
- All the MSHC controller nodes should be represented in the aliases node using
@ -90,8 +67,6 @@ board specific portions as listed below.
dwmmc0@12200000 {
compatible = "snps,dw-mshc";
clocks = <&clock 351>, <&clock 132>;
clock-names = "biu", "ciu";
reg = <0x12200000 0x1000>;
interrupts = <0 75 0>;
#address-cells = <1>;
@ -99,15 +74,11 @@ board specific portions as listed below.
};
dwmmc0@12200000 {
clock-frequency = <400000000>;
clock-freq-min-max = <400000 200000000>;
num-slots = <1>;
supports-highspeed;
caps2-mmc-hs200-1_8v;
broken-cd;
fifo-depth = <0x80>;
card-detect-delay = <200>;
vmmc-supply = <&buck8>;
slot@0 {
reg = <0>;
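
A loose sketch of the biu/ciu fallback logic the property text above describes; foo_host and foo_setup_clocks are hypothetical stand-ins, not the actual dw_mmc code:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct foo_host {			/* hypothetical driver state */
	struct clk *biu_clk;
	struct clk *ciu_clk;
	u32 bus_hz;			/* from the clock-frequency property */
};

static int foo_setup_clocks(struct device *dev, struct foo_host *host)
{
	host->biu_clk = devm_clk_get(dev, "biu");
	if (!IS_ERR(host->biu_clk))
		clk_prepare_enable(host->biu_clk);  /* missing biu: just skip */

	host->ciu_clk = devm_clk_get(dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		/* no ciu clock: clock-frequency alone must carry the rate */
		if (!host->bus_hz)
			return -ENODEV;
	} else {
		clk_prepare_enable(host->ciu_clk);
		if (host->bus_hz)	/* try to set ciu to clock-frequency */
			clk_set_rate(host->ciu_clk, host->bus_hz);
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}
	return 0;
}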

View file

@ -165,8 +165,9 @@ slew-rate - set the slew rate
Arguments for parameters:
- bias-pull-up, -down and -pin-default take as optional argument on hardware
supporting it the pull strength in Ohm. bias-disable will disable the pull.
- bias-pull-up, -down and -pin-default take as optional argument 0 to disable
the pull, on hardware supporting it the pull strength in Ohm. bias-disable
will also disable any active pull.
- drive-strength takes as argument the target strength in mA.
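
A hedged sketch of how a pinconf back-end might honour those argument conventions, using the generic pinconf accessors; the foo_hw_* helpers are hypothetical:

#include <linux/errno.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/types.h>

extern int foo_hw_disable_pull(void);		/* hypothetical hardware ops */
extern int foo_hw_set_pull_up_ohm(u32 ohm);

static int foo_pinconf_set_one(unsigned long config)
{
	enum pin_config_param param = pinconf_to_config_param(config);
	u32 arg = pinconf_to_config_argument(config);

	if (param != PIN_CONFIG_BIAS_PULL_UP)
		return -ENOTSUPP;

	if (arg == 0)				/* argument 0 disables the pull */
		return foo_hw_disable_pull();

	return foo_hw_set_pull_up_ohm(arg);	/* pull strength in Ohm */
}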

View file

@ -46,7 +46,6 @@ ralink Mediatek/Ralink Technology Corp.
ramtron Ramtron International
realtek Realtek Semiconductor Corp.
renesas Renesas Electronics Corporation
rockchip Fuzhou Rockchip Electronics Co., Ltd
samsung Samsung Semiconductor
sbs Smart Battery System
schindler Schindler

View file

@ -14,11 +14,8 @@ Required properties:
Optional properties:
- pwm-names: a list of names for the PWM devices specified in the
"pwms" property (see PWM binding[0])
- enable-gpios: contains a single GPIO specifier for the GPIO which enables
and disables the backlight (see GPIO binding[1])
[0]: Documentation/devicetree/bindings/pwm/pwm.txt
[1]: Documentation/devicetree/bindings/gpio/gpio.txt
Example:
@ -28,6 +25,4 @@ Example:
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
enable-gpios = <&gpio 58 0>;
};

View file

@ -21,13 +21,7 @@ required properties:
vfront-porch, vback-porch, vsync-len: vertical display timing parameters in
lines
- clock-frequency: display clock in Hz
- screen-type: screen interface type, such as SCREEN_LVDS/SCREEN_RGB/SCREEN_DEP/SCREEN_MIPI,
  defined in include/dt-bindings/rkfb/rk_fb.h; this is used on the Rockchip platform
- out-face: screen data width, such as OUT_P888/OUT_D888_P666/OUT_P666/OUT_P565,
  defined in include/dt-bindings/rkfb/rk_fb.h; this is used on the Rockchip platform
- lvds-format: LVDS data format for LVDS screens, such as LVDS_8BIT_1/2/3/LVDS_6BIT,
  defined in include/dt-bindings/rkfb/rk_fb.h; this is used on the Rockchip platform
- swap-rb/rg/gb: set to 1 if the screen's RGB channels need to be swapped; this is used on the Rockchip platform
optional properties:
- hsync-active: hsync pulse is active low/high/ignored
- vsync-active: vsync pulse is active low/high/ignored

View file

@ -660,6 +660,11 @@ Subsystems may wish to conserve code space by using the set of generic power
management callbacks provided by the PM core, defined in
driver/base/power/generic_ops.c:
int pm_generic_runtime_idle(struct device *dev);
- invoke the ->runtime_idle() callback provided by the driver of this
device, if defined, and call pm_runtime_suspend() for this device if the
return value is 0 or the callback is not defined
int pm_generic_runtime_suspend(struct device *dev);
- invoke the ->runtime_suspend() callback provided by the driver of this
device and return its result, or return -EINVAL if not defined
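
For instance, a driver with no special runtime PM behaviour could simply wire the generic helpers into its dev_pm_ops (a sketch, assuming the standard struct dev_pm_ops fields):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/*
 * Sketch: reuse the generic helpers directly instead of open-coding
 * trivial ->runtime_* callbacks in the driver.
 */
static const struct dev_pm_ops foo_pm_ops = {
	.runtime_idle		= pm_generic_runtime_idle,
	.runtime_suspend	= pm_generic_runtime_suspend,
	.runtime_resume		= pm_generic_runtime_resume,
};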

View file

@ -1317,14 +1317,6 @@ W: http://wiki.xilinx.com
T: git git://git.xilinx.com/linux-xlnx.git
S: Supported
F: arch/arm/mach-zynq/
F: drivers/cpuidle/cpuidle-zynq.c
F: drivers/mmc/host/sdhci-of-arasan.c
ARM SMMU DRIVER
M: Will Deacon <will.deacon@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/iommu/arm-smmu.c
ARM64 PORT (AARCH64 ARCHITECTURE)
M: Catalin Marinas <catalin.marinas@arm.com>

View file

@ -10,8 +10,6 @@ NAME = TOSSUG Baby Fish
# Comments in this file are targeted only to the developer, do not
# expect to learn how to build the kernel reading this file.
SUBLEVEL = 0
# Do not:
# o use make's built-in rules and variables
# (this increases performance and avoids hard-to-debug behaviour);
@ -194,18 +192,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
# "make" in the configured kernel build directory always uses that.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
ARCH ?= arm
ARCH ?= $(SUBARCH)
ifeq ($(ARCH),arm64)
ifneq ($(wildcard ../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9),)
CROSS_COMPILE ?= ../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android-
endif
endif
ifeq ($(ARCH),arm)
ifneq ($(wildcard ../prebuilts/gcc/linux-x86/arm/arm-eabi-4.6),)
CROSS_COMPILE ?= ../prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
endif
endif
CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%)
# Architecture as present in compile.h
@ -353,12 +340,6 @@ DEPMOD = /sbin/depmod
PERL = perl
CHECK = sparse
# Use the wrapper for the compiler. This wrapper scans for new
# warnings and causes the build to stop upon encountering them.
ifneq ($(wildcard $(srctree)/scripts/gcc-wrapper.py),)
CC = $(srctree)/scripts/gcc-wrapper.py $(CROSS_COMPILE)gcc
endif
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void $(CF)
CFLAGS_MODULE =
@ -417,7 +398,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
export KBUILD_ARFLAGS OBJCOPY_OUTPUT_FORMAT
export KBUILD_ARFLAGS
# When compiling out-of-tree modules, put MODVERDIR in the module
# tree rather than in the kernel tree. The kernel tree might
@ -705,10 +686,6 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
ifeq ($(CONFIG_PIE),y)
LDFLAGS_vmlinux += --just-symbols=pie/pie.syms
endif
# Default kernel image to build when no specific target is given.
# KBUILD_IMAGE may be overruled on the command line or
# set in the environment
@ -764,14 +741,12 @@ core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
$(net-y) $(net-m) $(libs-y) $(libs-m) $(libpie-y)))
$(net-y) $(net-m) $(libs-y) $(libs-m)))
vmlinux-alldirs := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
$(init-n) $(init-) \
$(core-n) $(core-) $(drivers-n) $(drivers-) \
$(net-n) $(net-) $(libs-n) $(libs-) $(libpie-))))
pie-$(CONFIG_PIE) := pie/
$(net-n) $(net-) $(libs-n) $(libs-))))
init-y := $(patsubst %/, %/built-in.o, $(init-y))
core-y := $(patsubst %/, %/built-in.o, $(core-y))
@ -780,21 +755,16 @@ net-y := $(patsubst %/, %/built-in.o, $(net-y))
libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y))
libs-y := $(libs-y1) $(libs-y2)
pie-y := $(patsubst %/, %/built-in.o, $(pie-y))
libpie-y := $(patsubst %/, %/built-in.o, $(libpie-y))
# Externally visible symbols (used by link-vmlinux.sh)
export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
export KBUILD_VMLINUX_PIE := $(pie-y)
export KBUILD_LIBPIE := $(libpie-y)
export KBUILD_PIE_LDS := $(PIE_LDS)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/package/Makefile
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools virt)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_PIE_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_PIE) $(KBUILD_LIBPIE)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
# Final link of vmlinux
cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
@ -1063,7 +1033,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
#
clean: rm-dirs := $(CLEAN_DIRS)
clean: rm-files := $(CLEAN_FILES)
clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples pie)
clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
PHONY += $(clean-dirs) clean archclean vmlinuxclean
$(clean-dirs):

View file

@ -49,7 +49,6 @@ config ARM
select HAVE_MEMBLOCK
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
select HAVE_PIE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
@ -985,8 +984,6 @@ source "arch/arm/mach-mmp/Kconfig"
source "arch/arm/mach-realview/Kconfig"
source "arch/arm/mach-rockchip/Kconfig"
source "arch/arm/mach-sa1100/Kconfig"
source "arch/arm/plat-samsung/Kconfig"
@ -1362,29 +1359,6 @@ config ARM_ERRATA_798181
which sends an IPI to the CPUs that are running the same ASID
as the one being invalidated.
config ARM_ERRATA_818325
bool "ARM errata: Execution of an UNPREDICTABLE STR or STM instruction might deadlock"
depends on CPU_V7
help
This option enables the workaround for the 818325 Cortex-A12
(r0p0..r0p1-00lac0-rc11) erratum. When a CPU executes a sequence of
two conditional store instructions with opposite condition code and
updating the same register, the system might enter a deadlock if the
second conditional instruction is an UNPREDICTABLE STR or STM
instruction. This workaround setting bit[12] of the Feature Register
prevents the erratum. This bit disables an optimisation applied to a
sequence of 2 instructions that use opposing condition codes.
config ARM_ERRATA_821420
bool "ARM errata: A sequence of VMOV to core registers instruction might lead to a deadlock"
depends on CPU_V7
help
This option enables the workaround for the 821420 Cortex-A12 (r0p0,
r0p1) erratum. In very rare timing conditions, a sequence of VMOV to
Core registers instructions, for which the second one is in the
shadow of a branch or abort, can lead to a deadlock when the VMOV
instructions are issued out-of-order.
endmenu
source "arch/arm/common/Kconfig"
@ -1741,7 +1715,6 @@ config ARCH_NR_GPIO
int
default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
default 512 if SOC_OMAP5
default 512 if ARCH_ROCKCHIP
default 392 if ARCH_U8500
default 352 if ARCH_VT8500
default 288 if ARCH_SUNXI

View file

@ -374,13 +374,6 @@ choice
their output to the standard serial port on the RealView
PB1176 platform.
config DEBUG_ROCKCHIP_UART
depends on ARCH_ROCKCHIP
bool "Use UART on Rockchip SoCs"
help
Say Y here if you want kernel low-level debugging support
on Rockchip SoCs.
config DEBUG_S3C_UART0
depends on PLAT_SAMSUNG
select DEBUG_EXYNOS_UART if ARCH_EXYNOS
@ -667,7 +660,6 @@ config DEBUG_LL_INCLUDE
default "debug/picoxcell.S" if DEBUG_PICOXCELL_UART
default "debug/pxa.S" if DEBUG_PXA_UART1 || DEBUG_MMP_UART2 || \
DEBUG_MMP_UART3
default "debug/rockchip.S" if DEBUG_ROCKCHIP_UART
default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
default "debug/socfpga.S" if DEBUG_SOCFPGA_UART
default "debug/sunxi.S" if DEBUG_SUNXI_UART0 || DEBUG_SUNXI_UART1
@ -698,14 +690,6 @@ config EARLY_PRINTK
kernel low-level debugging functions. Add earlyprintk to your
kernel parameters to enable this console.
config EARLY_PRINTK_DIRECT
bool "Early printk direct"
depends on DEBUG_LL
help
Say Y here if you want to have an early console using the
kernel low-level debugging functions and EARLY_PRINTK is
not early enough.
config ARM_KPROBES_TEST
tristate "Kprobes test module"
depends on KPROBES && MODULES

View file

@ -130,8 +130,6 @@ KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/uni
CHECKFLAGS += -D__arm__
OBJCOPY_OUTPUT_FORMAT := elf32-littlearm
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o
textofs-y := 0x00008000
@ -182,7 +180,6 @@ machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
machine-$(CONFIG_ARCH_PRIMA2) += prima2
machine-$(CONFIG_ARCH_PXA) += pxa
machine-$(CONFIG_ARCH_REALVIEW) += realview
machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip
machine-$(CONFIG_ARCH_RPC) += rpc
machine-$(CONFIG_ARCH_S3C24XX) += s3c24xx
machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx
@ -273,9 +270,6 @@ drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
libs-y := arch/arm/lib/ $(libs-y)
PIE_LDS := arch/arm/kernel/pie.lds
libpie-$(CONFIG_PIE) += arch/arm/libpie/
# Default target when executing plain make
ifeq ($(CONFIG_XIP_KERNEL),y)
KBUILD_IMAGE := xipImage
@ -336,19 +330,3 @@ define archhelp
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
endef
kernel.img: zImage
$(Q)$(srctree)/mkkrnlimg $(objtree)/arch/arm/boot/zImage $(objtree)/kernel.img >/dev/null
@echo ' Image: kernel.img is ready'
%_kernel.img: %.dtb zImage
$(Q)cat $(objtree)/arch/arm/boot/zImage $(objtree)/arch/arm/boot/dts/$*.dtb > $(objtree)/zImage-dtb && \
$(srctree)/mkkrnlimg $(objtree)/zImage-dtb $(objtree)/kernel.img >/dev/null && \
rm -f $(objtree)/zImage-dtb
@echo ' Image: kernel.img (with $*.dtb) is ready'
LOGO := $(notdir $(wildcard $(srctree)/logo.bmp))
LOGO_KERNEL := $(notdir $(wildcard $(srctree)/logo_kernel.bmp))
%.img: %.dtb kernel.img $(LOGO) $(LOGO_KERNEL)
$(Q)$(srctree)/resource_tool $(objtree)/arch/arm/boot/dts/$*.dtb $(LOGO) $(LOGO_KERNEL)
@echo ' Image: resource.img (with $*.dtb $(LOGO) $(LOGO_KERNEL)) is ready'

View file

@ -126,11 +126,6 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -fstack-protector, , $(ORIG_CFLAGS))
endif
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -DZIMAGE

View file

@ -11,9 +11,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#ifdef CONFIG_ARM_TRUSTZONE
#undef CONFIG_MMU
#endif
.arch armv7-a
/*
* Debugging stuff
@ -182,9 +179,6 @@ not_angel:
@ determine final kernel image address
mov r4, pc
and r4, r4, #0xf8000000
#ifdef CONFIG_ARM_TRUSTZONE
mov r4, #0
#endif
add r4, r4, #TEXT_OFFSET
#else
ldr r4, =zreladdr
@ -719,9 +713,6 @@ __armv7_mmu_cache_on:
movne r1, #0xfffffffd @ domain 0 = client
bic r6, r6, #1 << 31 @ 32-bit translation system
bic r6, r6, #3 << 0 @ use only ttbr0
#ifdef CONFIG_ARM_TRUSTZONE
mov r6, #0
#endif
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mcr p15, 0, r0, c7, c5, 4 @ ISB

View file

@ -155,28 +155,6 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
am335x-bone.dtb
dtb-$(CONFIG_ARCH_ORION5X) += orion5x-lacie-ethernet-disk-mini-v2.dtb
dtb-$(CONFIG_ARCH_PRIMA2) += prima2-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += \
rk3036-fpga.dtb \
rk3036-new.dtb \
rk3036-rk88.dtb \
rk3036-sdk.dtb \
rk3126-86v.dtb \
rk3126-fpga.dtb \
rk3126-sdk.dtb \
rk3128-86v.dtb \
rk3128-box.dtb \
rk3128-box-ns.dtb \
rk3128-box-rk88.dtb \
rk3128-sdk.dtb \
rk3228-fpga.dtb \
rk3228-sdk.dtb \
rk3288-box.dtb \
rk3288-p977_8846.dtb \
rk3288-p977.dtb \
rk3288-popmetal.dtb \
rk3288-tb_8846.dtb \
rk3288-tb.dtb \
rk3288-tb_sec.dtb
dtb-$(CONFIG_ARCH_U8500) += snowball.dtb \
hrefprev60.dtb \
hrefv60plus.dtb \

View file

@ -78,15 +78,6 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
static inline u64 arch_counter_get_cntpct(void)
{
u64 cval;
isb();
asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
return cval;
}
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;

View file

@ -48,7 +48,6 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_NONE 0
#define R_ARM_PC24 1
#define R_ARM_ABS32 2
#define R_ARM_RELATIVE 23
#define R_ARM_CALL 28
#define R_ARM_JUMP24 29
#define R_ARM_V4BX 40

View file

@ -17,12 +17,16 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_FNCPY_H
#define __ASM_FNCPY_H
#include <linux/types.h>
/*
* These macros are intended for use when there is a need to copy a low-level
* function body into special memory.
*
* For example, when reconfiguring the SDRAM controller, the code doing the
* reconfiguration may need to run from SRAM.
*
NOTE: the copied function body must be entirely self-contained and
* position-independent in order for this to work properly.
*
* NOTE: in order for embedded literals and data to get referenced correctly,
* the alignment of functions must be preserved when copying. To ensure this,
* the source and destination addresses for fncpy() must be aligned to a
@ -30,29 +34,61 @@
* You will typically need a ".align 3" directive in the assembler where the
* function to be copied is defined, and ensure that your allocator for the
* destination buffer returns 8-byte-aligned pointers.
*/
#define ARCH_FNCPY_ALIGN 3
*
* Typical usage example:
*
* extern int f(args);
* extern uint32_t size_of_f;
* int (*copied_f)(args);
* void *sram_buffer;
*
* copied_f = fncpy(sram_buffer, &f, size_of_f);
*
* ... later, call the function: ...
*
* copied_f(args);
*
* The size of the function to be copied can't be determined from C:
this must be determined by other means, such as adding assembler directives
* in the file where f is defined.
*/
/* Clear the Thumb bit */
#define fnptr_to_addr(funcp) ({ \
#ifndef __ASM_FNCPY_H
#define __ASM_FNCPY_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
/*
* Minimum alignment requirement for the source and destination addresses
* for function copying.
*/
#define FNCPY_ALIGN 8
#define fncpy(dest_buf, funcp, size) ({ \
uintptr_t __funcp_address; \
typeof(funcp) __result; \
\
asm("" : "=r" (__funcp_address) : "0" (funcp)); \
__funcp_address & ~1; \
})
/* Put the Thumb bit back */
#define fnptr_translate(orig_funcp, new_addr) ({ \
uintptr_t __funcp_address; \
typeof(orig_funcp) __result; \
\
asm("" : "=r" (__funcp_address) : "0" (orig_funcp)); \
/* \
* Ensure alignment of source and destination addresses, \
* disregarding the function's Thumb bit: \
*/ \
BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
\
memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
flush_icache_range((unsigned long)(dest_buf), \
(unsigned long)(dest_buf) + (size)); \
\
asm("" : "=r" (__result) \
: "0" ((uintptr_t)(new_addr) | (__funcp_address & 1))); \
: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
\
__result; \
})
#include <asm-generic/fncpy.h>
#endif /* !__ASM_FNCPY_H */
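
Mirroring the usage example in the comment above, a caller copying a self-contained routine into SRAM might look like this sketch (foo_reconfigure and its exported size are hypothetical):

#include <linux/types.h>
#include <asm/fncpy.h>

extern int foo_reconfigure(unsigned long param);  /* hypothetical; ".align 3" in its asm file */
extern const u32 foo_reconfigure_sz;		  /* size exported alongside it */

static int (*foo_sram_fn)(unsigned long);

static int foo_run_from_sram(void *sram_buf, unsigned long param)
{
	/* sram_buf must be FNCPY_ALIGN (8-byte) aligned, as required above */
	foo_sram_fn = fncpy(sram_buf, &foo_reconfigure, foo_reconfigure_sz);
	return foo_sram_fn(param);
}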

View file

@ -329,8 +329,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define ioremap_exec(cookie,size) __arm_ioremap_exec((cookie), (size), true)
#define ioremap_exec_nocache(cookie,size) __arm_ioremap_exec((cookie), (size), false)
#define iounmap __arm_iounmap
/*

View file

@ -1,32 +1,7 @@
#ifndef __ASM_ARM_SUSPEND_H
#define __ASM_ARM_SUSPEND_H
#include <asm/pie.h>
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
/**
* ARM_PIE_RESUME - generate a PIE trampoline for resume
* @proc: SoC, should match argument used with PIE_OVERLAY_SECTION()
* @func: C or asm function to call at resume
* @stack: stack to use before calling func
*/
#define ARM_PIE_RESUME(proc, func, stack) \
static void __naked __noreturn __pie(proc) proc##_resume_trampoline2(void) \
{ \
__asm__ __volatile__( \
" mov sp, %0\n" \
: : "r"((stack)) : "sp"); \
\
func(); \
} \
\
void __naked __noreturn __pie(proc) proc##_resume_trampoline(void) \
{ \
pie_relocate_from_pie(); \
proc##_resume_trampoline2(); \
} \
EXPORT_PIE_SYMBOL(proc##_resume_trampoline)
#endif

View file

@ -1,2 +1 @@
vmlinux.lds
pie.lds

View file

@ -87,6 +87,4 @@ obj-y += psci.o
obj-$(CONFIG_SMP) += psci_smp.o
endif
obj-$(CONFIG_PIE) += pie.o
extra-y := $(head-y) vmlinux.lds pie.lds
extra-y := $(head-y) vmlinux.lds

View file

@ -213,7 +213,6 @@ static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = {
*/
static struct of_device_id cpu_pmu_of_device_ids[] = {
{.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
{.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
{.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
{.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
{.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},

View file

@ -109,19 +109,6 @@ enum armv7_a15_perf_types {
ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};
/* ARMv7 Cortex-A12 specific event types */
enum armv7_a12_perf_types {
ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
ARMV7_A12_PERFCTR_PC_WRITE_SPEC = 0x76,
ARMV7_A12_PERFCTR_PF_TLB_REFILL = 0xe7,
};
/*
* Cortex-A8 HW events mapping
*
@ -744,130 +731,6 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
};
/*
* Cortex-A12 HW events mapping
*/
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
/*
* Not all performance counters differentiate between read
* and write accesses/misses so we're not always strictly
* correct, but it's the best we can do. Writes and reads get
* combined in these cases.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Perf Events' indices
*/
@ -1394,12 +1257,6 @@ static int armv7_a7_map_event(struct perf_event *event)
&armv7_a7_perf_cache_map, 0xFF);
}
static int armv7_a12_map_event(struct perf_event *event)
{
return armpmu_map_event(event, &armv7_a12_perf_map,
&armv7_a12_perf_cache_map, 0xFF);
}
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->handle_irq = armv7pmu_handle_irq;
@ -1473,16 +1330,6 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
return 0;
}
static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "ARMv7 Cortex-A12";
cpu_pmu->map_event = armv7_a12_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
return 0;
}
#else
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
@ -1508,9 +1355,4 @@ static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
return -ENODEV;
}
#endif /* CONFIG_CPU_V7 */

View file

@ -255,7 +255,6 @@ void machine_shutdown(void)
*/
void machine_halt(void)
{
local_irq_disable();
smp_send_stop();
local_irq_disable();
@ -270,7 +269,6 @@ void machine_halt(void)
*/
void machine_power_off(void)
{
local_irq_disable();
smp_send_stop();
if (pm_power_off)
@ -290,7 +288,6 @@ void machine_power_off(void)
*/
void machine_restart(char *cmd)
{
local_irq_disable();
smp_send_stop();
/* Flush the console to make sure all the relevant messages make it

View file

@ -925,10 +925,6 @@ static int c_show(struct seq_file *m, void *v)
int i, j;
u32 cpuid;
#ifdef CONFIG_ARCH_ROCKCHIP
seq_printf(m, "Processor\t: %s rev %d (%s)\n",
cpu_name, read_cpuid_id() & 15, elf_platform);
#endif
for_each_online_cpu(i) {
/*
* glibc reads /proc/cpuinfo to determine the number of

View file

@ -81,8 +81,6 @@ SECTIONS
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
*(.pie.*)
*(.ARM.exidx.pie.*.text)
*(.discard)
*(.discard.*)
}

View file

@ -591,6 +591,11 @@ static int _od_runtime_suspend(struct device *dev)
return ret;
}
static int _od_runtime_idle(struct device *dev)
{
return pm_generic_runtime_idle(dev);
}
static int _od_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@ -648,7 +653,7 @@ static int _od_resume_noirq(struct device *dev)
struct dev_pm_domain omap_device_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
NULL)
_od_runtime_idle)
USE_PLATFORM_PM_SLEEP_OPS
.suspend_noirq = _od_suspend_noirq,
.resume_noirq = _od_resume_noirq,

View file

@ -596,16 +596,6 @@ config IO_36
comment "Processor Features"
config ARM_TRUSTZONE
bool "Support TrustZone-enabled Trusted Execution Environment"
default n
help
Select if you want a kernel to be executed at non-secure world.
This option should be used with related secure bootloader and
TrustZone software.
If you don't know about TrustZone, say 'N'.
config ARM_LPAE
bool "Support for the Large Physical Address Extension"
depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \

View file

@ -255,9 +255,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
#ifdef CONFIG_ARCH_ROCKCHIP
struct dma_attrs *attrs,
#endif
const void *caller);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
@ -345,9 +342,6 @@ static int __init atomic_pool_init(void)
if (IS_ENABLED(CONFIG_DMA_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
#ifdef CONFIG_ARCH_ROCKCHIP
NULL,
#endif
atomic_pool_init);
else
ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
@ -554,9 +548,6 @@ static int __free_from_pool(void *start, size_t size)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
#ifdef CONFIG_ARCH_ROCKCHIP
struct dma_attrs *attrs,
#endif
const void *caller)
{
unsigned long order = get_order(size);
@ -570,11 +561,6 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
__dma_clear_buffer(page, size);
#ifdef CONFIG_ARCH_ROCKCHIP
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return (*ret_page=page);
#endif
if (PageHighMem(page)) {
ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
if (!ptr) {
@ -589,20 +575,6 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
return ptr;
}
#ifdef CONFIG_ARCH_ROCKCHIP
static void __free_from_contiguous(struct device *dev, struct page *page,
void *cpu_addr, size_t size,
struct dma_attrs *attrs)
{
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
if (PageHighMem(page))
__dma_free_remap(cpu_addr, size);
else
__dma_remap(page, size, pgprot_kernel);
}
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
#else
static void __free_from_contiguous(struct device *dev, struct page *page,
void *cpu_addr, size_t size)
{
@ -612,7 +584,6 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
__dma_remap(page, size, pgprot_kernel);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
#endif
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
@ -631,17 +602,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(size, ret_page) NULL
#ifdef CONFIG_ARCH_ROCKCHIP
#define __alloc_from_contiguous(dev, size, prot, ret, attrs, c) NULL
#else
#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
#endif
#define __free_from_pool(cpu_addr, size) 0
#ifdef CONFIG_ARCH_ROCKCHIP
#define __free_from_contiguous(dev, page, cpu_addr, size, attrs) do { } while (0)
#else
#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
#endif
#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
@ -661,12 +624,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
#ifdef CONFIG_ARCH_ROCKCHIP
gfp_t gfp, pgprot_t prot, bool is_coherent,
struct dma_attrs *attrs, const void *caller)
#else
gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
#endif
{
u64 mask = get_coherent_dma_mask(dev);
struct page *page = NULL;
@ -706,11 +664,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
else if (!IS_ENABLED(CONFIG_DMA_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
#ifdef CONFIG_ARCH_ROCKCHIP
addr = __alloc_from_contiguous(dev, size, prot, &page, attrs, caller);
#else
addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
#endif
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
@ -732,9 +686,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, false,
#ifdef CONFIG_ARCH_ROCKCHIP
attrs,
#endif
__builtin_return_address(0));
}
@ -748,9 +699,6 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, true,
#ifdef CONFIG_ARCH_ROCKCHIP
attrs,
#endif
__builtin_return_address(0));
}
@ -810,11 +758,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
* Non-atomic allocations cannot be freed with IRQs disabled
*/
WARN_ON(irqs_disabled());
#ifdef CONFIG_ARCH_ROCKCHIP
__free_from_contiguous(dev, page, cpu_addr, size, attrs);
#else
__free_from_contiguous(dev, page, cpu_addr, size);
#endif
}
}

View file

@ -341,15 +341,6 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
//#ifdef CONFIG_ARCH_ROCKCHIP
for (i = 1; i < memblock.memory.cnt; i++) {
struct memblock_region *rgn = &memblock.memory.regions[i];
if (rgn->size != memblock.memory.regions[i-1].size)
memblock_reserve(rgn->base+rgn->size-PAGE_SIZE, PAGE_SIZE);
}
//#endif
/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
memblock_reserve(__pa(_sdata), _end - _sdata);

View file

@ -182,7 +182,6 @@ __v7_ca9mp_setup:
mov r10, #(1 << 0) @ TLB ops broadcasting
b 1f
__v7_ca7mp_setup:
__v7_ca12mp_setup:
__v7_ca15mp_setup:
mov r10, #0
1:
@ -326,28 +325,7 @@ __v7_setup:
1:
#endif
/* Cortex-A12 Errata */
3: ldr r10, =0x00000c0d @ Cortex-A12 primary part number
teq r0, r10
bne 4f
#ifdef CONFIG_ARM_ERRATA_818325
teq r6, #0x00 @ present in r0p0
teqne r6, #0x01 @ present in r0p1-00lac0-rc11
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 12 @ set bit #12
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
isb
#endif
#ifdef CONFIG_ARM_ERRATA_821420
teq r6, #0x00 @ present in r0p0
teqne r6, #0x01 @ present in r0p1
mrceq p15, 0, r10, c15, c0, 2
orreq r10, r10, #1 << 1
mcreq p15, 0, r10, c15, c0, 2
isb
#endif
4: mov r10, #0
3: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
@ -471,16 +449,6 @@ __v7_ca7mp_proc_info:
__v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
* ARM Ltd. Cortex A12 processor.
*/
.type __v7_ca12mp_proc_info, #object
__v7_ca12mp_proc_info:
.long 0x410fc0d0
.long 0xff0ffff0
__v7_proc __v7_ca12mp_setup
.size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
/*
* ARM Ltd. Cortex A15 processor.
*/

View file

@ -54,7 +54,7 @@ void *omap_sram_push_address(unsigned long size)
}
new_ceil -= size;
new_ceil = ROUND_DOWN(new_ceil, 1 << ARCH_FNCPY_ALIGN);
new_ceil = ROUND_DOWN(new_ceil, FNCPY_ALIGN);
omap_sram_ceil = IOMEM(new_ceil);
return (void *)omap_sram_ceil;

View file

@ -129,9 +129,6 @@ config SWIOTLB
config IOMMU_HELPER
def_bool SWIOTLB
config KERNEL_MODE_NEON
def_bool y
config FIX_EARLYCON_MEM
def_bool y
@ -141,14 +138,6 @@ source "kernel/Kconfig.freezer"
menu "Platform selection"
config ARCH_ROCKCHIP
bool "Rockchip SoCs"
select PINCTRL
select PINCTRL_RK3368
select ARCH_REQUIRE_GPIOLIB
source "arch/arm64/mach-rockchip/Kconfig"
config ARCH_VEXPRESS
bool "ARMv8 software model (Versatile Express)"
select ARCH_REQUIRE_GPIOLIB
@ -573,8 +562,6 @@ menu "CPU Power Management"
source "drivers/cpuidle/Kconfig"
source "drivers/cpuquiet/Kconfig"
endmenu
source "net/Kconfig"

View file

@ -48,7 +48,6 @@ endif
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
core-$(CONFIG_ARCH_ROCKCHIP) += arch/arm64/mach-rockchip/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
@ -100,13 +99,3 @@ define archhelp
echo ' (distribution) /sbin/installkernel or'
echo ' install to $$(INSTALL_PATH) and run lilo'
endef
kernel.img: Image
$(Q)$(srctree)/mkkrnlimg $(objtree)/arch/arm64/boot/Image $(objtree)/kernel.img >/dev/null
@echo ' Image: kernel.img is ready'
LOGO := $(notdir $(wildcard $(srctree)/logo.bmp))
LOGO_KERNEL := $(notdir $(wildcard $(srctree)/logo_kernel.bmp))
%.img: %.dtb kernel.img $(LOGO) $(LOGO_KERNEL)
$(Q)$(srctree)/resource_tool $(objtree)/arch/arm64/boot/dts/$*.dtb $(LOGO) $(LOGO_KERNEL)
@echo ' Image: resource.img (with $*.dtb $(LOGO) $(LOGO_KERNEL)) is ready'

View file

@ -1,11 +1,3 @@
dtb-$(CONFIG_ARCH_ROCKCHIP) += \
rk3368-box.dtb \
rk3368-box-r88_808.dtb \
rk3368-box-r88.dtb \
rk3368-fpga.dtb \
rk3368-p9_818.dtb \
rk3368-tb_8846.dtb \
rk3368-tb_mipi.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb \
fvp-base-gicv2-psci.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb

View file

@ -135,16 +135,6 @@ static inline void arch_timer_evtstrm_enable(int divider)
#endif
}
static inline u64 arch_counter_get_cntpct(void)
{
u64 cval;
isb();
asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
return cval;
}
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;

View file

@ -21,7 +21,6 @@ struct sleep_save_sp {
phys_addr_t save_ptr_stash_phys;
};
extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
extern int cpu_suspend(unsigned long);

View file

@ -22,7 +22,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/hardirq.h>
#include <asm/fpsimd.h>
#include <asm/cputype.h>

View file

@ -102,63 +102,33 @@ void arch_cpu_idle_dead(void)
}
#endif
/*
* Called by kexec, immediately prior to machine_kexec().
*
* This must completely disable all secondary CPUs; simply causing those CPUs
* to execute e.g. a RAM-based pin loop is not sufficient. This allows the
* kexec'd kernel to use any and all RAM as it sees fit, without having to
* avoid any code or data used by any SW CPU pin loop. The CPU hotplug
functionality embodied in disable_nonboot_cpus() is used to achieve this.
*/
void machine_shutdown(void)
{
disable_nonboot_cpus();
#ifdef CONFIG_SMP
smp_send_stop();
#endif
}
/*
* Halting simply requires that the secondary CPUs stop performing any
* activity (executing tasks, handling interrupts). smp_send_stop()
* achieves this.
*/
void machine_halt(void)
{
local_irq_disable();
smp_send_stop();
machine_shutdown();
while (1);
}
/*
* Power-off simply requires that the secondary CPUs stop performing any
* activity (executing tasks, handling interrupts). smp_send_stop()
* achieves this. When the system power is turned off, it will take all CPUs
* with it.
*/
void machine_power_off(void)
{
local_irq_disable();
smp_send_stop();
machine_shutdown();
if (pm_power_off)
pm_power_off();
}
/*
* Restart requires that the secondary CPUs stop performing any activity
* while the primary CPU resets the system. Systems with a single CPU can
* use soft_restart() as their machine descriptor's .restart hook, since that
* will cause the only available CPU to reset. Systems with multiple CPUs must
* provide a HW restart implementation, to ensure that all CPUs reset at once.
* This is required so that any code running after reset on the primary CPU
* doesn't have to co-ordinate with other CPUs to ensure they aren't still
* executing pre-reset code, and using RAM that the primary CPU's code wishes
* to use. Implementing such co-ordination would be essentially impossible.
*/
void machine_restart(char *cmd)
{
machine_shutdown();
/* Disable interrupts first */
local_irq_disable();
local_fiq_disable();
smp_send_stop();
/* Now call the architecture specific reboot code. */
if (arm_pm_restart)
@ -457,13 +427,3 @@ unsigned long randomize_et_dyn(unsigned long base)
{
return randomize_base(base);
}
void arch_cpu_idle_enter(void)
{
idle_notifier_call_chain(IDLE_START);
}
void arch_cpu_idle_exit(void)
{
idle_notifier_call_chain(IDLE_END);
}

View file

@ -23,7 +23,6 @@
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
@ -105,13 +104,12 @@ static u32 psci_power_state_pack(struct psci_power_state state)
static void psci_power_state_unpack(u32 power_state,
struct psci_power_state *state)
{
state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
PSCI_0_2_POWER_STATE_ID_SHIFT;
state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
state->affinity_level =
(power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
PSCI_0_2_POWER_STATE_AFFL_SHIFT;
state->id = (power_state >> PSCI_0_2_POWER_STATE_ID_SHIFT)
& PSCI_0_2_POWER_STATE_ID_MASK;
state->type = (power_state >> PSCI_0_2_POWER_STATE_TYPE_SHIFT)
& PSCI_0_2_POWER_STATE_TYPE_MASK;
state->affinity_level = (power_state >> PSCI_0_2_POWER_STATE_AFFL_SHIFT)
& PSCI_0_2_POWER_STATE_AFFL_MASK;
}
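
The two unpack variants above differ only in whether the mask is applied before or after the shift, so they agree only if the mask definition matches; a generic illustration with hypothetical constants:

#include <linux/types.h>

#define FIELD_SHIFT	16			/* hypothetical field */
#define FIELD_MASK	(0x3u << FIELD_SHIFT)	/* mask defined in place */

/* With an in-place mask, mask first, then shift down... */
static inline u32 unpack_in_place(u32 word)
{
	return (word & FIELD_MASK) >> FIELD_SHIFT;
}

/* ...while shifting first requires the unshifted mask (here 0x3). */
static inline u32 unpack_shift_first(u32 word)
{
	return (word >> FIELD_SHIFT) & (FIELD_MASK >> FIELD_SHIFT);
}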
static int psci_get_version(void)
@ -186,63 +184,6 @@ static int psci_migrate_info_type(void)
return err;
}
static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
unsigned int cpu)
{
int i, ret, count = 0;
struct psci_power_state *psci_states;
struct device_node *state_node;
/*
* If the PSCI cpu_suspend function hook has not been initialized
* idle states must not be enabled, so bail out
*/
if (!psci_ops.cpu_suspend)
return -EOPNOTSUPP;
/* Count idle states */
while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
count))) {
count++;
of_node_put(state_node);
}
if (!count)
return -ENODEV;
psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
if (!psci_states)
return -ENOMEM;
for (i = 0; i < count; i++) {
u32 psci_power_state;
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
ret = of_property_read_u32(state_node,
"arm,psci-suspend-param",
&psci_power_state);
if (ret) {
pr_warn(" * %s missing arm,psci-suspend-param property\n",
state_node->full_name);
of_node_put(state_node);
goto free_mem;
}
of_node_put(state_node);
pr_debug("psci-power-state %#x index %d\n", psci_power_state,
i);
psci_power_state_unpack(psci_power_state, &psci_states[i]);
}
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
return 0;
free_mem:
kfree(psci_states);
return ret;
}
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
@ -328,11 +269,9 @@ static int __init psci_0_2_init(struct device_node *np)
PSCI_0_2_FN_MIGRATE_INFO_TYPE;
psci_ops.migrate_info_type = psci_migrate_info_type;
#ifndef CONFIG_ARCH_ROCKCHIP
arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff;
#endif
out_put_node:
of_node_put(np);
@ -466,17 +405,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
for (i = 0; i < 10; i++) {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
#ifdef CONFIG_CPUQUIET_FRAMEWORK
if (system_state != SYSTEM_RUNNING)
#endif
pr_info("CPU%d killed.\n", cpu);
return 1;
}
msleep(10);
#ifdef CONFIG_CPUQUIET_FRAMEWORK
if (system_state != SYSTEM_RUNNING)
#endif
pr_info("Retrying again to check for CPU kill\n");
}
@ -489,41 +422,19 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
#endif
#ifdef CONFIG_ARM64_CPU_SUSPEND
static int psci_suspend_finisher(unsigned long index)
static int cpu_psci_cpu_suspend(unsigned long index)
{
struct psci_power_state *state = __get_cpu_var(psci_power_state);
return psci_ops.cpu_suspend(state[index - 1],
virt_to_phys(cpu_resume));
}
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
int ret;
struct psci_power_state *state = __get_cpu_var(psci_power_state);
/*
* idle state index 0 corresponds to wfi, should never be called
* from the cpu_suspend operations
*/
if (WARN_ON_ONCE(!index))
return -EINVAL;
if (!state)
return -EOPNOTSUPP;
if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
ret = psci_ops.cpu_suspend(state[index - 1], 0);
else
ret = __cpu_suspend(index, psci_suspend_finisher);
return ret;
return psci_ops.cpu_suspend(state[index], virt_to_phys(cpu_resume));
}
#endif
const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
.cpu_init_idle = cpu_psci_cpu_init_idle,
#endif
#ifdef CONFIG_SMP
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,

View file

@ -78,16 +78,6 @@ unsigned int compat_elf_hwcap2 __read_mostly;
static const char *cpu_name;
static const char *machine_name;
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);
unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);
phys_addr_t __fdt_pointer __initdata;
/*
@ -543,8 +533,7 @@ static int c_show(struct seq_file *m, void *v)
* software which does already (at least for 32-bit).
*/
seq_puts(m, "Features\t:");
if (personality(current->personality) == PER_LINUX32 ||
is_compat_task()) {
if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
for (j = 0; compat_hwcap_str[j]; j++)
if (COMPAT_ELF_HWCAP & (1 << j))
@ -568,11 +557,6 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
}
seq_printf(m, "Hardware\t: %s\n", machine_name);
seq_printf(m, "Revision\t: %04x\n", system_rev);
seq_printf(m, "Serial\t\t: %08x%08x\n",
system_serial_high, system_serial_low);
return 0;
}

View file

@ -49,39 +49,28 @@
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
.endm
/*
* Save CPU state for a suspend and execute the suspend finisher.
* On success it will return 0 through cpu_resume - ie through a CPU
* soft/hard reboot from the reset vector.
* On failure it returns the suspend finisher return value or force
* -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
* is not allowed to return, if it does this must be considered failure).
* It saves callee registers, and allocates space on the kernel stack
* to save the CPU specific registers + some other data for resume.
* Save CPU state for a suspend. This saves callee registers, and allocates
* space on the kernel stack to save the CPU specific registers + some
* other data for resume.
*
* x0 = suspend finisher argument
* x1 = suspend finisher function pointer
*/
ENTRY(__cpu_suspend_enter)
ENTRY(__cpu_suspend)
stp x29, lr, [sp, #-96]!
stp x19, x20, [sp,#16]
stp x21, x22, [sp,#32]
stp x23, x24, [sp,#48]
stp x25, x26, [sp,#64]
stp x27, x28, [sp,#80]
/*
* Stash suspend finisher and its argument in x20 and x19
*/
mov x19, x0
mov x20, x1
mov x2, sp
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
mov x0, sp
mov x1, sp
/*
* x0 now points to struct cpu_suspend_ctx allocated on the stack
* x1 now points to struct cpu_suspend_ctx allocated on the stack
*/
str x2, [x0, #CPU_CTX_SP]
ldr x1, =sleep_save_sp
ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
str x2, [x1, #CPU_CTX_SP]
ldr x2, =sleep_save_sp
ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
#ifdef CONFIG_SMP
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
@ -93,21 +82,11 @@ ENTRY(__cpu_suspend_enter)
ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
add x1, x1, x8, lsl #3
add x2, x2, x8, lsl #3
#endif
bl __cpu_suspend_save
/*
* Grab suspend finisher in x20 and its argument in x19
*/
mov x0, x19
mov x1, x20
/*
* We are ready for power down, fire off the suspend finisher
* in x1, with argument in x0
*/
blr x1
bl __cpu_suspend_finisher
/*
* Never gets here, unless suspend finisher fails.
* Never gets here, unless suspend fails.
* Successful cpu_suspend should return from cpu_resume, returning
* through this code path is considered an error
* If the return value is set to 0 force x0 = -EOPNOTSUPP
@ -124,7 +103,7 @@ ENTRY(__cpu_suspend_enter)
ldp x27, x28, [sp, #80]
ldp x29, lr, [sp], #96
ret
ENDPROC(__cpu_suspend_enter)
ENDPROC(__cpu_suspend)
.ltorg
/*
@ -147,12 +126,14 @@ cpu_resume_after_mmu:
ret
ENDPROC(cpu_resume_after_mmu)
.data
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
#ifdef CONFIG_SMP
mrs x1, mpidr_el1
adrp x8, mpidr_hash
add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
adr x4, mpidr_hash_ptr
ldr x5, [x4]
add x8, x4, x5 // x8 = struct mpidr_hash phys address
/* retrieve mpidr_hash members to compute the hash */
ldr x2, [x8, #MPIDR_HASH_MASK]
ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
@ -162,15 +143,14 @@ ENTRY(cpu_resume)
#else
mov x7, xzr
#endif
adrp x0, sleep_save_sp
add x0, x0, #:lo12:sleep_save_sp
adr x0, sleep_save_sp
ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
ldr x0, [x0, x7, lsl #3]
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
adrp x1, sleep_idmap_phys
adr x1, sleep_idmap_phys
/* load physical address of identity map page table in x1 */
ldr x1, [x1, #:lo12:sleep_idmap_phys]
ldr x1, [x1]
mov sp, x2
/*
* cpu_do_resume expects x0 to contain context physical address
@ -179,3 +159,26 @@ ENTRY(cpu_resume)
bl cpu_do_resume // PC relative jump, MMU off
b cpu_resume_mmu // Resume MMU, never returns
ENDPROC(cpu_resume)
.align 3
mpidr_hash_ptr:
/*
* offset of mpidr_hash symbol from current location
* used to obtain run-time mpidr_hash address with MMU off
*/
.quad mpidr_hash - .
/*
* physical address of identity mapped page tables
*/
.type sleep_idmap_phys, #object
ENTRY(sleep_idmap_phys)
.quad 0
/*
* struct sleep_save_sp {
* phys_addr_t *save_ptr_stash;
* phys_addr_t save_ptr_stash_phys;
* };
*/
.type sleep_save_sp, #object
ENTRY(sleep_save_sp)
.space SLEEP_SAVE_SP_SZ // struct sleep_save_sp

View file

@ -140,9 +140,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
cpumask_set_cpu(cpu, mm_cpumask(mm));
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
#ifdef CONFIG_CPUQUIET_FRAMEWORK
if (system_state != SYSTEM_RUNNING)
#endif
printk("CPU%u: Booted secondary processor\n", cpu);
/*
@ -264,9 +261,6 @@ void __cpu_die(unsigned int cpu)
pr_crit("CPU%u: cpu didn't die\n", cpu);
return;
}
#ifdef CONFIG_CPUQUIET_FRAMEWORK
if (system_state != SYSTEM_RUNNING)
#endif
pr_notice("CPU%u: shutdown\n", cpu);
/*

View file

@ -5,24 +5,26 @@
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
extern int __cpu_suspend(unsigned long);
/*
* This is called by __cpu_suspend_enter() to save the state, and do whatever
* This is called by __cpu_suspend() to save the state, and do whatever
* flushing is required to ensure that when the CPU goes to sleep we have
* the necessary data available when the caches are not searched.
*
* ptr: CPU context virtual address
* save_ptr: address of the location where the context physical address
* must be saved
* @arg: Argument to pass to suspend operations
* @ptr: CPU context virtual address
* @save_ptr: address of the location where the context physical address
* must be saved
*/
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
phys_addr_t *save_ptr)
int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
phys_addr_t *save_ptr)
{
int cpu = smp_processor_id();
*save_ptr = virt_to_phys(ptr);
cpu_do_suspend(ptr);
@ -33,6 +35,8 @@ void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
*/
__flush_dcache_area(ptr, sizeof(*ptr));
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
return cpu_ops[cpu]->cpu_suspend(arg);
}
/*
@ -52,15 +56,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
}
/**
* cpu_suspend() - function to enter a low-power state
* @arg: argument to pass to CPU suspend operations
* cpu_suspend
*
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
* operations back-end error code otherwise.
* @arg: argument to pass to the finisher function
*/
int cpu_suspend(unsigned long arg)
{
int cpu = smp_processor_id();
struct mm_struct *mm = current->active_mm;
int ret, cpu = smp_processor_id();
unsigned long flags;
/*
* If cpu_ops have not been registered or suspend
@ -68,21 +72,6 @@ int cpu_suspend(unsigned long arg)
*/
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
return -EOPNOTSUPP;
return cpu_ops[cpu]->cpu_suspend(arg);
}
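/*
 * Hedged caller-side sketch of cpu_suspend() (not part of this revert);
 * the meaning of the argument is defined by the platform back-end:
 */
static int my_enter_idle_state(void)
{
	int ret = cpu_suspend(0);	/* 0: hypothetical state index */

	if (ret == -EOPNOTSUPP)
		pr_warn("cpu_suspend: no suspend back-end registered\n");
	return ret;			/* 0 means we suspended and resumed */
}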
/*
* __cpu_suspend
*
* arg: argument to pass to the finisher function
* fn: finisher function pointer
*
*/
int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
int ret;
unsigned long flags;
/*
* From this point debug exceptions are disabled to prevent
@ -97,27 +86,16 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* page tables, so that the thread address space is properly
* set-up on function return.
*/
ret = __cpu_suspend_enter(arg, fn);
ret = __cpu_suspend(arg);
if (ret == 0) {
/*
* We are resuming from reset with TTBR0_EL1 set to the
* idmap to enable the MMU; restore the active_mm mappings in
* TTBR0_EL1 unless the active_mm == &init_mm, in which case
* the thread entered __cpu_suspend with TTBR0_EL1 set to
* reserved TTBR0 page tables and should be restored as such.
*/
if (mm == &init_mm)
cpu_set_reserved_ttbr0();
else
cpu_switch_mm(mm->pgd, mm);
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all();
/*
* Restore per-cpu offset before any kernel
* subsystem relying on it has a chance to run.
*/
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
set_my_cpu_offset(per_cpu_offset(cpu));
/*
* Restore HW breakpoint registers to sane values
@ -138,10 +116,10 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return ret;
}
struct sleep_save_sp sleep_save_sp;
phys_addr_t sleep_idmap_phys;
extern struct sleep_save_sp sleep_save_sp;
extern phys_addr_t sleep_idmap_phys;
static int __init cpu_suspend_init(void)
static int cpu_suspend_init(void)
{
void *ctx_ptr;

View file

@ -158,9 +158,6 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
if (!ptr)
goto no_mem;
if (flags & __GFP_ZERO)
memset(ptr, 0, size);
/* remove any dirty cache lines on the kernel alias */
__dma_flush_range(ptr, ptr + size);
@ -224,11 +221,10 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int i, ret;
ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) {
for_each_sg(sgl, sg, ret, i)
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
}
for_each_sg(sgl, sg, ret, i)
__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
sg->length, dir);
return ret;
}
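/*
 * For reference (hedged sketch, not part of this revert): the branch
 * removed above honoured DMA_ATTR_SKIP_CPU_SYNC, which a caller that
 * does its own CPU cache maintenance would request roughly like this:
 */
static int my_map_prewarmed_sg(struct device *dev, struct scatterlist *sgl,
			       int nents)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	/* mapping now skips the __dma_map_area() pass shown above */
	return dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, &attrs);
}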

View file

@ -39,10 +39,6 @@
#include <asm/sizes.h>
#include <asm/tlb.h>
#ifdef CONFIG_ARCH_ROCKCHIP
#include <linux/rockchip/common.h>
#endif
#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0;
@ -206,13 +202,6 @@ void __init arm64_memblock_init(void)
}
early_init_fdt_scan_reserved_mem();
#ifdef CONFIG_ARCH_ROCKCHIP
/* reserve memory for uboot */
rockchip_uboot_mem_reserve();
/* reserve memory for ION */
rockchip_ion_reserve();
#endif
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))

View file

@ -249,14 +249,3 @@ config SYSV68_PARTITION
partition table format used by Motorola Delta machines (using
sysv68).
Otherwise, say N.
config RK_PARTITION
bool "Rockchip partition table support" if PARTITION_ADVANCED
default y if ARCH_ROCKCHIP
---help---
Like most systems, Rockchip uses its own hard disk partition table
format, incompatible with all others. Say Y here if you would like
to be able to read the hard disk partition table format used by
Rockchip SoCs in machines with eMMC as the main storage disk.
Otherwise, or if you don't know what this is about, say N.

View file

@ -18,4 +18,3 @@ obj-$(CONFIG_IBM_PARTITION) += ibm.o
obj-$(CONFIG_EFI_PARTITION) += efi.o
obj-$(CONFIG_KARMA_PARTITION) += karma.o
obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o
obj-$(CONFIG_RK_PARTITION) += rk.o

View file

@ -19,6 +19,7 @@
#include <linux/genhd.h>
#include "check.h"
#include "acorn.h"
#include "amiga.h"
#include "atari.h"
@ -33,7 +34,6 @@
#include "efi.h"
#include "karma.h"
#include "sysv68.h"
#include "rk.h"
int warn_no_part = 1; /* This is ugly: should make genhd removable media aware */
@ -104,10 +104,6 @@ static int (*check_part[])(struct parsed_partitions *) = {
#ifdef CONFIG_SYSV68_PARTITION
sysv68_partition,
#endif
#ifdef CONFIG_RK_PARTITION
rkpart_partition,
#endif
NULL
};
@ -161,13 +157,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
sprintf(state->name, "p");
i = res = err = 0;
/* Rockchip partition table ONLY used by eMMC disk */
#ifdef CONFIG_RK_PARTITION
/* 179 is the MMC block major; jump to the last real entry of the
 * NULL-terminated check_part[] array, i.e. rkpart_partition */
if ((179 == MAJOR(bdev->bd_dev) && (1 == hd->emmc_disk)))
i = sizeof(check_part) / sizeof(struct parsed_partitions *) - 2;
#endif
while (!res && check_part[i]) {
memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
res = check_part[i++](state);

View file

@ -170,8 +170,6 @@ source "drivers/reset/Kconfig"
source "drivers/gator/Kconfig"
source "drivers/headset_observe/Kconfig"
source "drivers/android/Kconfig"
endmenu

View file

@ -108,7 +108,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-$(CONFIG_CPUQUIET_FRAMEWORK)+= cpuquiet/
obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
@ -156,6 +155,5 @@ obj-$(CONFIG_IPACK_BUS) += ipack/
obj-$(CONFIG_NTB) += ntb/
obj-$(CONFIG_GATOR) += gator/
obj-y += headset_observe/
obj-$(CONFIG_CORESIGHT) += coresight/
obj-$(CONFIG_ANDROID) += android/

View file

@ -921,6 +921,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = acpi_subsys_runtime_suspend,
.runtime_resume = acpi_subsys_runtime_resume,
.runtime_idle = pm_generic_runtime_idle,
#endif
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,

View file

@ -284,7 +284,7 @@ static const struct dev_pm_ops amba_pm = {
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
NULL
pm_generic_runtime_idle
)
};
@ -421,7 +421,6 @@ int amba_driver_register(struct amba_driver *drv)
{
drv->drv.bus = &amba_bustype;
#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
SETFN(probe);
SETFN(remove);
@ -491,7 +490,6 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
}
ret = amba_get_enable_pclk(dev);
ret = 0;
if (ret == 0) {
u32 pid, cid;

View file

@ -5483,7 +5483,7 @@ static int ata_port_runtime_idle(struct device *dev)
return -EBUSY;
}
return 0;
return pm_runtime_suspend(dev);
}
static int ata_port_runtime_suspend(struct device *dev)

View file

@ -464,13 +464,31 @@ static void device_remove_bin_attributes(struct device *dev,
static int device_add_groups(struct device *dev,
const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
int error = 0;
int i;
if (groups) {
for (i = 0; groups[i]; i++) {
error = sysfs_create_group(&dev->kobj, groups[i]);
if (error) {
while (--i >= 0)
sysfs_remove_group(&dev->kobj,
groups[i]);
break;
}
}
}
return error;
}
static void device_remove_groups(struct device *dev,
const struct attribute_group **groups)
{
sysfs_remove_groups(&dev->kobj, groups);
int i;
if (groups)
for (i = 0; groups[i]; i++)
sysfs_remove_group(&dev->kobj, groups[i]);
}
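/*
 * Both helpers above iterate until they hit a NULL entry, so the groups
 * array must be NULL-terminated. Hedged sketch of the expected shape
 * (the foo attribute is hypothetical):
 */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "foo\n");
}
static DEVICE_ATTR(foo, 0444, foo_show, NULL);

static struct attribute *my_attrs[] = {
	&dev_attr_foo.attr,
	NULL,			/* attribute list is NULL-terminated */
};
static const struct attribute_group my_group = { .attrs = my_attrs };
static const struct attribute_group *my_groups[] = {
	&my_group,
	NULL,			/* terminator stops the groups[i] loops */
};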
static int device_add_attrs(struct device *dev)

View file

@ -319,7 +319,6 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
cpu->dev.release = cpu_device_release;
cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_HAVE_CPU_AUTOPROBE
cpu->dev.bus->uevent = cpu_uevent;
#endif

View file

@ -90,13 +90,6 @@ static inline int is_dma_buf_file(struct file *file)
return file->f_op == &dma_buf_fops;
}
#ifdef CONFIG_ARCH_ROCKCHIP
int dma_buf_is_dma_buf(struct file *file)
{
return is_dma_buf_file(file);
}
#endif
/**
* dma_buf_export_named - Creates a new dma_buf, and associates an anon file
* with this buffer, so it can be exported.

View file

@ -126,13 +126,31 @@ EXPORT_SYMBOL_GPL(driver_remove_file);
static int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups)
{
return sysfs_create_groups(&drv->p->kobj, groups);
int error = 0;
int i;
if (groups) {
for (i = 0; groups[i]; i++) {
error = sysfs_create_group(&drv->p->kobj, groups[i]);
if (error) {
while (--i >= 0)
sysfs_remove_group(&drv->p->kobj,
groups[i]);
break;
}
}
}
return error;
}
static void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups)
{
sysfs_remove_groups(&drv->p->kobj, groups);
int i;
if (groups)
for (i = 0; groups[i]; i++)
sysfs_remove_group(&drv->p->kobj, groups[i]);
}
/**

View file

@ -48,25 +48,6 @@ int pinctrl_bind_pins(struct device *dev)
goto cleanup_get;
}
#ifdef CONFIG_PM
/*
* If power management is enabled, we also look for the optional
* sleep and idle pin states, with semantics as defined in
* <linux/pinctrl/pinctrl-state.h>
*/
dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p,
PINCTRL_STATE_SLEEP);
if (IS_ERR(dev->pins->sleep_state))
/* Not supplying this state is perfectly legal */
dev_dbg(dev, "no sleep pinctrl state\n");
dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p,
PINCTRL_STATE_IDLE);
if (IS_ERR(dev->pins->idle_state))
/* Not supplying this state is perfectly legal */
dev_dbg(dev, "no idle pinctrl state\n");
#endif
return 0;
/*

View file

@ -884,6 +884,7 @@ int platform_pm_restore(struct device *dev)
static const struct dev_pm_ops platform_dev_pm_ops = {
.runtime_suspend = pm_generic_runtime_suspend,
.runtime_resume = pm_generic_runtime_resume,
.runtime_idle = pm_generic_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
};

View file

@ -2144,6 +2144,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
genpd->domain.ops.suspend_late = pm_genpd_suspend_late;

View file

@ -11,6 +11,29 @@
#include <linux/export.h>
#ifdef CONFIG_PM_RUNTIME
/**
* pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
* @dev: Device to handle.
*
* If PM operations are defined for the @dev's driver and they include
* ->runtime_idle(), execute it and return its error code, if nonzero.
* Otherwise, execute pm_runtime_suspend() for the device and return 0.
*/
int pm_generic_runtime_idle(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (pm && pm->runtime_idle) {
int ret = pm->runtime_idle(dev);
if (ret)
return ret;
}
pm_runtime_suspend(dev);
return 0;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
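/*
 * Usage, as restored elsewhere in this revert (platform, amba, genpd,
 * ACPI): subsystems point their runtime_idle callback straight at the
 * generic helper. Sketch:
 */
static const struct dev_pm_ops my_bus_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume  = pm_generic_runtime_resume,
	.runtime_idle    = pm_generic_runtime_idle,
};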
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
* @dev: Device to suspend.

View file

@ -293,8 +293,11 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
if (dev->power.no_callbacks)
if (dev->power.no_callbacks) {
/* Assume ->runtime_idle() callback would have suspended. */
retval = rpm_suspend(dev, rpmflags);
goto out;
}
/* Carry out an asynchronous or a synchronous idle notification. */
if (rpmflags & RPM_ASYNC) {
@ -303,8 +306,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
goto out;
}
dev->power.idle_notification = true;
@ -324,14 +326,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
callback = dev->driver->pm->runtime_idle;
if (callback)
retval = __rpm_callback(callback, dev);
__rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags);
return retval;
}
/**

View file

@ -52,7 +52,6 @@ struct regmap_async {
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
unsigned long spinlock_flags;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */

View file

@ -49,17 +49,11 @@ static int regmap_i2c_gather_write(void *context,
xfer[0].flags = 0;
xfer[0].len = reg_size;
xfer[0].buf = (void *)reg;
#ifdef CONFIG_I2C_ROCKCHIP_COMPAT
xfer[0].scl_rate = 100*1000;
#endif
xfer[1].addr = i2c->addr;
xfer[1].flags = I2C_M_NOSTART;
xfer[1].len = val_size;
xfer[1].buf = (void *)val;
#ifdef CONFIG_I2C_ROCKCHIP_COMPAT
xfer[1].scl_rate = 100*1000;
#endif
ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret == 2)
@ -83,17 +77,11 @@ static int regmap_i2c_read(void *context,
xfer[0].flags = 0;
xfer[0].len = reg_size;
xfer[0].buf = (void *)reg;
#ifdef CONFIG_I2C_ROCKCHIP_COMPAT
xfer[0].scl_rate = 100*1000;
#endif
xfer[1].addr = i2c->addr;
xfer[1].flags = I2C_M_RD;
xfer[1].len = val_size;
xfer[1].buf = val;
#ifdef CONFIG_I2C_ROCKCHIP_COMPAT
xfer[1].scl_rate = 100*1000;
#endif
ret = i2c_transfer(i2c->adapter, xfer, 2);
if (ret == 2)

View file

@ -302,16 +302,13 @@ static void regmap_unlock_mutex(void *__map)
static void regmap_lock_spinlock(void *__map)
{
struct regmap *map = __map;
unsigned long flags;
spin_lock_irqsave(&map->spinlock, flags);
map->spinlock_flags = flags;
spin_lock(&map->spinlock);
}
static void regmap_unlock_spinlock(void *__map)
{
struct regmap *map = __map;
spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
spin_unlock(&map->spinlock);
}
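/*
 * Note on the hunk above: the irqsave variant being removed stashed the
 * saved flags in struct regmap so that a spinlock-guarded regmap could
 * also be used from contexts with interrupts disabled; the plain
 * spin_lock() restored here assumes no such callers.
 */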
static void dev_get_regmap_release(struct device *dev, void *res)

View file

@ -13,12 +13,6 @@ config BT_HCIBTUSB
Say Y here to compile support for Bluetooth USB devices into the
kernel or say M to compile it as module (btusb).
config BT_RTKBTUSB
tristate "RTK HCI USB driver"
depends on USB
help
RTK Bluetooth HCI USB driver
config BT_HCIBTSDIO
tristate "HCI SDIO driver"
depends on MMC

View file

@ -2,7 +2,6 @@
# Makefile for the Linux Bluetooth HCI device drivers.
#
obj-$(CONFIG_BT) += vflash.o
obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o
obj-$(CONFIG_BT_HCIUART) += hci_uart.o
obj-$(CONFIG_BT_HCIBCM203X) += bcm203x.o
@ -14,7 +13,6 @@ obj-$(CONFIG_BT_HCIBLUECARD) += bluecard_cs.o
obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o
obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
obj-$(CONFIG_BT_RTKBTUSB) += rtk_btusb.o
obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
obj-$(CONFIG_BT_ATH3K) += ath3k.o

View file

@ -27,7 +27,7 @@ config COMMON_CLK_DEBUG
bool "DebugFS representation of clock tree"
select DEBUG_FS
---help---
Creates a directory hierarchy in debugfs for visualizing the clk
Creates a directory hierchy in debugfs for visualizing the clk
tree structure. Each directory contains read-only members
that export information specific to that clk node: clk_rate,
clk_flags, clk_prepare_count, clk_enable_count &

View file

@ -20,7 +20,6 @@ obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_ARCH_PRIMA2) += clk-prima2.o
obj-$(CONFIG_PLAT_ORION) += mvebu/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif

View file

@ -55,30 +55,6 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
return rate_ops->recalc_rate(rate_hw, parent_rate);
}
static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
struct clk **best_parent_p)
{
struct clk_composite *composite = to_clk_composite(hw);
const struct clk_ops *rate_ops = composite->rate_ops;
const struct clk_ops *mux_ops = composite->mux_ops;
struct clk_hw *rate_hw = composite->rate_hw;
struct clk_hw *mux_hw = composite->mux_hw;
if (rate_hw && rate_ops && rate_ops->determine_rate) {
rate_hw->clk = hw->clk;
return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
best_parent_p);
} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
mux_hw->clk = hw->clk;
return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
best_parent_p);
} else {
pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
return 0;
}
}
static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
@ -171,8 +147,6 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
composite->mux_ops = mux_ops;
clk_composite_ops->get_parent = clk_composite_get_parent;
clk_composite_ops->set_parent = clk_composite_set_parent;
if (mux_ops->determine_rate)
clk_composite_ops->determine_rate = clk_composite_determine_rate;
}
if (rate_hw && rate_ops) {
@ -196,8 +170,6 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
composite->rate_hw = rate_hw;
composite->rate_ops = rate_ops;
clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
if (rate_ops->determine_rate)
clk_composite_ops->determine_rate = clk_composite_determine_rate;
}
if (gate_hw && gate_ops) {

View file

@ -317,12 +317,8 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = div_mask(divider) << (divider->shift + 16);
} else {
val = readl(divider->reg);
val &= ~(div_mask(divider) << divider->shift);
}
val = readl(divider->reg);
val &= ~(div_mask(divider) << divider->shift);
val |= value << divider->shift;
writel(val, divider->reg);
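/*
 * Background (hedged sketch, not part of this revert): the
 * CLK_DIVIDER_HIWORD_MASK branch deleted above targets registers whose
 * upper 16 bits are a write-enable mask for the lower 16, a layout some
 * SoCs (e.g. Rockchip) use to avoid read-modify-write cycles:
 */
static void hiword_field_write(void __iomem *reg, u32 mask, u32 shift,
			       u32 value)
{
	u32 val = (mask << (shift + 16)) |	/* unlock just this field */
		  (value << shift);		/* new field contents */

	writel(val, reg);			/* no readl() required */
}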
@ -349,13 +345,6 @@ static struct clk *_register_divider(struct device *dev, const char *name,
struct clk *clk;
struct clk_init_data init;
if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
if (width + shift > 16) {
pr_warn("divider value exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
}
}
/* allocate the divider */
div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
if (!div) {

View file

@ -53,18 +53,12 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
if (gate->lock)
spin_lock_irqsave(gate->lock, flags);
if (gate->flags & CLK_GATE_HIWORD_MASK) {
reg = BIT(gate->bit_idx + 16);
if (set)
reg |= BIT(gate->bit_idx);
} else {
reg = readl(gate->reg);
reg = readl(gate->reg);
if (set)
reg |= BIT(gate->bit_idx);
else
reg &= ~BIT(gate->bit_idx);
}
if (set)
reg |= BIT(gate->bit_idx);
else
reg &= ~BIT(gate->bit_idx);
writel(reg, gate->reg);
@ -127,13 +121,6 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
struct clk *clk;
struct clk_init_data init;
if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
if (bit_idx > 16) {
pr_err("gate bit exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
}
}
/* allocate the gate */
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
if (!gate) {

View file

@ -86,12 +86,8 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
if (mux->flags & CLK_MUX_HIWORD_MASK) {
val = mux->mask << (mux->shift + 16);
} else {
val = readl(mux->reg);
val &= ~(mux->mask << mux->shift);
}
val = readl(mux->reg);
val &= ~(mux->mask << mux->shift);
val |= index << mux->shift;
writel(val, mux->reg);
@ -104,7 +100,6 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
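/*
 * Hedged registration sketch for the mux ops above (register layout and
 * clock names are hypothetical); clk_register_mux() is the
 * contiguous-bitfield wrapper around clk_register_mux_table():
 */
static struct clk *register_uart_sel(void __iomem *base, spinlock_t *lock)
{
	static const char *parents[] = { "pll", "xin24m" };

	return clk_register_mux(NULL, "uart_sel", parents,
				ARRAY_SIZE(parents), 0,
				base + 0x10,	/* mux register */
				4,		/* shift */
				1,		/* width, enough for 2 parents */
				0, lock);
}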
@ -116,15 +111,6 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
struct clk_mux *mux;
struct clk *clk;
struct clk_init_data init;
u8 width = 0;
if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
width = fls(mask) - ffs(mask) + 1;
if (width + shift > 16) {
pr_err("mux value exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
}
}
/* allocate the mux */
mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);

View file

@ -107,7 +107,7 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, clk_get_rate(c));
c->enable_count, c->prepare_count, c->rate);
seq_printf(s, "\n");
}
@ -166,7 +166,7 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
seq_printf(s, "\"%s\": { ", c->name);
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
seq_printf(s, "\"rate\": %lu", c->rate);
}
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
@ -458,6 +458,7 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
clk->ops->unprepare(clk->hw);
}
}
EXPORT_SYMBOL_GPL(__clk_get_flags);
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
@ -533,7 +534,7 @@ static int clk_disable_unused(void)
return 0;
}
late_initcall_sync(clk_disable_unused);
late_initcall(clk_disable_unused);
/*** helper functions ***/
@ -558,19 +559,6 @@ struct clk *__clk_get_parent(struct clk *clk)
return !clk ? NULL : clk->parent;
}
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
if (!clk || index >= clk->num_parents)
return NULL;
else if (!clk->parents)
return __clk_lookup(clk->parent_names[index]);
else if (!clk->parents[index])
return clk->parents[index] =
__clk_lookup(clk->parent_names[index]);
else
return clk->parents[index];
}
unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->enable_count;
@ -606,7 +594,6 @@ unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);
bool __clk_is_prepared(struct clk *clk)
{
@ -692,55 +679,6 @@ struct clk *__clk_lookup(const char *name)
return NULL;
}
/*
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
*/
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
struct clk **best_parent_p)
{
struct clk *clk = hw->clk, *parent, *best_parent = NULL;
int i, num_parents;
unsigned long parent_rate, best = 0;
/* if NO_REPARENT flag set, pass through to current parent */
if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
parent = clk->parent;
if (clk->flags & CLK_SET_RATE_PARENT)
best = __clk_round_rate(parent, rate);
else if (parent)
best = __clk_get_rate(parent);
else
best = __clk_get_rate(clk);
goto out;
}
/* find the parent that can provide the fastest rate <= rate */
num_parents = clk->num_parents;
for (i = 0; i < num_parents; i++) {
parent = clk_get_parent_by_index(clk, i);
if (!parent)
continue;
if (clk->flags & CLK_SET_RATE_PARENT)
parent_rate = __clk_round_rate(parent, rate);
else
parent_rate = __clk_get_rate(parent);
if (parent_rate <= rate && parent_rate > best) {
best_parent = parent;
best = parent_rate;
}
}
out:
if (best_parent)
*best_parent_p = best_parent;
*best_parent_rate = best;
return best;
}
/*** clk api ***/
void __clk_unprepare(struct clk *clk)
@ -764,7 +702,7 @@ void __clk_unprepare(struct clk *clk)
/**
* clk_unprepare - undo preparation of a clock source
* @clk: the clk being unprepared
* @clk: the clk being unprepare
*
* clk_unprepare may sleep, which differentiates it from clk_disable. In a
* simple case, clk_unprepare can be used instead of clk_disable to gate a clk
@ -931,31 +869,27 @@ EXPORT_SYMBOL_GPL(clk_enable);
/**
* __clk_round_rate - round the given rate for a clk
* @clk: round the rate of this clock
* @rate: the rate which is to be rounded
*
* Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long parent_rate = 0;
struct clk *parent;
if (!clk)
return 0;
parent = clk->parent;
if (parent)
parent_rate = parent->rate;
if (!clk->ops->round_rate) {
if (clk->flags & CLK_SET_RATE_PARENT)
return __clk_round_rate(clk->parent, rate);
else
return clk->rate;
}
if (clk->ops->determine_rate)
return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
&parent);
else if (clk->ops->round_rate)
return clk->ops->round_rate(clk->hw, rate, &parent_rate);
else if (clk->flags & CLK_SET_RATE_PARENT)
return __clk_round_rate(clk->parent, rate);
else
return clk->rate;
if (clk->parent)
parent_rate = clk->parent->rate;
return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}
/**
@ -1022,7 +956,7 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
*
* Walks the subtree of clks starting with clk and recalculates rates as it
* goes. Note that if a clk does not implement the .recalc_rate callback then
* it is assumed that the clock will take on the rate of its parent.
* it is assumed that the clock will take on the rate of it's parent.
*
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
@ -1080,121 +1014,6 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_rate);
static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
int i;
if (!clk->parents) {
clk->parents = kcalloc(clk->num_parents,
sizeof(struct clk *), GFP_KERNEL);
if (!clk->parents)
return -ENOMEM;
}
/*
* find index of new parent clock using cached parent ptrs,
* or if not yet cached, use string name comparison and cache
* them now to avoid future calls to __clk_lookup.
*/
for (i = 0; i < clk->num_parents; i++) {
if (clk->parents[i] == parent)
return i;
if (clk->parents[i])
continue;
if (!strcmp(clk->parent_names[i], parent->name)) {
clk->parents[i] = __clk_lookup(parent->name);
return i;
}
}
return -EINVAL;
}
static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
hlist_del(&clk->child_node);
if (new_parent) {
/* avoid duplicate POST_RATE_CHANGE notifications */
if (new_parent->new_child == clk)
new_parent->new_child = NULL;
hlist_add_head(&clk->child_node, &new_parent->children);
} else {
hlist_add_head(&clk->child_node, &clk_orphan_list);
}
clk->parent = new_parent;
}
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
unsigned long flags;
int ret = 0;
struct clk *old_parent = clk->parent;
/*
* Migrate prepare state between parents and prevent race with
* clk_enable().
*
* If the clock is not prepared, then a race with
* clk_enable/disable() is impossible since we already have the
* prepare lock (future calls to clk_enable() need to be preceded by
* a clk_prepare()).
*
* If the clock is prepared, migrate the prepared state to the new
* parent and also protect against a race with clk_enable() by
* forcing the clock and the new parent on. This ensures that all
* future calls to clk_enable() are practically NOPs with respect to
* hardware and software states.
*
* See also: Comment for clk_set_parent() below.
*/
if (clk->prepare_count) {
__clk_prepare(parent);
clk_enable(parent);
clk_enable(clk);
}
/* update the clk tree topology */
flags = clk_enable_lock();
clk_reparent(clk, parent);
clk_enable_unlock(flags);
/* change clock input source */
if (parent && clk->ops->set_parent)
ret = clk->ops->set_parent(clk->hw, p_index);
if (ret) {
flags = clk_enable_lock();
clk_reparent(clk, old_parent);
clk_enable_unlock(flags);
if (clk->prepare_count) {
clk_disable(clk);
clk_disable(parent);
__clk_unprepare(parent);
}
return ret;
}
/*
* Finish the migration of prepare state and undo the changes done
* for preventing a race with clk_enable().
*/
if (clk->prepare_count) {
clk_disable(clk);
clk_disable(old_parent);
__clk_unprepare(old_parent);
}
/* update debugfs with new clk tree topology */
clk_debug_reparent(clk, parent);
return 0;
}
/**
* __clk_speculate_rates
* @clk: first clk in the subtree
@ -1207,7 +1026,7 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
* pre-rate change notifications and returns early if no clks in the
* subtree have subscribed to the notifications. Note that if a clk does not
* implement the .recalc_rate callback then it is assumed that the clock will
* take on the rate of its parent.
* take on the rate of it's parent.
*
* Caller must hold prepare_lock.
*/
@ -1239,25 +1058,18 @@ out:
return ret;
}
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
struct clk *new_parent, u8 p_index)
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
struct clk *child;
clk->new_rate = new_rate;
clk->new_parent = new_parent;
clk->new_parent_index = p_index;
/* include clk in new parent's PRE_RATE_CHANGE notifications */
clk->new_child = NULL;
if (new_parent && new_parent != clk->parent)
new_parent->new_child = clk;
hlist_for_each_entry(child, &clk->children, child_node) {
if (child->ops->recalc_rate)
child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
else
child->new_rate = new_rate;
clk_calc_subtree(child, child->new_rate, NULL, 0);
clk_calc_subtree(child, child->new_rate);
}
}
@ -1268,63 +1080,50 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
struct clk *old_parent, *parent;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
int p_index = 0;
/* sanity */
if (IS_ERR_OR_NULL(clk))
return NULL;
/* save parent rate, if it exists */
parent = old_parent = clk->parent;
if (parent)
best_parent_rate = parent->rate;
if (clk->parent)
best_parent_rate = clk->parent->rate;
/* find the closest rate and parent clk/rate */
if (clk->ops->determine_rate) {
new_rate = clk->ops->determine_rate(clk->hw, rate,
&best_parent_rate,
&parent);
} else if (clk->ops->round_rate) {
new_rate = clk->ops->round_rate(clk->hw, rate,
&best_parent_rate);
} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
/* pass-through clock without adjustable parent */
clk->new_rate = clk->rate;
return NULL;
} else {
/* pass-through clock with adjustable parent */
top = clk_calc_new_rates(parent, rate);
new_rate = parent->new_rate;
/* never propagate up to the parent */
if (!(clk->flags & CLK_SET_RATE_PARENT)) {
if (!clk->ops->round_rate) {
clk->new_rate = clk->rate;
return NULL;
}
new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
goto out;
}
/* some clocks must be gated to change parent */
if (parent != old_parent &&
(clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
pr_debug("%s: %s not gated but wants to reparent\n",
__func__, clk->name);
/* need clk->parent from here on out */
if (!clk->parent) {
pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
return NULL;
}
/* try finding the new parent index */
if (parent) {
p_index = clk_fetch_parent_index(clk, parent);
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, clk->name);
return NULL;
}
if (!clk->ops->round_rate) {
top = clk_calc_new_rates(clk->parent, rate);
new_rate = clk->parent->new_rate;
goto out;
}
if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
best_parent_rate != parent->rate)
top = clk_calc_new_rates(parent, best_parent_rate);
new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
if (best_parent_rate != clk->parent->rate) {
top = clk_calc_new_rates(clk->parent, best_parent_rate);
goto out;
}
out:
clk_calc_subtree(clk, new_rate, parent, p_index);
clk_calc_subtree(clk, new_rate);
return top;
}
@ -1336,7 +1135,7 @@ out:
*/
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
struct clk *child, *tmp_clk, *fail_clk = NULL;
struct clk *child, *fail_clk = NULL;
int ret = NOTIFY_DONE;
if (clk->rate == clk->new_rate)
@ -1349,19 +1148,9 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
}
hlist_for_each_entry(child, &clk->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != clk)
continue;
tmp_clk = clk_propagate_rate_change(child, event);
if (tmp_clk)
fail_clk = tmp_clk;
}
/* handle the new child who might not be in clk->children yet */
if (clk->new_child) {
tmp_clk = clk_propagate_rate_change(clk->new_child, event);
if (tmp_clk)
fail_clk = tmp_clk;
clk = clk_propagate_rate_change(child, event);
if (clk)
fail_clk = clk;
}
return fail_clk;
@ -1374,24 +1163,11 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
static void clk_change_rate(struct clk *clk)
{
struct clk *child;
unsigned long old_rate, tmp_rate;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
old_rate = clk->rate;
/* set parent */
if (clk->new_parent && clk->new_parent != clk->parent) {
if (clk->flags & CLK_SET_RATE_PARENT_IN_ORDER) {
tmp_rate = clk->ops->recalc_rate(clk->hw, clk->new_parent->rate);
if ((tmp_rate > clk->rate) && (tmp_rate > clk->new_rate)) {
if (clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate, clk->new_parent->rate);
}
}
__clk_set_parent(clk, clk->new_parent, clk->new_parent_index);
}
if (clk->parent)
best_parent_rate = clk->parent->rate;
@ -1406,16 +1182,8 @@ static void clk_change_rate(struct clk *clk)
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
hlist_for_each_entry(child, &clk->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != clk)
continue;
hlist_for_each_entry(child, &clk->children, child_node)
clk_change_rate(child);
}
/* handle the new child who might not be in clk->children yet */
if (clk->new_child)
clk_change_rate(clk->new_child);
}
/**
@ -1430,7 +1198,7 @@ static void clk_change_rate(struct clk *clk)
* outcome of clk's .round_rate implementation. If *parent_rate is unchanged
* after calling .round_rate then upstream parent propagation is ignored. If
* *parent_rate comes back with a new rate for clk's parent then we propagate
* up to clk's parent and set its rate. Upward propagation will continue
* up to clk's parent and set it's rate. Upward propagation will continue
* until either a clk does not support the CLK_SET_RATE_PARENT flag or
* .round_rate stops requesting changes to clk's parent_rate.
*
@ -1444,14 +1212,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
struct clk *top, *fail_clk;
int ret = 0;
if (!clk)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
/* bail early if nothing to do */
if (rate == clk_get_rate(clk))
if (rate == clk->rate)
goto out;
if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
@ -1547,15 +1312,33 @@ static struct clk *__clk_init_parent(struct clk *clk)
if (!clk->parents)
clk->parents =
kcalloc(clk->num_parents, sizeof(struct clk *),
kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
ret = clk_get_parent_by_index(clk, index);
if (!clk->parents)
ret = __clk_lookup(clk->parent_names[index]);
else if (!clk->parents[index])
ret = clk->parents[index] =
__clk_lookup(clk->parent_names[index]);
else
ret = clk->parents[index];
out:
return ret;
}
static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
hlist_del(&clk->child_node);
if (new_parent)
hlist_add_head(&clk->child_node, &new_parent->children);
else
hlist_add_head(&clk->child_node, &clk_orphan_list);
clk->parent = new_parent;
}
void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
clk_reparent(clk, new_parent);
@ -1563,33 +1346,123 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
u8 i;
if (!clk->parents)
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
/*
* find index of new parent clock using cached parent ptrs,
* or if not yet cached, use string name comparison and cache
* them now to avoid future calls to __clk_lookup.
*/
for (i = 0; i < clk->num_parents; i++) {
if (clk->parents && clk->parents[i] == parent)
break;
else if (!strcmp(clk->parent_names[i], parent->name)) {
if (clk->parents)
clk->parents[i] = __clk_lookup(parent->name);
break;
}
}
return i;
}
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
unsigned long flags;
int ret = 0;
struct clk *old_parent = clk->parent;
bool migrated_enable = false;
/* migrate prepare */
if (clk->prepare_count)
__clk_prepare(parent);
flags = clk_enable_lock();
/* migrate enable */
if (clk->enable_count) {
__clk_enable(parent);
migrated_enable = true;
}
/* update the clk tree topology */
clk_reparent(clk, parent);
clk_enable_unlock(flags);
/* change clock input source */
if (parent && clk->ops->set_parent)
ret = clk->ops->set_parent(clk->hw, p_index);
if (ret) {
/*
* The error handling is tricky because we need to release the
* spinlock while issuing the .set_parent callback. This means the
* new parent might have been enabled/disabled in between, which
* must be taken into account when rolling back.
*/
flags = clk_enable_lock();
clk_reparent(clk, old_parent);
if (migrated_enable && clk->enable_count) {
__clk_disable(parent);
} else if (migrated_enable && (clk->enable_count == 0)) {
__clk_disable(old_parent);
} else if (!migrated_enable && clk->enable_count) {
__clk_disable(parent);
__clk_enable(old_parent);
}
clk_enable_unlock(flags);
if (clk->prepare_count)
__clk_unprepare(parent);
return ret;
}
/* clean up enable for old parent if migration was done */
if (migrated_enable) {
flags = clk_enable_lock();
__clk_disable(old_parent);
clk_enable_unlock(flags);
}
/* clean up prepare for old parent if migration was done */
if (clk->prepare_count)
__clk_unprepare(old_parent);
/* update debugfs with new clk tree topology */
clk_debug_reparent(clk, parent);
return 0;
}
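/*
 * Hedged consumer-side sketch of the clk_set_parent() API documented
 * below (not part of this revert; clock names are hypothetical, and
 * clk_put()/error handling are trimmed):
 */
static int my_switch_uart_to_osc(struct device *dev)
{
	struct clk *mux = clk_get(dev, "uart_sel");
	struct clk *osc = clk_get(dev, "xin24m");

	if (IS_ERR(mux) || IS_ERR(osc))
		return -ENOENT;
	return clk_set_parent(mux, osc);	/* 0 on success */
}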
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
* @parent: the new input to clk
*
* Re-parent clk to use parent as its new input source. If clk is in
* prepared state, the clk will get enabled for the duration of this call. If
* that's not acceptable for a specific clk (Eg: the consumer can't handle
* that, the reparenting is glitchy in hardware, etc), use the
* CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
*
* After successfully changing clk's parent clk_set_parent will update the
* clk topology, sysfs topology and propagate rate recalculation via
* __clk_recalc_rates.
*
* Returns 0 on success, -EERROR otherwise.
* Re-parent clk to use parent as it's new input source. If clk has the
* CLK_SET_PARENT_GATE flag set then clk must be gated for this
* operation to succeed. After successfully changing clk's parent
* clk_set_parent will update the clk topology, sysfs topology and
* propagate rate recalculation via __clk_recalc_rates. Returns 0 on
* success, -EERROR otherwise.
*/
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret = 0;
int p_index = 0;
u8 p_index = 0;
unsigned long p_rate = 0;
if (!clk)
return 0;
if (!clk->ops)
if (!clk || !clk->ops)
return -EINVAL;
/* verify ops for for multi-parent clks */
@ -1612,16 +1485,17 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
if (parent) {
p_index = clk_fetch_parent_index(clk, parent);
p_rate = parent->rate;
if (p_index < 0) {
if (p_index == clk->num_parents) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, clk->name);
ret = p_index;
ret = -EINVAL;
goto out;
}
}
/* propagate PRE_RATE_CHANGE notifications */
ret = __clk_speculate_rates(clk, p_rate);
if (clk->notifier_count)
ret = __clk_speculate_rates(clk, p_rate);
/* abort if a driver objects */
if (ret & NOTIFY_STOP_MASK)
@ -1672,9 +1546,8 @@ int __clk_init(struct device *dev, struct clk *clk)
/* check that clk_ops are sane. See Documentation/clk.txt */
if (clk->ops->set_rate &&
!((clk->ops->round_rate || clk->ops->determine_rate) &&
clk->ops->recalc_rate)) {
pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
!(clk->ops->round_rate && clk->ops->recalc_rate)) {
pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
__func__, clk->name);
ret = -EINVAL;
goto out;
@ -1704,8 +1577,8 @@ int __clk_init(struct device *dev, struct clk *clk)
* for clock drivers to statically initialize clk->parents.
*/
if (clk->num_parents > 1 && !clk->parents) {
clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
GFP_KERNEL);
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
/*
* __clk_lookup returns NULL for parents that have not been
* clk_init'd; thus any access to clk->parents[] must check
@ -1757,7 +1630,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* this clock
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
if (orphan->num_parents && orphan->ops->get_parent) {
if (orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
if (!strcmp(clk->name, orphan->parent_names[i]))
__clk_reparent(orphan, clk);
@ -1777,7 +1650,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* The .init callback is not used by any of the basic clock types, but
* exists for weird hardware that must perform initialization magic.
* Please consider other ways of solving initialization problems before
* using this callback, as its use is discouraged.
* using this callback, as it's use is discouraged.
*/
if (clk->ops->init)
clk->ops->init(clk->hw);
@ -1804,7 +1677,7 @@ out:
* very large numbers of clocks that need to be statically initialized. It is
* a layering violation to include clk-private.h from any code which implements
* a clock's .ops; as such any statically initialized clock data MUST be in a
* separate C file from the logic that implements its operations. Returns 0
* separate C file from the logic that implements it's operations. Returns 0
* on success, otherwise an error code.
*/
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
@ -1845,8 +1718,8 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
hw->clk = clk;
/* allocate local copy in case parent_names is __initdata */
clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
GFP_KERNEL);
clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
GFP_KERNEL);
if (!clk->parent_names) {
pr_err("%s: could not allocate clk->parent_names\n", __func__);
@ -2250,13 +2123,13 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
*/
void __init of_clk_init(const struct of_device_id *matches)
{
const struct of_device_id *match;
struct device_node *np;
if (!matches)
matches = __clk_of_table;
for_each_matching_node_and_match(np, matches, &match) {
for_each_matching_node(np, matches) {
const struct of_device_id *match = of_match_node(matches, np);
of_clk_init_cb_t clk_init_cb = match->data;
clk_init_cb(np);
}

View file

@ -12,7 +12,6 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip_timer.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o

View file

@ -391,12 +391,12 @@ u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
static cycle_t arch_counter_read(struct clocksource *cs)
{
return arch_timer_read_counter();
return arch_counter_get_cntvct();
}
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
return arch_timer_read_counter();
return arch_counter_get_cntvct();
}
static struct clocksource clocksource_counter = {
@ -429,10 +429,6 @@ static void __init arch_counter_register(unsigned type)
else
arch_timer_read_counter = arch_counter_get_cntvct_mem;
if (!arch_timer_use_virtual)
if (arch_timer_read_counter == arch_counter_get_cntvct)
arch_timer_read_counter = arch_counter_get_cntpct;
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;

View file

@ -105,21 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ
default ARCH_OMAP2PLUS
select CPU_FREQ_TABLE
config ARM_ROCKCHIP_CPUFREQ
bool "CPUfreq driver for Rockchip CPUs"
depends on ARCH_ROCKCHIP
default y
help
This enables the CPUfreq driver for Rockchip CPUs.
If in doubt, say Y.
config ARM_ROCKCHIP_BL_CPUFREQ
bool "CPUfreq driver for Rockchip big LITTLE CPUs"
depends on ARCH_ROCKCHIP
help
This enables the CPUfreq driver for Rockchips big LITTLE CPUs.
If in doubt, say Y.
config ARM_S3C2416_CPUFREQ
bool "S3C2416 CPU Frequency scaling support"
depends on CPU_S3C2416

View file

@ -67,8 +67,6 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
obj-$(CONFIG_ARM_ROCKCHIP_CPUFREQ) += rockchip-cpufreq.o
obj-$(CONFIG_ARM_ROCKCHIP_BL_CPUFREQ) += rockchip_big_little.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o

View file

@ -19,9 +19,6 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#ifdef CONFIG_ARCH_ROCKCHIP
#include <linux/input.h>
#endif
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
@ -109,14 +106,6 @@ struct cpufreq_interactive_tunables {
int boostpulse_duration_val;
/* End time of boost pulse in ktime converted to usecs */
u64 boostpulse_endtime;
#ifdef CONFIG_ARCH_ROCKCHIP
/* Frequency to which a touch boost takes the cpus to */
unsigned long touchboost_freq;
/* Duration of a touchboost pulse in usecs */
int touchboostpulse_duration_val;
/* End time of touchboost pulse in ktime converted to usecs */
u64 touchboostpulse_endtime;
#endif
bool boosted;
/*
* Max additional time to wait in idle, beyond timer_rate, at speeds
@ -378,19 +367,7 @@ static void cpufreq_interactive_timer(unsigned long data)
cpu_load = loadadjfreq / pcpu->policy->cur;
tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
#ifdef CONFIG_ARCH_ROCKCHIP
pcpu->target_freq = pcpu->policy->cur;
tunables->boosted |= now < tunables->touchboostpulse_endtime;
#endif
if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
#ifdef CONFIG_ARCH_ROCKCHIP
if (now < tunables->touchboostpulse_endtime) {
new_freq = choose_freq(pcpu, loadadjfreq);
if (new_freq < tunables->touchboost_freq)
new_freq = tunables->touchboost_freq;
} else
#endif
if (pcpu->target_freq < tunables->hispeed_freq) {
new_freq = tunables->hispeed_freq;
} else {
@ -1150,123 +1127,6 @@ static struct notifier_block cpufreq_interactive_idle_nb = {
.notifier_call = cpufreq_interactive_idle_notifier,
};
#ifdef CONFIG_ARCH_ROCKCHIP
static void cpufreq_interactive_input_event(struct input_handle *handle, unsigned int type,
unsigned int code, int value)
{
u64 now, endtime;
int i;
int anyboost = 0;
unsigned long flags[2];
struct cpufreq_interactive_cpuinfo *pcpu;
struct cpufreq_interactive_tunables *tunables;
if (type != EV_ABS)
return;
trace_cpufreq_interactive_boost("touch");
spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
now = ktime_to_us(ktime_get());
for_each_online_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
if (have_governor_per_policy())
tunables = pcpu->policy->governor_data;
else
tunables = common_tunables;
if (!tunables)
continue;
endtime = now + tunables->touchboostpulse_duration_val;
if (endtime < (tunables->touchboostpulse_endtime + 10 * USEC_PER_MSEC))
continue;
tunables->touchboostpulse_endtime = endtime;
spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
if (pcpu->target_freq < tunables->touchboost_freq) {
pcpu->target_freq = tunables->touchboost_freq;
cpumask_set_cpu(i, &speedchange_cpumask);
pcpu->hispeed_validate_time =
ktime_to_us(ktime_get());
anyboost = 1;
}
pcpu->floor_freq = tunables->touchboost_freq;
pcpu->floor_validate_time = ktime_to_us(ktime_get());
spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
}
spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
if (anyboost)
wake_up_process(speedchange_task);
}
static int cpufreq_interactive_input_connect(struct input_handler *handler,
struct input_dev *dev, const struct input_device_id *id)
{
struct input_handle *handle;
int error;
handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = "cpufreq";
error = input_register_handle(handle);
if (error)
goto err2;
error = input_open_device(handle);
if (error)
goto err1;
return 0;
err1:
input_unregister_handle(handle);
err2:
kfree(handle);
return error;
}
static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
static const struct input_device_id cpufreq_interactive_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_ABS) },
.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
BIT_MASK(ABS_MT_POSITION_X) |
BIT_MASK(ABS_MT_POSITION_Y) },
},
{
.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
.absbit = { [BIT_WORD(ABS_X)] =
BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
},
{ },
};
static struct input_handler cpufreq_interactive_input_handler = {
.event = cpufreq_interactive_input_event,
.connect = cpufreq_interactive_input_connect,
.disconnect = cpufreq_interactive_input_disconnect,
.name = "cpufreq_interactive",
.id_table = cpufreq_interactive_ids,
};
#endif
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event)
{
@ -1315,25 +1175,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
spin_lock_init(&tunables->target_loads_lock);
spin_lock_init(&tunables->above_hispeed_delay_lock);
#ifdef CONFIG_ARCH_ROCKCHIP
{
unsigned int index;
freq_table = cpufreq_frequency_get_table(policy->cpu);
tunables->hispeed_freq = policy->max;
if (policy->min < 600000)
tunables->hispeed_freq = 600000;
else if (cpufreq_frequency_table_target(policy, freq_table, policy->min + 1, CPUFREQ_RELATION_L, &index) == 0)
tunables->hispeed_freq = freq_table[index].frequency;
tunables->timer_slack_val = 20 * USEC_PER_MSEC;
tunables->min_sample_time = 40 * USEC_PER_MSEC;
store_above_hispeed_delay(tunables, "20000 1000000:80000 1200000:100000 1700000:20000", 0);
store_target_loads(tunables, "70 600000:70 800000:75 1500000:80 1700000:90", 0);
tunables->boostpulse_duration_val = 40 * USEC_PER_MSEC;
tunables->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
tunables->touchboost_freq = 1200000;
}
#endif
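/*
 * Note: the tunable strings above use the governor's
 * "<default> <freq>:<value> ..." syntax; delays are in usecs and
 * target_loads in percent, so e.g. "1200000:100000" requests a 100 ms
 * above-hispeed delay once the CPU runs at or above 1.2 GHz.
 */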
policy->governor_data = tunables;
if (!have_governor_per_policy())
common_tunables = tunables;
@ -1352,9 +1193,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
idle_notifier_register(&cpufreq_interactive_idle_nb);
cpufreq_register_notifier(&cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
#ifdef CONFIG_ARCH_ROCKCHIP
rc = input_register_handler(&cpufreq_interactive_input_handler);
#endif
}
break;
@ -1362,9 +1200,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
case CPUFREQ_GOV_POLICY_EXIT:
if (!--tunables->usage_count) {
if (policy->governor->initialized == 1) {
#ifdef CONFIG_ARCH_ROCKCHIP
input_unregister_handler(&cpufreq_interactive_input_handler);
#endif
cpufreq_unregister_notifier(&cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
idle_notifier_unregister(&cpufreq_interactive_idle_nb);

View file

@ -433,10 +433,6 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
spin_lock(&cpufreq_stats_lock);
stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
#ifdef CONFIG_ARCH_ROCKCHIP
if (stat->last_index == -1)
stat->last_index = 0;
#endif
spin_unlock(&cpufreq_stats_lock);
cpufreq_cpu_put(data);
return 0;

View file

@ -65,14 +65,6 @@ config DEVFREQ_GOV_USERSPACE
comment "DEVFREQ Drivers"
config ROCKCHIP_RK3368_DDR_FREQ
bool "ROCKCHIP RK3368 DDR FREQ Driver"
depends on ARCH_ROCKCHIP
help
This adds the Rockchip DDR frequency-scaling driver for the RK3368.
It uses the MCU to change the DDR frequency, and requires the
Rockchip mailbox and SCPI support to be enabled.
config ARM_EXYNOS4_BUS_DEVFREQ
bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412

View file

@ -4,6 +4,5 @@ obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
obj-$(CONFIG_ROCKCHIP_RK3368_DDR_FREQ) += ddr_rk3368.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o

View file

@ -1405,7 +1405,7 @@ static int dma_runtime_idle(struct device *dev)
return -EAGAIN;
}
return 0;
return pm_schedule_suspend(dev, 0);
}
/******************************************************************************

View file

@ -27,7 +27,6 @@
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <asm/unaligned.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
@ -288,7 +287,7 @@ static unsigned cmd_line;
/* The number of default descriptors */
#define NR_DEFAULT_DESC 32
#define NR_DEFAULT_DESC 16
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
@ -396,7 +395,6 @@ struct pl330_req {
struct pl330_xfer *x;
/* Hook to attach to DMAC's list of reqs with due callback */
struct list_head rqd;
unsigned int infiniteloop;
};
/*
@ -507,7 +505,7 @@ struct pl330_dmac {
/* Maximum possible events/irqs */
int events[32];
/* BUS address of MicroCode buffer */
dma_addr_t mcode_bus;
u32 mcode_bus;
/* CPU address of MicroCode buffer */
void *mcode_cpu;
/* List of all Channel threads */
@ -574,8 +572,6 @@ struct dma_pl330_chan {
/* for cyclic capability */
bool cyclic;
enum dma_status chan_status;
};
struct dma_pl330_dmac {
@ -659,17 +655,10 @@ static inline u32 get_id(struct pl330_info *pi, u32 off)
void __iomem *regs = pi->base;
u32 id = 0;
#ifdef CONFIG_ARCH_ROCKCHIP
id |= ((readl(regs + off + 0x0) & 0xff) << 0);
id |= ((readl(regs + off + 0x4) & 0xff) << 8);
id |= ((readl(regs + off + 0x8) & 0xff) << 16);
id |= ((readl(regs + off + 0xc) & 0xff) << 24);
#else
id |= (readb(regs + off + 0x0) << 0);
id |= (readb(regs + off + 0x4) << 8);
id |= (readb(regs + off + 0x8) << 16);
id |= (readb(regs + off + 0xc) << 24);
#endif
return id;
}
@ -687,7 +676,7 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAADDH;
buf[0] |= (da << 1);
put_unaligned(val, (u16 *)&buf[1]); //*((u16 *)&buf[1]) = val;
*((u16 *)&buf[1]) = val;
PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
da == 1 ? "DA" : "SA", val);
@ -841,7 +830,7 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAMOV;
buf[1] = dst;
put_unaligned(val, (u32 *)&buf[2]); //*((u32 *)&buf[2]) = val;
*((u32 *)&buf[2]) = val;
PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@ -1019,7 +1008,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
buf[1] = chan & 0x7;
put_unaligned(addr, (u32 *)&buf[2]); //*((u32 *)&buf[2]) = addr;
*((u32 *)&buf[2]) = addr;
return SZ_DMAGO;
}
@ -1291,17 +1280,10 @@ static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
int off = 0;
while (cyc--) {
#ifdef CONFIG_ARCH_ROCKCHIP
off += _emit_WFP(dry_run, &buf[off], BURST, pxs->r->peri);
off += _emit_LDP(dry_run, &buf[off], BURST, pxs->r->peri);
off += _emit_ST(dry_run, &buf[off], ALWAYS);
//off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); //for sdmmc sdio
#else
off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
off += _emit_ST(dry_run, &buf[off], ALWAYS);
off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
#endif
}
return off;
@ -1313,17 +1295,10 @@ static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
int off = 0;
while (cyc--) {
#ifdef CONFIG_ARCH_ROCKCHIP
off += _emit_WFP(dry_run, &buf[off], BURST, pxs->r->peri);
off += _emit_LD(dry_run, &buf[off], ALWAYS);
off += _emit_STP(dry_run, &buf[off], BURST, pxs->r->peri);
//off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
#else
off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
off += _emit_LD(dry_run, &buf[off], ALWAYS);
off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
#endif
}
return off;
@ -1352,77 +1327,6 @@ static int _bursts(unsigned dry_run, u8 buf[],
return off;
}
/* Returns bytes consumed */
static inline int _loop_infiniteloop(unsigned dry_run, u8 buf[],
unsigned long bursts, const struct _xfer_spec *pxs, int ev)
{
int cyc, off;
unsigned lcnt0, lcnt1, ljmp0, ljmp1, ljmpfe;
struct _arg_LPEND lpend;
off = 0;
ljmpfe = off;
lcnt0 = pxs->r->infiniteloop;
if (bursts > 256) {
lcnt1 = 256;
cyc = bursts / 256;
} else {
lcnt1 = bursts;
cyc = 1;
}
/* forever loop */
off += _emit_MOV(dry_run, &buf[off], SAR, pxs->x->src_addr);
off += _emit_MOV(dry_run, &buf[off], DAR, pxs->x->dst_addr);
if (pxs->r->rqtype != MEMTOMEM)
off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
/* loop0 */
off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
ljmp0 = off;
/* loop1 */
off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
ljmp1 = off;
off += _bursts(dry_run, &buf[off], pxs, cyc);
lpend.cond = ALWAYS;
lpend.forever = false;
lpend.loop = 1;
lpend.bjump = off - ljmp1;
off += _emit_LPEND(dry_run, &buf[off], &lpend);
/* remainder */
lcnt1 = bursts - (lcnt1 * cyc);
if (lcnt1) {
off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
ljmp1 = off;
off += _bursts(dry_run, &buf[off], pxs, 1);
lpend.cond = ALWAYS;
lpend.forever = false;
lpend.loop = 1;
lpend.bjump = off - ljmp1;
off += _emit_LPEND(dry_run, &buf[off], &lpend);
}
off += _emit_SEV(dry_run, &buf[off], ev);
lpend.cond = ALWAYS;
lpend.forever = false;
lpend.loop = 0;
lpend.bjump = off - ljmp0;
off += _emit_LPEND(dry_run, &buf[off], &lpend);
lpend.cond = ALWAYS;
lpend.forever = true;
lpend.loop = 1;
lpend.bjump = off - ljmpfe;
off += _emit_LPEND(dry_run, &buf[off], &lpend);
return off;
}
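
A worked example of the burst split used above: the PL330 loop counters hold at most 256 iterations, so bursts = 600 becomes an inner loop of lcnt1 = 256 executed cyc = 600/256 = 2 times (512 bursts), leaving 600 - 512 = 88 bursts for the remainder loop. The same arithmetic as a standalone sketch (plain C, not kernel code):

	static unsigned long split_bursts(unsigned long bursts)
	{
		unsigned long lcnt1, cyc;

		if (bursts > 256) {
			lcnt1 = 256;
			cyc = bursts / 256;
		} else {
			lcnt1 = bursts;
			cyc = 1;
		}
		/* remainder: 88 when bursts == 600 */
		return bursts - lcnt1 * cyc;
	}
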
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
unsigned long *bursts, const struct _xfer_spec *pxs)
@@ -1502,20 +1406,6 @@ static inline int _loop(unsigned dry_run, u8 buf[],
return off;
}
static inline int _setup_xfer_infiniteloop(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs, int ev)
{
struct pl330_xfer *x = pxs->x;
u32 ccr = pxs->ccr;
unsigned long bursts = BYTE_TO_BURST(x->bytes, ccr);
int off = 0;
/* Setup Loop(s) */
off += _loop_infiniteloop(dry_run, &buf[off], bursts, pxs, ev);
return off;
}
static inline int _setup_loops(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs)
{
@@ -1568,32 +1458,21 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
x = pxs->r->x;
if (!pxs->r->infiniteloop) {
do {
/* Error if xfer length is not aligned at burst size */
if (x->bytes % (BRST_SIZE(pxs->ccr) *
BRST_LEN(pxs->ccr)))
return -EINVAL;
pxs->x = x;
off += _setup_xfer(dry_run, &buf[off], pxs);
x = x->next;
} while (x);
/* DMASEV peripheral/event */
off += _emit_SEV(dry_run, &buf[off], thrd->ev);
/* DMAEND */
off += _emit_END(dry_run, &buf[off]);
} else {
do {
/* Error if xfer length is not aligned at burst size */
if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
return -EINVAL;
pxs->x = x;
off += _setup_xfer_infiniteloop(dry_run, &buf[off],
pxs, thrd->ev);
}
off += _setup_xfer(dry_run, &buf[off], pxs);
x = x->next;
} while (x);
/* DMASEV peripheral/event */
off += _emit_SEV(dry_run, &buf[off], thrd->ev);
/* DMAEND */
off += _emit_END(dry_run, &buf[off]);
return off;
}
@@ -1871,9 +1750,6 @@ static int pl330_update(const struct pl330_info *pi)
id = pl330->events[ev];
if (id == -1)
continue;
thrd = &pl330->channels[id];
active = thrd->req_running;
@@ -1882,16 +1758,12 @@ static int pl330_update(const struct pl330_info *pi)
/* Detach the req */
rqdone = thrd->req[active].r;
if (!rqdone->infiniteloop) {
thrd->req[active].r = NULL;
/* Detach the req */
thrd->req[active].r = NULL;
mark_free(thrd, active);
mark_free(thrd, active);
/* Get going again ASAP */
_start(thrd);
}
/* Get going again ASAP */
_start(thrd);
/* For now, just make a list of callbacks to be done */
list_add_tail(&rqdone->rqd, &pl330->req_done);
@@ -2042,21 +1914,11 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
{
struct pl330_dmac *pl330 = thrd->dmac;
struct pl330_info *pi = pl330->pinfo;
void __iomem *regs = pi->base;
u32 inten = readl(regs + INTEN);
/* If the event is valid and was held by the thread */
if (ev >= 0 && ev < pi->pcfg.num_events
&& pl330->events[ev] == thrd->id) {
&& pl330->events[ev] == thrd->id)
pl330->events[ev] = -1;
if (readl(regs + ES) & (1 << ev)) {
if (!(inten & (1 << ev)))
writel(inten | (1 << ev), regs + INTEN);
writel(1 << ev, regs + INTCLR);
writel(inten & ~(1 << ev) , regs + INTEN);
}
}
}
static void pl330_release_channel(void *ch_id)
@@ -2410,18 +2272,11 @@ static inline void handle_cyclic_desc_list(struct list_head *list)
}
/* pch will be unset if list was empty */
if (!pch || !pch->dmac)
if (!pch)
return;
spin_lock_irqsave(&pch->lock, flags);
if (pch->chan_status == DMA_PAUSED) {
list_for_each_entry(desc, list, node) {
desc->status = DONE;
}
list_splice_tail_init(list, &pch->dmac->desc_pool);
} else {
list_splice_tail_init(list, &pch->work_list);
}
list_splice_tail_init(list, &pch->work_list);
spin_unlock_irqrestore(&pch->lock, flags);
}
@@ -2462,7 +2317,6 @@ static void pl330_tasklet(unsigned long data)
spin_lock_irqsave(&pch->lock, flags);
pch->chan_status = DMA_SUCCESS;
/* Pick up ripe tomatoes */
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
if (desc->status == DONE) {
@@ -2522,7 +2376,7 @@ bool pl330_filter(struct dma_chan *chan, void *param)
return false;
peri_id = chan->private;
return *peri_id == (unsigned long)param;
return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
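
On 64-bit kernels, casting a void * filter parameter to (unsigned) keeps only its low 32 bits, so distinct parameters can compare equal to the same peripheral id; the line being deleted compared at full width for that reason. The full-width idiom as a standalone sketch (names hypothetical):

	#include <linux/types.h>

	static bool peri_id_match(const u8 *peri_id, void *param)
	{
		/* promote the id; keep the pointer's full value */
		return *peri_id == (unsigned long)param;
	}
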
@@ -2597,7 +2451,6 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
}
list_splice_tail_init(&list, &pdmac->desc_pool);
pch->chan_status = DMA_PAUSED;
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
@@ -2649,13 +2502,7 @@ static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct dma_pl330_chan *pch = to_pchan(chan);
void __iomem *regs = pch->dmac->pif.base;
struct pl330_thread *pt = pch->pl330_chid;
enum dma_status st;
st = dma_cookie_status(chan, cookie, txstate);
txstate->residue = readl(regs + DA(pt->id));
return st;
return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
@@ -2724,7 +2571,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
if (!pdmac)
return 0;
desc = kzalloc(count * sizeof(*desc), flg);
desc = kmalloc(count * sizeof(*desc), flg);
if (!desc)
return 0;
@@ -2771,29 +2618,22 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
struct dma_pl330_dmac *pdmac = pch->dmac;
u8 *peri_id = pch->chan.private;
struct dma_pl330_desc *desc;
int i = 0;
/* Pluck one desc from the pool of DMAC */
desc = pluck_desc(pdmac);
/* If the DMAC pool is empty, alloc new */
if (!desc) {
for(i = 0; i < 3; i++) {
if (!add_desc(pdmac, GFP_ATOMIC, 1))
continue;
/* Try again */
desc = pluck_desc(pdmac);
if (!desc) {
dev_err(pch->dmac->pif.dev,
"%s:%d i=%d ALERT!\n", __func__, __LINE__,i);
continue;
}
break;
}
if(!desc && i >= 3)
if (!add_desc(pdmac, GFP_ATOMIC, 1))
return NULL;
/* Try again */
desc = pluck_desc(pdmac);
if (!desc) {
dev_err(pch->dmac->pif.dev,
"%s:%d ALERT!\n", __func__, __LINE__);
return NULL;
}
}
/* Initialize the descriptor */
@@ -2801,7 +2641,6 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
desc->req.infiniteloop = 0;
desc->req.peri = peri_id ? pch->chan.chan_id : 0;
desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
@@ -2881,7 +2720,6 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
unsigned int i;
dma_addr_t dst;
dma_addr_t src;
unsigned int *infinite = context;
if (len % period_len != 0)
return NULL;
@@ -2936,12 +2774,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
}
desc->rqcfg.brst_size = pch->burst_sz;
#ifdef CONFIG_ARCH_ROCKCHIP
desc->rqcfg.brst_len = pch->burst_len;
#else
desc->rqcfg.brst_len = 1;
#endif
desc->req.infiniteloop = *infinite;
fill_px(&desc->px, dst, src, period_len);
if (!first)
@@ -3069,11 +2902,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->rqcfg.brst_size = pch->burst_sz;
#ifdef CONFIG_ARCH_ROCKCHIP
desc->rqcfg.brst_len = pch->burst_len;
#else
desc->rqcfg.brst_len = 1;
#endif
}
/* Return the last desc in the chain */
@@ -3089,28 +2918,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
return IRQ_NONE;
}
int pl330_dma_getposition(struct dma_chan *chan,
dma_addr_t *src, dma_addr_t *dst)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_info *pi;
void __iomem *regs;
struct pl330_thread *thrd;
if (unlikely(!pch))
return -EINVAL;
thrd = pch->pl330_chid;
pi = &pch->dmac->pif;
regs = pi->base;
*src = readl(regs + SA(thrd->id));
*dst = readl(regs + DA(thrd->id));
return 0;
}
EXPORT_SYMBOL(pl330_dma_getposition);
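
The pl330_dma_getposition() export removed above read the channel thread's SA/DA registers to report the transfer's current source and destination bus addresses. Hypothetical caller-side usage, only meaningful on trees that keep the export:

	#include <linux/dmaengine.h>
	#include <linux/printk.h>

	static void debug_position(struct dma_chan *chan)
	{
		dma_addr_t src, dst;

		/* chan: a previously acquired PL330 channel */
		if (!pl330_dma_getposition(chan, &src, &dst))
			pr_debug("xfer position: src=0x%llx dst=0x%llx\n",
				 (unsigned long long)src,
				 (unsigned long long)dst);
	}
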
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -3214,9 +3021,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
pd->device_issue_pending = pl330_issue_pending;
#ifdef CONFIG_ARCH_ROCKCHIP
pd->dma_getposition = pl330_dma_getposition;
#endif
ret = dma_async_device_register(pd);
if (ret) {

View file

@@ -209,13 +209,6 @@ config GPIO_RCAR
help
Say yes here to support GPIO on Renesas R-Car SoCs.
config GPIO_RT5025
bool "Richtek RT5025 GPIO support"
depends on MFD_RT5025
default n
help
This is the gpio driver for RT5025 PMIC.
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR

View file

@@ -87,4 +87,3 @@ obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
obj-$(CONFIG_GPIO_RT5025) += gpio-rt5025.o

View file

@@ -305,7 +305,11 @@ static const struct irq_domain_ops lnw_gpio_irq_ops = {
static int lnw_gpio_runtime_idle(struct device *dev)
{
pm_schedule_suspend(dev, 500);
int err = pm_schedule_suspend(dev, 500);
if (!err)
return 0;
return -EBUSY;
}
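
The old body here called pm_schedule_suspend() and then fell off the end of a non-void function, so callers saw an undefined return value; the replacement reports a scheduling failure as -EBUSY and success as 0. The same shape as a self-contained sketch (driver callback name hypothetical):

	#include <linux/pm_runtime.h>

	static int foo_runtime_idle(struct device *dev)
	{
		/* queue a suspend 500 ms out; -EBUSY if it could not be queued */
		return pm_schedule_suspend(dev, 500) ? -EBUSY : 0;
	}
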

View file

@@ -1,3 +1,2 @@
obj-y += drm/ vga/ arm/
obj-y += drm/ vga/
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-$(CONFIG_POWERVR_ROGUE) += rogue/

View file

@@ -201,8 +201,6 @@ config DRM_SAVAGE
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"

View file

@@ -45,7 +45,6 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/

View file

@@ -3464,7 +3464,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
ret = -ENOSPC;
goto out;
}
fb->pixel_format = crtc->fb->pixel_format;
if (crtc->fb->pixel_format != fb->pixel_format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;

View file

@@ -37,7 +37,7 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
unsigned int drm_debug = 0xf; /* 1 to enable debug output */
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
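
With drm_debug back at 0, debug output is off by default; the value is still exposed as a module parameter, so it can be raised at boot (drm.debug=0xf) or at runtime via /sys/module/drm/parameters/debug. A sketch of that kind of knob (flags and names may differ from the DRM core's):

	#include <linux/moduleparam.h>

	static unsigned int my_debug;	/* bitmask, 0 = quiet */
	module_param_named(debug, my_debug, uint, 0600);
	MODULE_PARM_DESC(debug, "Enable debug output (bitmask)");
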

View file

@@ -253,7 +253,6 @@ static int hidraw_open(struct inode *inode, struct file *file)
unsigned int minor = iminor(inode);
struct hidraw *dev;
struct hidraw_list *list;
unsigned long flags;
int err = 0;
if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) {
@@ -267,6 +266,11 @@ static int hidraw_open(struct inode *inode, struct file *file)
goto out_unlock;
}
list->hidraw = hidraw_table[minor];
mutex_init(&list->read_mutex);
list_add_tail(&list->node, &hidraw_table[minor]->list);
file->private_data = list;
dev = hidraw_table[minor];
if (!dev->open++) {
err = hid_hw_power(dev->hid, PM_HINT_FULLON);
@@ -279,16 +283,9 @@ static int hidraw_open(struct inode *inode, struct file *file)
if (err < 0) {
hid_hw_power(dev->hid, PM_HINT_NORMAL);
dev->open--;
goto out_unlock;
}
}
list->hidraw = hidraw_table[minor];
mutex_init(&list->read_mutex);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
list_add_tail(&list->node, &hidraw_table[minor]->list);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
file->private_data = list;
out_unlock:
mutex_unlock(&minors_lock);
out:
@@ -327,13 +324,10 @@ static int hidraw_release(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
struct hidraw_list *list = file->private_data;
unsigned long flags;
mutex_lock(&minors_lock);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
drop_ref(hidraw_table[minor], 0);
@@ -462,9 +456,7 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
struct hidraw *dev = hid->hidraw;
struct hidraw_list *list;
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&dev->list_lock, flags);
list_for_each_entry(list, &dev->list, node) {
int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
@@ -479,7 +471,6 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
list->head = new_head;
kill_fasync(&list->fasync, SIGIO, POLL_IN);
}
spin_unlock_irqrestore(&dev->list_lock, flags);
wake_up_interruptible(&dev->wait);
return ret;
@@ -528,7 +519,6 @@ int hidraw_connect(struct hid_device *hid)
mutex_unlock(&minors_lock);
init_waitqueue_head(&dev->wait);
spin_lock_init(&dev->list_lock);
INIT_LIST_HEAD(&dev->list);
dev->hid = hid;
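
The spin_lock_irqsave()/irqrestore() pairs and the list_lock init dropped throughout this file guarded dev->list, since hidraw_report_event() can run in interrupt context concurrently with open/release manipulating the same list. The generic shape of that pattern, sketched with hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static void reader_attach(spinlock_t *lock, struct list_head *readers,
				  struct list_head *node)
	{
		unsigned long flags;

		/* irqsave form: the list is also walked from IRQ context */
		spin_lock_irqsave(lock, flags);
		list_add_tail(node, readers);
		spin_unlock_irqrestore(lock, flags);
	}
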

View file

@@ -1183,7 +1183,6 @@ static int usbhid_start(struct hid_device *hid)
usbhid_set_leds(hid);
device_set_wakeup_enable(&dev->dev, 1);
}
device_set_wakeup_enable(&dev->dev, 1);
return 0;
fail:

Some files were not shown because too many files have changed in this diff.