Merge v3.13-rc6 into char-misc-next

We want these fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit dde86f41f4
288 changed files with 2788 additions and 1403 deletions

Documentation/block/null_blk.txt | 72 (new file)

@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+  Single-queue block-layer
+    - Request-based.
+    - Single submission queue per device.
+    - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+  Multi-queue block-layer
+    - Request-based.
+    - Configurable submission queues per device.
+  No block-layer (known as bio-based)
+    - Bio-based. IO requests are submitted directly to the device driver.
+    - Directly accepts bio data structures and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+  Selects which block-layer the module should instantiate with.
+
+  0: Bio-based.
+  1: Single-queue.
+  2: Multi-queue.
+
+home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
+  Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+  The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+  The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+  Number of block devices instantiated. They are instantiated as /dev/nullb0,
+  etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+  The completion mode used for completing IOs to the block-layer.
+
+  0: None.
+  1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+     when IOs are issued from another CPU node than the home node the device is
+     connected to.
+  2: Timer: Waits a specific period (completion_nsec) for each IO before
+     completion.
+
+completion_nsec=[ns]: Default: 10,000ns
+  Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+  The number of submission queues attached to the device driver. If unset, it
+  defaults to 1 on single-queue and bio-based instances. For multi-queue,
+  it is ignored when the use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+  The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+  0: The number of submit queues are set to the value of the submit_queues
+     parameter.
+  1: The multi-queue block layer is instantiated with a hardware dispatch
+     queue for each CPU node in the system.
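
As a usage illustration of the parameters documented above (an assumed invocation, not part of the patch):

	modprobe null_blk queue_mode=2 nr_devices=4 gb=10 bs=4096

would create /dev/nullb0 through /dev/nullb3 as 10GB multi-queue devices with a 4096-byte block size.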
Documentation/kernel-parameters.txt

@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 			* atapi_dmadir: Enable ATAPI DMADIR bridge support
 
+			* disable: Disable this device.
+
 			If there are multiple matching configurations changing
 			the same attribute, the last one is used.
 
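For context, libata.force values such as the new "disable" keyword are applied per port or per port.device ID. An illustrative command line (not part of the patch):

	libata.force=1.00:disable

would disable device 0 on ATA port 1.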
Documentation/module-signing.txt | 240 (new file)

@@ -0,0 +1,240 @@
+			==============================
+			KERNEL MODULE SIGNING FACILITY
+			==============================
+
+CONTENTS
+
+ - Overview.
+ - Configuring module signing.
+ - Generating signing keys.
+ - Public keys in the kernel.
+ - Manually signing modules.
+ - Signed modules and stripping.
+ - Loading signed modules.
+ - Non-valid signatures and unsigned modules.
+ - Administering/protecting the private key.
+
+
+========
+OVERVIEW
+========
+
+The kernel module signing facility cryptographically signs modules during
+installation and then checks the signature upon loading the module.  This
+allows increased kernel security by disallowing the loading of unsigned modules
+or modules signed with an invalid key.  Module signing increases security by
+making it harder to load a malicious module into the kernel.  The module
+signature checking is done by the kernel so that it is not necessary to have
+trusted userspace bits.
+
+This facility uses X.509 ITU-T standard certificates to encode the public keys
+involved.  The signatures are not themselves encoded in any industrial standard
+type.  The facility currently only supports the RSA public key encryption
+standard (though it is pluggable and permits others to be used).  The possible
+hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
+SHA-512 (the algorithm is selected by data in the signature).
+
+
+==========================
+CONFIGURING MODULE SIGNING
+==========================
+
+The module signing facility is enabled by going to the "Enable Loadable Module
+Support" section of the kernel configuration and turning on
+
+	CONFIG_MODULE_SIG	"Module signature verification"
+
+This has a number of options available:
+
+ (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
+
+     This specifies how the kernel should deal with a module that has a
+     signature for which the key is not known or a module that is unsigned.
+
+     If this is off (ie. "permissive"), then modules for which the key is not
+     available and modules that are unsigned are permitted, but the kernel will
+     be marked as being tainted.
+
+     If this is on (ie. "restrictive"), only modules that have a valid
+     signature that can be verified by a public key in the kernel's possession
+     will be loaded.  All other modules will generate an error.
+
+     Irrespective of the setting here, if the module has a signature block that
+     cannot be parsed, it will be rejected out of hand.
+
+
+ (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
+
+     If this is on then modules will be automatically signed during the
+     modules_install phase of a build.  If this is off, then the modules must
+     be signed manually using:
+
+	scripts/sign-file
+
+
+ (3) "Which hash algorithm should modules be signed with?"
+
+     This presents a choice of which hash algorithm the installation phase will
+     sign the modules with:
+
+	CONFIG_MODULE_SIG_SHA1		"Sign modules with SHA-1"
+	CONFIG_MODULE_SIG_SHA224	"Sign modules with SHA-224"
+	CONFIG_MODULE_SIG_SHA256	"Sign modules with SHA-256"
+	CONFIG_MODULE_SIG_SHA384	"Sign modules with SHA-384"
+	CONFIG_MODULE_SIG_SHA512	"Sign modules with SHA-512"
+
+     The algorithm selected here will also be built into the kernel (rather
+     than being a module) so that modules signed with that algorithm can have
+     their signatures checked without causing a dependency loop.
+
+
+=======================
+GENERATING SIGNING KEYS
+=======================
+
+Cryptographic keypairs are required to generate and check signatures.  A
+private key is used to generate a signature and the corresponding public key is
+used to check it.  The private key is only needed during the build, after which
+it can be deleted or stored securely.  The public key gets built into the
+kernel so that it can be used to check the signatures as the modules are
+loaded.
+
+Under normal conditions, the kernel build will automatically generate a new
+keypair using openssl if one does not exist in the files:
+
+	signing_key.priv
+	signing_key.x509
+
+during the building of vmlinux (the public part of the key needs to be built
+into vmlinux) using parameters in the:
+
+	x509.genkey
+
+file (which is also generated if it does not already exist).
+
+It is strongly recommended that you provide your own x509.genkey file.
+
+Most notably, in the x509.genkey file, the req_distinguished_name section
+should be altered from the default:
+
+	[ req_distinguished_name ]
+	O = Magrathea
+	CN = Glacier signing key
+	emailAddress = slartibartfast@magrathea.h2g2
+
+The generated RSA key size can also be set with:
+
+	[ req ]
+	default_bits = 4096
+
+
+It is also possible to manually generate the key private/public files using the
+x509.genkey key generation configuration file in the root node of the Linux
+kernel sources tree and the openssl command.  The following is an example to
+generate the public/private key files:
+
+	openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
+	   -config x509.genkey -outform DER -out signing_key.x509 \
+	   -keyout signing_key.priv
+
+
+=========================
+PUBLIC KEYS IN THE KERNEL
+=========================
+
+The kernel contains a ring of public keys that can be viewed by root.  They're
+in a keyring called ".system_keyring" that can be seen by:
+
+	[root@deneb ~]# cat /proc/keys
+	...
+	223c7853 I------     1 perm 1f030000     0     0 keyring   .system_keyring: 1
+	302d2d52 I------     1 perm 1f010000     0     0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
+	...
+
+Beyond the public key generated specifically for module signing, any file
+placed in the kernel source root directory or the kernel build root directory
+whose name is suffixed with ".x509" will be assumed to be an X.509 public key
+and will be added to the keyring.
+
+Further, the architecture code may take public keys from a hardware store and
+add those in also (e.g. from the UEFI key database).
+
+Finally, it is possible to add additional public keys by doing:
+
+	keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
+
+e.g.:
+
+	keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
+
+Note, however, that the kernel will only permit keys to be added to
+.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
+that is already resident in the .system_keyring at the time the key was added.
+
+
+========================
+MANUALLY SIGNING MODULES
+========================
+
+To manually sign a module, use the scripts/sign-file tool available in
+the Linux kernel source tree.  The script requires 4 arguments:
+
+	1.  The hash algorithm (e.g., sha256)
+	2.  The private key filename
+	3.  The public key filename
+	4.  The kernel module to be signed
+
+The following is an example to sign a kernel module:
+
+	scripts/sign-file sha512 kernel-signkey.priv \
+		kernel-signkey.x509 module.ko
+
+The hash algorithm used does not have to match the one configured, but if it
+doesn't, you should make sure that hash algorithm is either built into the
+kernel or can be loaded without requiring itself.
+
+
+============================
+SIGNED MODULES AND STRIPPING
+============================
+
+A signed module has a digital signature simply appended at the end.  The string
+"~Module signature appended~." at the end of the module's file confirms that a
+signature is present but it does not confirm that the signature is valid!
+
+Signed modules are BRITTLE as the signature is outside of the defined ELF
+container.  Thus they MAY NOT be stripped once the signature is computed and
+attached.  Note the entire module is the signed payload, including any and all
+debug information present at the time of signing.
+
+
+======================
+LOADING SIGNED MODULES
+======================
+
+Modules are loaded with insmod, modprobe, init_module() or finit_module(),
+exactly as for unsigned modules as no processing is done in userspace.  The
+signature checking is all done within the kernel.
+
+
+=========================================
+NON-VALID SIGNATURES AND UNSIGNED MODULES
+=========================================
+
+If CONFIG_MODULE_SIG_FORCE is enabled or module.sig_enforce=1 is supplied on
+the kernel command line, the kernel will only load validly signed modules
+for which it has a public key.  Otherwise, it will also load modules that are
+unsigned.  Any module for which the kernel has a key, but which proves to have
+a signature mismatch will not be permitted to load.
+
+Any module that has an unparseable signature will be rejected.
+
+
+========================================
+ADMINISTERING/PROTECTING THE PRIVATE KEY
+========================================
+
+Since the private key is used to sign modules, viruses and malware could use
+the private key to sign modules and compromise the operating system.  The
+private key must be either destroyed or moved to a secure location and not kept
+in the root node of the kernel source tree.
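
As a quick check tied to the "SIGNED MODULES AND STRIPPING" section above (an illustrative command, not part of the patch), the appended marker can usually be seen at the very end of a signed module:

	strings my-module.ko | tail -1
	~Module signature appended~

though, as the document notes, the marker only shows that a signature is present, not that it is valid.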
Documentation/networking/ip-sysctl.txt

@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER
 	Default: 64 (as recommended by RFC1700)
 
 ip_no_pmtu_disc - BOOLEAN
-	Disable Path MTU Discovery.
-	default FALSE
+	Disable Path MTU Discovery. If enabled and a
+	fragmentation-required ICMP is received, the PMTU to this
+	destination will be set to min_pmtu (see below). You will need
+	to raise min_pmtu to the smallest interface MTU on your system
+	manually if you want to avoid locally generated fragments.
+	Default: FALSE
 
 min_pmtu - INTEGER
 	default 552 - minimum discovered Path MTU
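An illustrative pairing of the two sysctls described above (commands assumed, not part of the patch):

	sysctl -w net.ipv4.ip_no_pmtu_disc=1
	sysctl -w net.ipv4.route.min_pmtu=1500

raising min_pmtu to the smallest local interface MTU, as the new text recommends, to avoid locally generated fragments.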
MAINTAINERS | 27

@@ -1008,6 +1008,8 @@ M:	Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-keystone/
+F:	drivers/clk/keystone/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
@@ -3761,9 +3763,11 @@ F:	include/uapi/linux/gigaset_dev.h
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-S:	Maintained
+M:	Alexandre Courbot <gnurou@gmail.com>
 L:	linux-gpio@vger.kernel.org
-F:	Documentation/gpio.txt
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S:	Maintained
+F:	Documentation/gpio/
 F:	drivers/gpio/
 F:	include/linux/gpio*
 F:	include/asm-generic/gpio.h
@@ -3831,6 +3835,12 @@ T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/gspca/
 
+GUID PARTITION TABLE (GPT)
+M:	Davidlohr Bueso <davidlohr@hp.com>
+L:	linux-efi@vger.kernel.org
+S:	Maintained
+F:	block/partitions/efi.*
+
 STK1160 USB VIDEO CAPTURE DRIVER
 M:	Ezequiel Garcia <elezegarcia@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -5911,12 +5921,21 @@ M:	Steffen Klassert <steffen.klassert@secunet.com>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
 M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
 S:	Maintained
 F:	net/xfrm/
 F:	net/key/
 F:	net/ipv4/xfrm*
+F:	net/ipv4/esp4.c
+F:	net/ipv4/ah4.c
+F:	net/ipv4/ipcomp.c
+F:	net/ipv4/ip_vti.c
 F:	net/ipv6/xfrm*
+F:	net/ipv6/esp6.c
+F:	net/ipv6/ah6.c
+F:	net/ipv6/ipcomp6.c
+F:	net/ipv6/ip6_vti.c
 F:	include/uapi/linux/xfrm.h
 F:	include/net/xfrm.h
 
@@ -9571,7 +9590,7 @@ F:	drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-M:	Dave Chinner <dchinner@fromorbit.com>
+M:	Dave Chinner <david@fromorbit.com>
 M:	Ben Myers <bpm@sgi.com>
 M:	xfs@oss.sgi.com
 L:	xfs@oss.sgi.com
Makefile | 24

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
 # Select initial ramdisk compression format, default is gzip(1).
 # This shall be used by the dracut(8) tool while creating an initramfs image.
 #
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
-        INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
-        INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
-        INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
-        INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
-        INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y                  := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA)  := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ)    := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO)   := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4)   := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
 
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv
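A note on the list-style idiom introduced above (a generic sketch of how make evaluates it, not part of the patch): unset config symbols expand to nothing, so their assignments land in a differently named, unused variable, while every =y symbol rewrites INITRD_COMPRESS-y in order. For example, with CONFIG_RD_XZ=y and the others unset:

	INITRD_COMPRESS-y                  := gzip    # default
	INITRD_COMPRESS-                   := bzip2   # CONFIG_RD_BZIP2 unset, ignored
	INITRD_COMPRESS-y                  := xz      # CONFIG_RD_XZ=y, overrides gzip

so INITRD_COMPRESS-y ends up as "xz"; with several options enabled the last one listed wins, which is why the new comment declines to export it as a chosen default.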
arch/arc/include/uapi/asm/unistd.h

@@ -8,7 +8,11 @@
 
 /******** no-legacy-syscalls-ABI *******/
 
-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs		(__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
+#undef __SYSCALL
+
 #endif
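The relaxed guard above lets the generic syscall-table machinery include this header a second time with __SYSCALL() defined. A minimal sketch of that consumer pattern (illustrative, modeled on but not copied from the actual ARC sys.c):

	/* illustrative: the second include expands one table entry per __SYSCALL() */
	#define __SYSCALL(nr, call)	[nr] = (call),

	void *sys_call_table[NR_syscalls] = {
		[0 ... NR_syscalls - 1] = sys_ni_syscall,
	#include <asm/unistd.h>
	};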
arch/arm/boot/dts/r8a7790.dtsi

@@ -87,9 +87,9 @@
 		interrupts = <1 9 0xf04>;
 	};
 
-	gpio0: gpio@ffc40000 {
+	gpio0: gpio@e6050000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc40000 0 0x2c>;
+		reg = <0 0xe6050000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 4 0x4>;
 		#gpio-cells = <2>;
@@ -99,9 +99,9 @@
 		interrupt-controller;
 	};
 
-	gpio1: gpio@ffc41000 {
+	gpio1: gpio@e6051000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc41000 0 0x2c>;
+		reg = <0 0xe6051000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 5 0x4>;
 		#gpio-cells = <2>;
@@ -111,9 +111,9 @@
 		interrupt-controller;
 	};
 
-	gpio2: gpio@ffc42000 {
+	gpio2: gpio@e6052000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc42000 0 0x2c>;
+		reg = <0 0xe6052000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 6 0x4>;
 		#gpio-cells = <2>;
@@ -123,9 +123,9 @@
 		interrupt-controller;
 	};
 
-	gpio3: gpio@ffc43000 {
+	gpio3: gpio@e6053000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc43000 0 0x2c>;
+		reg = <0 0xe6053000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 7 0x4>;
 		#gpio-cells = <2>;
@@ -135,9 +135,9 @@
 		interrupt-controller;
 	};
 
-	gpio4: gpio@ffc44000 {
+	gpio4: gpio@e6054000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc44000 0 0x2c>;
+		reg = <0 0xe6054000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 8 0x4>;
 		#gpio-cells = <2>;
@@ -147,9 +147,9 @@
 		interrupt-controller;
 	};
 
-	gpio5: gpio@ffc45000 {
+	gpio5: gpio@e6055000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc45000 0 0x2c>;
+		reg = <0 0xe6055000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 9 0x4>;
 		#gpio-cells = <2>;
@@ -241,7 +241,7 @@
 
 	sdhi0: sdhi@ee100000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee100000 0 0x100>;
+		reg = <0 0xee100000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 165 4>;
 		cap-sd-highspeed;
@@ -250,7 +250,7 @@
 
 	sdhi1: sdhi@ee120000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee120000 0 0x100>;
+		reg = <0 0xee120000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 166 4>;
 		cap-sd-highspeed;
arch/arm/mach-omap2/board-ldp.c

@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
 
 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
 {
+	int res;
+
 	/* LCD enable GPIO */
 	ldp_lcd_pdata.enable_gpio = gpio + 7;
 
 	/* Backlight enable GPIO */
 	ldp_lcd_pdata.backlight_gpio = gpio + 15;
 
+	res = platform_device_register(&ldp_lcd_device);
+	if (res)
+		pr_err("Unable to register LCD: %d\n", res);
+
 	return 0;
 }
 
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
 
 static struct platform_device *ldp_devices[] __initdata = {
 	&ldp_gpio_keys_device,
-	&ldp_lcd_device,
 };
 
 #ifdef CONFIG_OMAP_MUX
arch/arm/mach-omap2/display.c

@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
 	{ "dss_hdmi", "omapdss_hdmi", -1 },
 };
 
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+	u32 enable_mask, enable_shift;
+	u32 pipd_mask, pipd_shift;
+	u32 reg;
+
+	if (dsi_id == 0) {
+		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI1_PIPD_MASK;
+		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+	} else if (dsi_id == 1) {
+		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI2_PIPD_MASK;
+		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+	} else {
+		return -ENODEV;
+	}
+
+	reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	reg &= ~enable_mask;
+	reg &= ~pipd_mask;
+
+	reg |= (lanes << enable_shift) & enable_mask;
+	reg |= (lanes << pipd_shift) & pipd_mask;
+
+	omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	return 0;
+}
+
 static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
 	return 0;
 }
 
 static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		omap4_dsi_mux_pads(dsi_id, 0);
 }
 
 static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c

@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
 
 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-	{ .irq = 52 },
+	{ .irq = 52 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c

@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-	{ .irq = 24 },
+	{ .irq = 24 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-	{ .irq = 28 },
+	{ .irq = 28 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
|  |  | ||||||
|  | @ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = { | ||||||
| 	.class		= &dra7xx_uart_hwmod_class, | 	.class		= &dra7xx_uart_hwmod_class, | ||||||
| 	.clkdm_name	= "l4per_clkdm", | 	.clkdm_name	= "l4per_clkdm", | ||||||
| 	.main_clk	= "uart1_gfclk_mux", | 	.main_clk	= "uart1_gfclk_mux", | ||||||
| 	.flags		= HWMOD_SWSUP_SIDLE_ACT, | 	.flags		= HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS, | ||||||
| 	.prcm = { | 	.prcm = { | ||||||
| 		.omap4 = { | 		.omap4 = { | ||||||
| 			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, | 			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET, | ||||||
|  |  | ||||||
arch/arm/mach-pxa/include/mach/lubbock.h

@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <mach/irqs.h>
+
 #define LUBBOCK_ETH_PHYS	PXA_CS3_PHYS
 
 #define LUBBOCK_FPGA_PHYS	PXA_CS2_PHYS
arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c

@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
 		panic("SoC is not S3C64xx!");
 }
 
-static void __init s3c64xx_dt_init_irq(void)
-{
-	of_clk_init(NULL);
-	samsung_wdt_reset_of_init();
-	irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+	samsung_wdt_reset_of_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
 	/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
 	.dt_compat	= s3c64xx_dt_compat,
 	.map_io		= s3c64xx_dt_map_io,
-	.init_irq	= s3c64xx_dt_init_irq,
 	.init_machine	= s3c64xx_dt_init_machine,
 	.restart        = s3c64xx_dt_restart,
 MACHINE_END
arch/arm/mach-shmobile/board-armadillo800eva.c

@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
 	REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
 };
 
+/* Fixed 5.0V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+	REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
 /* Fixed 3.3V regulator to be used by SDHI0 */
 static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
 	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
 
 	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
 				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+	regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+				     ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
 
 	pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
 	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
arch/arm/mach-shmobile/board-bockw.c

@@ -679,7 +679,7 @@ static void __init bockw_init(void)
 			.id             = i,
 			.data           = &rsnd_card_info[i],
 			.size_data      = sizeof(struct asoc_simple_card_info),
-			.dma_mask       = ~0,
+			.dma_mask	= DMA_BIT_MASK(32),
 		};
 
 		platform_device_register_full(&cardinfo);
arch/arm/mach-shmobile/board-lager.c

@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
 	lager_add_standard_devices();
 
-	phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_id("r8a7790-ether-ff:01",
+					  lager_ksz8041_fixup);
 }
 
 static const char * const lager_boards_compat_dt[] __initconst = {
arch/arm/xen/enlighten.c

@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-			version, xen_events_irq, xen_hvm_resume_frames);
+			version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
arch/arm64/include/asm/xen/page-coherent.h

@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
arch/arm64/kernel/ptrace.c

@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 	int err, len, type, disabled = !ctrl.enabled;
 
-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
-
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
-			return -EINVAL;
-		}
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
+
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
+			return -EINVAL;
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
-	attr->disabled	= disabled;
 
 	return 0;
 }
arch/powerpc/include/asm/kvm_book3s.h

@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
arch/powerpc/include/asm/kvm_book3s_asm.h

@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
arch/powerpc/include/asm/opal.h

@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 		       uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-		      uint32_t addr, uint32_t *data, uint32_t sz);
+		      uint32_t addr, __be32 *data, uint32_t sz);
 int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
arch/powerpc/include/asm/switch_to.h

@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
arch/powerpc/kernel/asm-offsets.c

@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
arch/powerpc/kernel/crash_dump.c

@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
-	const u32 *basep, *sizep;
+	const __be32 *basep, *sizep;
 	unsigned int rtas_start = 0, rtas_end = 0;
 
 	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
 	if (basep && sizep) {
-		rtas_start = *basep;
-		rtas_end = *basep + *sizep;
+		rtas_start = be32_to_cpup(basep);
+		rtas_end = rtas_start + be32_to_cpup(sizep);
 	}
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
arch/powerpc/kernel/process.c

@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 	/*
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-			prime_debug_regs(new_thread);
+		|| (new_debug->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
|  | @ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request, | ||||||
| 
 | 
 | ||||||
| 			flush_fp_to_thread(child); | 			flush_fp_to_thread(child); | ||||||
| 			if (fpidx < (PT_FPSCR - PT_FPR0)) | 			if (fpidx < (PT_FPSCR - PT_FPR0)) | ||||||
| 				memcpy(&tmp, &child->thread.fp_state.fpr, | 				memcpy(&tmp, &child->thread.TS_FPR(fpidx), | ||||||
| 				       sizeof(long)); | 				       sizeof(long)); | ||||||
| 			else | 			else | ||||||
| 				tmp = child->thread.fp_state.fpscr; | 				tmp = child->thread.fp_state.fpscr; | ||||||
|  | @ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request, | ||||||
|  |  | ||||||
| 			flush_fp_to_thread(child); | 			flush_fp_to_thread(child); | ||||||
| 			if (fpidx < (PT_FPSCR - PT_FPR0)) | 			if (fpidx < (PT_FPSCR - PT_FPR0)) | ||||||
| 				memcpy(&child->thread.fp_state.fpr, &data, | 				memcpy(&child->thread.TS_FPR(fpidx), &data, | ||||||
| 				       sizeof(long)); | 				       sizeof(long)); | ||||||
| 			else | 			else | ||||||
| 				child->thread.fp_state.fpscr = data; | 				child->thread.fp_state.fpscr = data; | ||||||
|  |  | ||||||
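The two ptrace hunks above fix a real indexing bug, not just style: the old memcpy took the address of the whole fpr array, so a PEEK or POKE of FP register N always read or wrote register 0. The TS_FPR() accessor indexes by register number and hides the extra array dimension used for the VSX layout. A plain-C model of the fix, with invented names standing in for the kernel types:

	#include <string.h>

	struct fp_state { unsigned long fpr[32][1]; unsigned long fpscr; };
	#define FPR(st, i) ((st)->fpr[i][0])	/* models the kernel's TS_FPR() */

	static long peek_fpr(struct fp_state *st, int idx)
	{
		long tmp;

		/* old bug: memcpy(&tmp, &st->fpr, sizeof(long)) == register 0 */
		memcpy(&tmp, &FPR(st, idx), sizeof(long));
		return tmp;
	}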
|  | @ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void) | ||||||
| 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && | 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && | ||||||
| 	    (dn = of_find_node_by_path("/rtas"))) { | 	    (dn = of_find_node_by_path("/rtas"))) { | ||||||
| 		int num_addr_cell, num_size_cell, maxcpus; | 		int num_addr_cell, num_size_cell, maxcpus; | ||||||
| 		const unsigned int *ireg; | 		const __be32 *ireg; | ||||||
|  |  | ||||||
| 		num_addr_cell = of_n_addr_cells(dn); | 		num_addr_cell = of_n_addr_cells(dn); | ||||||
| 		num_size_cell = of_n_size_cells(dn); | 		num_size_cell = of_n_size_cells(dn); | ||||||
|  | @ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void) | ||||||
| 		if (!ireg) | 		if (!ireg) | ||||||
| 			goto out; | 			goto out; | ||||||
|  |  | ||||||
| 		maxcpus = ireg[num_addr_cell + num_size_cell]; | 		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell); | ||||||
|  |  | ||||||
| 		/* Double maxcpus for processors which have SMT capability */ | 		/* Double maxcpus for processors which have SMT capability */ | ||||||
| 		if (cpu_has_feature(CPU_FTR_SMT)) | 		if (cpu_has_feature(CPU_FTR_SMT)) | ||||||
|  |  | ||||||
|  | @ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | ||||||
| int cpu_to_core_id(int cpu) | int cpu_to_core_id(int cpu) | ||||||
| { | { | ||||||
| 	struct device_node *np; | 	struct device_node *np; | ||||||
| 	const int *reg; | 	const __be32 *reg; | ||||||
| 	int id = -1; | 	int id = -1; | ||||||
|  |  | ||||||
| 	np = of_get_cpu_node(cpu, NULL); | 	np = of_get_cpu_node(cpu, NULL); | ||||||
|  | @ -591,7 +591,7 @@ int cpu_to_core_id(int cpu) | ||||||
| 	if (!reg) | 	if (!reg) | ||||||
| 		goto out; | 		goto out; | ||||||
|  |  | ||||||
| 	id = *reg; | 	id = be32_to_cpup(reg); | ||||||
| out: | out: | ||||||
| 	of_node_put(np); | 	of_node_put(np); | ||||||
| 	return id; | 	return id; | ||||||
|  |  | ||||||
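The smp.c conversions above all apply one rule: device-tree property data is big-endian on the wire, so a raw integer dereference is only correct on big-endian hosts; the pointer should be typed __be32 and each cell read through be32_to_cpup(). A minimal sketch of the pattern (the property name and helper are illustrative, not part of this patch):

	#include <linux/errno.h>
	#include <linux/of.h>

	static int read_one_cell(struct device_node *np, const char *name, u32 *out)
	{
		const __be32 *p = of_get_property(np, name, NULL);

		if (!p)
			return -ENOENT;
		*out = be32_to_cpup(p);	/* byte-swaps only on little-endian hosts */
		return 0;
	}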
|  | @ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | ||||||
| 		slb_v = vcpu->kvm->arch.vrma_slb_v; | 		slb_v = vcpu->kvm->arch.vrma_slb_v; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	preempt_disable(); | ||||||
| 	/* Find the HPTE in the hash table */ | 	/* Find the HPTE in the hash table */ | ||||||
| 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, | 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, | ||||||
| 					 HPTE_V_VALID | HPTE_V_ABSENT); | 					 HPTE_V_VALID | HPTE_V_ABSENT); | ||||||
| 	if (index < 0) | 	if (index < 0) { | ||||||
|  | 		preempt_enable(); | ||||||
| 		return -ENOENT; | 		return -ENOENT; | ||||||
|  | 	} | ||||||
| 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | ||||||
| 	v = hptep[0] & ~HPTE_V_HVLOCK; | 	v = hptep[0] & ~HPTE_V_HVLOCK; | ||||||
| 	gr = kvm->arch.revmap[index].guest_rpte; | 	gr = kvm->arch.revmap[index].guest_rpte; | ||||||
|  | @ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | ||||||
| 	/* Unlock the HPTE */ | 	/* Unlock the HPTE */ | ||||||
| 	asm volatile("lwsync" : : : "memory"); | 	asm volatile("lwsync" : : : "memory"); | ||||||
| 	hptep[0] = v; | 	hptep[0] = v; | ||||||
|  | 	preempt_enable(); | ||||||
|  |  | ||||||
| 	gpte->eaddr = eaddr; | 	gpte->eaddr = eaddr; | ||||||
| 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); | 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); | ||||||
|  | @ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||||||
| 			return -EFAULT; | 			return -EFAULT; | ||||||
| 	} else { | 	} else { | ||||||
| 		page = pages[0]; | 		page = pages[0]; | ||||||
|  | 		pfn = page_to_pfn(page); | ||||||
| 		if (PageHuge(page)) { | 		if (PageHuge(page)) { | ||||||
| 			page = compound_head(page); | 			page = compound_head(page); | ||||||
| 			pte_size <<= compound_order(page); | 			pte_size <<= compound_order(page); | ||||||
|  | @ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||||||
| 			} | 			} | ||||||
| 			rcu_read_unlock_sched(); | 			rcu_read_unlock_sched(); | ||||||
| 		} | 		} | ||||||
| 		pfn = page_to_pfn(page); |  | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	ret = -EFAULT; | 	ret = -EFAULT; | ||||||
|  | @ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||||||
| 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; | 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	/* Set the HPTE to point to pfn */ | 	/* | ||||||
| 	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); | 	 * Set the HPTE to point to pfn. | ||||||
|  | 	 * Since the pfn is at PAGE_SIZE granularity, make sure we | ||||||
|  | 	 * don't mask out lower-order bits if psize < PAGE_SIZE. | ||||||
|  | 	 */ | ||||||
|  | 	if (psize < PAGE_SIZE) | ||||||
|  | 		psize = PAGE_SIZE; | ||||||
|  | 	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1)); | ||||||
| 	if (hpte_is_writable(r) && !write_ok) | 	if (hpte_is_writable(r) && !write_ok) | ||||||
| 		r = hpte_make_readonly(r); | 		r = hpte_make_readonly(r); | ||||||
| 	ret = RESUME_GUEST; | 	ret = RESUME_GUEST; | ||||||
|  |  | ||||||
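The xlate fix above wraps the HPTE lookup in preempt_disable()/preempt_enable() because kvmppc_hv_find_lock_hpte() returns with the entry's lock bit (HPTE_V_HVLOCK) held, and a holder that gets scheduled out leaves other CPUs spinning on that bit, per the comment added further down. The shape of the pattern, as a sketch with placeholder helpers rather than the real KVM API:

	#include <linux/errno.h>
	#include <linux/preempt.h>

	extern long find_and_lock_entry(unsigned long eaddr);	/* returns locked */
	extern unsigned long read_and_unlock_entry(long index);

	static long lookup(unsigned long eaddr, unsigned long *out)
	{
		long index;

		preempt_disable();		/* lock-bit holder must not sleep */
		index = find_and_lock_entry(eaddr);
		if (index < 0) {
			preempt_enable();	/* every exit path re-enables */
			return -ENOENT;
		}
		*out = read_and_unlock_entry(index);
		preempt_enable();
		return 0;
	}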
|  | @ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) | ||||||
| static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | ||||||
| { | { | ||||||
| 	struct kvmppc_vcore *vc = vcpu->arch.vcore; | 	struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||||||
|  | 	unsigned long flags; | ||||||
|  |  | ||||||
| 	spin_lock(&vcpu->arch.tbacct_lock); | 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); | ||||||
| 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && | 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && | ||||||
| 	    vc->preempt_tb != TB_NIL) { | 	    vc->preempt_tb != TB_NIL) { | ||||||
| 		vc->stolen_tb += mftb() - vc->preempt_tb; | 		vc->stolen_tb += mftb() - vc->preempt_tb; | ||||||
|  | @ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | ||||||
| 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; | 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; | ||||||
| 		vcpu->arch.busy_preempt = TB_NIL; | 		vcpu->arch.busy_preempt = TB_NIL; | ||||||
| 	} | 	} | ||||||
| 	spin_unlock(&vcpu->arch.tbacct_lock); | 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); | ||||||
| } | } | ||||||
|  |  | ||||||
| static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) | ||||||
| { | { | ||||||
| 	struct kvmppc_vcore *vc = vcpu->arch.vcore; | 	struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||||||
|  | 	unsigned long flags; | ||||||
|  |  | ||||||
| 	spin_lock(&vcpu->arch.tbacct_lock); | 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); | ||||||
| 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | ||||||
| 		vc->preempt_tb = mftb(); | 		vc->preempt_tb = mftb(); | ||||||
| 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) | 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) | ||||||
| 		vcpu->arch.busy_preempt = mftb(); | 		vcpu->arch.busy_preempt = mftb(); | ||||||
| 	spin_unlock(&vcpu->arch.tbacct_lock); | 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); | ||||||
| } | } | ||||||
|  |  | ||||||
| static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) | ||||||
|  | @ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) | ||||||
| 	 */ | 	 */ | ||||||
| 	if (vc->vcore_state != VCORE_INACTIVE && | 	if (vc->vcore_state != VCORE_INACTIVE && | ||||||
| 	    vc->runner->arch.run_task != current) { | 	    vc->runner->arch.run_task != current) { | ||||||
| 		spin_lock(&vc->runner->arch.tbacct_lock); | 		spin_lock_irq(&vc->runner->arch.tbacct_lock); | ||||||
| 		p = vc->stolen_tb; | 		p = vc->stolen_tb; | ||||||
| 		if (vc->preempt_tb != TB_NIL) | 		if (vc->preempt_tb != TB_NIL) | ||||||
| 			p += now - vc->preempt_tb; | 			p += now - vc->preempt_tb; | ||||||
| 		spin_unlock(&vc->runner->arch.tbacct_lock); | 		spin_unlock_irq(&vc->runner->arch.tbacct_lock); | ||||||
| 	} else { | 	} else { | ||||||
| 		p = vc->stolen_tb; | 		p = vc->stolen_tb; | ||||||
| 	} | 	} | ||||||
|  | @ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | ||||||
| 	core_stolen = vcore_stolen_time(vc, now); | 	core_stolen = vcore_stolen_time(vc, now); | ||||||
| 	stolen = core_stolen - vcpu->arch.stolen_logged; | 	stolen = core_stolen - vcpu->arch.stolen_logged; | ||||||
| 	vcpu->arch.stolen_logged = core_stolen; | 	vcpu->arch.stolen_logged = core_stolen; | ||||||
| 	spin_lock(&vcpu->arch.tbacct_lock); | 	spin_lock_irq(&vcpu->arch.tbacct_lock); | ||||||
| 	stolen += vcpu->arch.busy_stolen; | 	stolen += vcpu->arch.busy_stolen; | ||||||
| 	vcpu->arch.busy_stolen = 0; | 	vcpu->arch.busy_stolen = 0; | ||||||
| 	spin_unlock(&vcpu->arch.tbacct_lock); | 	spin_unlock_irq(&vcpu->arch.tbacct_lock); | ||||||
| 	if (!dt || !vpa) | 	if (!dt || !vpa) | ||||||
| 		return; | 		return; | ||||||
| 	memset(dt, 0, sizeof(struct dtl_entry)); | 	memset(dt, 0, sizeof(struct dtl_entry)); | ||||||
|  | @ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | ||||||
| 		if (list_empty(&vcpu->kvm->arch.rtas_tokens)) | 		if (list_empty(&vcpu->kvm->arch.rtas_tokens)) | ||||||
| 			return RESUME_HOST; | 			return RESUME_HOST; | ||||||
|  |  | ||||||
|  | 		idx = srcu_read_lock(&vcpu->kvm->srcu); | ||||||
| 		rc = kvmppc_rtas_hcall(vcpu); | 		rc = kvmppc_rtas_hcall(vcpu); | ||||||
|  | 		srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||||||
|  |  | ||||||
| 		if (rc == -ENOENT) | 		if (rc == -ENOENT) | ||||||
| 			return RESUME_HOST; | 			return RESUME_HOST; | ||||||
|  | @ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | ||||||
|  |  | ||||||
| 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) | 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) | ||||||
| 		return; | 		return; | ||||||
| 	spin_lock(&vcpu->arch.tbacct_lock); | 	spin_lock_irq(&vcpu->arch.tbacct_lock); | ||||||
| 	now = mftb(); | 	now = mftb(); | ||||||
| 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - | 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - | ||||||
| 		vcpu->arch.stolen_logged; | 		vcpu->arch.stolen_logged; | ||||||
| 	vcpu->arch.busy_preempt = now; | 	vcpu->arch.busy_preempt = now; | ||||||
| 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | ||||||
| 	spin_unlock(&vcpu->arch.tbacct_lock); | 	spin_unlock_irq(&vcpu->arch.tbacct_lock); | ||||||
| 	--vc->n_runnable; | 	--vc->n_runnable; | ||||||
| 	list_del(&vcpu->arch.run_list); | 	list_del(&vcpu->arch.run_list); | ||||||
| } | } | ||||||
|  |  | ||||||
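The tbacct_lock hunks above are all one change: the lock is now also taken from contexts that run with interrupts off (the vcpu load/put paths invoked from preempt notifiers), so every acquirer has to use an IRQ-safe variant or risk a same-CPU deadlock. Paths that may run in any context use _irqsave; paths known to have IRQs on use the cheaper _irq pair. The discipline as a generic sketch:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_SPINLOCK(acct_lock);
	static u64 busy_stolen;

	/* Callable from any context: saves, disables, then restores IRQ state. */
	static void account_stolen(u64 delta)
	{
		unsigned long flags;

		spin_lock_irqsave(&acct_lock, flags);
		busy_stolen += delta;
		spin_unlock_irqrestore(&acct_lock, flags);
	}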
|  | @ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | ||||||
| 		is_io = pa & (HPTE_R_I | HPTE_R_W); | 		is_io = pa & (HPTE_R_I | HPTE_R_W); | ||||||
| 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); | 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); | ||||||
| 		pa &= PAGE_MASK; | 		pa &= PAGE_MASK; | ||||||
|  | 		pa |= gpa & ~PAGE_MASK; | ||||||
| 	} else { | 	} else { | ||||||
| 		/* Translate to host virtual address */ | 		/* Translate to host virtual address */ | ||||||
| 		hva = __gfn_to_hva_memslot(memslot, gfn); | 		hva = __gfn_to_hva_memslot(memslot, gfn); | ||||||
|  | @ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | ||||||
| 				ptel = hpte_make_readonly(ptel); | 				ptel = hpte_make_readonly(ptel); | ||||||
| 			is_io = hpte_cache_bits(pte_val(pte)); | 			is_io = hpte_cache_bits(pte_val(pte)); | ||||||
| 			pa = pte_pfn(pte) << PAGE_SHIFT; | 			pa = pte_pfn(pte) << PAGE_SHIFT; | ||||||
|  | 			pa |= hva & (pte_size - 1); | ||||||
|  | 			pa |= gpa & ~PAGE_MASK; | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if (pte_size < psize) | 	if (pte_size < psize) | ||||||
| 		return H_PARAMETER; | 		return H_PARAMETER; | ||||||
| 	if (pa && pte_size > psize) |  | ||||||
| 		pa |= gpa & (pte_size - 1); |  | ||||||
|  |  | ||||||
| 	ptel &= ~(HPTE_R_PP0 - psize); | 	ptel &= ~(HPTE_R_PP0 - psize); | ||||||
| 	ptel |= pa; | 	ptel |= pa; | ||||||
|  | @ -749,6 +750,10 @@ static int slb_base_page_shift[4] = { | ||||||
| 	20,	/* 1M, unsupported */ | 	20,	/* 1M, unsupported */ | ||||||
| }; | }; | ||||||
|  |  | ||||||
|  | /* When called from virtmode, this func should be protected by | ||||||
|  |  * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK | ||||||
|  |  * can trigger deadlock issue. | ||||||
|  |  */ | ||||||
| long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | ||||||
| 			      unsigned long valid) | 			      unsigned long valid) | ||||||
| { | { | ||||||
|  |  | ||||||
|  | @ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||||||
|  |  | ||||||
| 13:	b	machine_check_fwnmi | 13:	b	machine_check_fwnmi | ||||||
|  |  | ||||||
|  |  | ||||||
| /* | /* | ||||||
|  * We come in here when wakened from nap mode on a secondary hw thread. |  * We come in here when wakened from nap mode on a secondary hw thread. | ||||||
|  * Relocation is off and most register values are lost. |  * Relocation is off and most register values are lost. | ||||||
|  | @ -224,6 +223,11 @@ kvm_start_guest: | ||||||
| 	/* Clear our vcpu pointer so we don't come back in early */ | 	/* Clear our vcpu pointer so we don't come back in early */ | ||||||
| 	li	r0, 0 | 	li	r0, 0 | ||||||
| 	std	r0, HSTATE_KVM_VCPU(r13) | 	std	r0, HSTATE_KVM_VCPU(r13) | ||||||
|  | 	/* | ||||||
|  | 	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing | ||||||
|  | 	 * the nap_count, because once the increment to nap_count is | ||||||
|  | 	 * visible we could be given another vcpu. | ||||||
|  | 	 */ | ||||||
| 	lwsync | 	lwsync | ||||||
| 	/* Clear any pending IPI - we're an offline thread */ | 	/* Clear any pending IPI - we're an offline thread */ | ||||||
| 	ld	r5, HSTATE_XICS_PHYS(r13) | 	ld	r5, HSTATE_XICS_PHYS(r13) | ||||||
|  | @ -241,7 +245,6 @@ kvm_start_guest: | ||||||
| 	/* increment the nap count and then go to nap mode */ | 	/* increment the nap count and then go to nap mode */ | ||||||
| 	ld	r4, HSTATE_KVM_VCORE(r13) | 	ld	r4, HSTATE_KVM_VCORE(r13) | ||||||
| 	addi	r4, r4, VCORE_NAP_COUNT | 	addi	r4, r4, VCORE_NAP_COUNT | ||||||
| 	lwsync				/* make previous updates visible */ |  | ||||||
| 51:	lwarx	r3, 0, r4 | 51:	lwarx	r3, 0, r4 | ||||||
| 	addi	r3, r3, 1 | 	addi	r3, r3, 1 | ||||||
| 	stwcx.	r3, 0, r4 | 	stwcx.	r3, 0, r4 | ||||||
|  | @ -751,15 +754,14 @@ kvmppc_interrupt_hv: | ||||||
| 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0 | 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0 | ||||||
| 	 * guest R13 saved in SPRN_SCRATCH0 | 	 * guest R13 saved in SPRN_SCRATCH0 | ||||||
| 	 */ | 	 */ | ||||||
| 	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ | 	std	r9, HSTATE_SCRATCH2(r13) | ||||||
| 	std	r9, HSTATE_HOST_R2(r13) |  | ||||||
|  |  | ||||||
| 	lbz	r9, HSTATE_IN_GUEST(r13) | 	lbz	r9, HSTATE_IN_GUEST(r13) | ||||||
| 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV | 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV | ||||||
| 	beq	kvmppc_bad_host_intr | 	beq	kvmppc_bad_host_intr | ||||||
| #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | ||||||
| 	cmpwi	r9, KVM_GUEST_MODE_GUEST | 	cmpwi	r9, KVM_GUEST_MODE_GUEST | ||||||
| 	ld	r9, HSTATE_HOST_R2(r13) | 	ld	r9, HSTATE_SCRATCH2(r13) | ||||||
| 	beq	kvmppc_interrupt_pr | 	beq	kvmppc_interrupt_pr | ||||||
| #endif | #endif | ||||||
| 	/* We're now back in the host but in guest MMU context */ | 	/* We're now back in the host but in guest MMU context */ | ||||||
|  | @ -779,7 +781,7 @@ kvmppc_interrupt_hv: | ||||||
| 	std	r6, VCPU_GPR(R6)(r9) | 	std	r6, VCPU_GPR(R6)(r9) | ||||||
| 	std	r7, VCPU_GPR(R7)(r9) | 	std	r7, VCPU_GPR(R7)(r9) | ||||||
| 	std	r8, VCPU_GPR(R8)(r9) | 	std	r8, VCPU_GPR(R8)(r9) | ||||||
| 	ld	r0, HSTATE_HOST_R2(r13) | 	ld	r0, HSTATE_SCRATCH2(r13) | ||||||
| 	std	r0, VCPU_GPR(R9)(r9) | 	std	r0, VCPU_GPR(R9)(r9) | ||||||
| 	std	r10, VCPU_GPR(R10)(r9) | 	std	r10, VCPU_GPR(R10)(r9) | ||||||
| 	std	r11, VCPU_GPR(R11)(r9) | 	std	r11, VCPU_GPR(R11)(r9) | ||||||
|  | @ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | ||||||
| 	 */ | 	 */ | ||||||
| 	/* Increment the threads-exiting-guest count in the 0xff00 | 	/* Increment the threads-exiting-guest count in the 0xff00 | ||||||
| 	   bits of vcore->entry_exit_count */ | 	   bits of vcore->entry_exit_count */ | ||||||
| 	lwsync |  | ||||||
| 	ld	r5,HSTATE_KVM_VCORE(r13) | 	ld	r5,HSTATE_KVM_VCORE(r13) | ||||||
| 	addi	r6,r5,VCORE_ENTRY_EXIT | 	addi	r6,r5,VCORE_ENTRY_EXIT | ||||||
| 41:	lwarx	r3,0,r6 | 41:	lwarx	r3,0,r6 | ||||||
| 	addi	r0,r3,0x100 | 	addi	r0,r3,0x100 | ||||||
| 	stwcx.	r0,0,r6 | 	stwcx.	r0,0,r6 | ||||||
| 	bne	41b | 	bne	41b | ||||||
| 	lwsync | 	isync		/* order stwcx. vs. reading napping_threads */ | ||||||
|  |  | ||||||
| 	/* | 	/* | ||||||
| 	 * At this point we have an interrupt that we have to pass | 	 * At this point we have an interrupt that we have to pass | ||||||
|  | @ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | ||||||
| 	sld	r0,r0,r4 | 	sld	r0,r0,r4 | ||||||
| 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */ | 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */ | ||||||
| 	beq	43f | 	beq	43f | ||||||
|  | 	/* Order entry/exit update vs. IPIs */ | ||||||
|  | 	sync | ||||||
| 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */ | 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */ | ||||||
| 	subf	r6,r4,r13 | 	subf	r6,r4,r13 | ||||||
| 42:	andi.	r0,r3,1 | 42:	andi.	r0,r3,1 | ||||||
|  | @ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | ||||||
| 	bge	kvm_cede_exit | 	bge	kvm_cede_exit | ||||||
| 	stwcx.	r4,0,r6 | 	stwcx.	r4,0,r6 | ||||||
| 	bne	31b | 	bne	31b | ||||||
|  | 	/* order napping_threads update vs testing entry_exit_count */ | ||||||
|  | 	isync | ||||||
| 	li	r0,1 | 	li	r0,1 | ||||||
| 	stb	r0,HSTATE_NAPPING(r13) | 	stb	r0,HSTATE_NAPPING(r13) | ||||||
| 	/* order napping_threads update vs testing entry_exit_count */ |  | ||||||
| 	lwsync |  | ||||||
| 	mr	r4,r3 | 	mr	r4,r3 | ||||||
| 	lwz	r7,VCORE_ENTRY_EXIT(r5) | 	lwz	r7,VCORE_ENTRY_EXIT(r5) | ||||||
| 	cmpwi	r7,0x100 | 	cmpwi	r7,0x100 | ||||||
|  |  | ||||||
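Two ordering idioms recur in the rmhandlers hunks above: isync after a successful stwcx. is an acquire (the later napping_threads read cannot be satisfied before the atomic update completes), and a full sync is needed before an IPI so the entry/exit update is visible to the thread being woken. In generic kernel C the same pair would roughly be written with the barrier helpers, sketched here with stand-in variables:

	#include <linux/atomic.h>
	#include <linux/compiler.h>

	static atomic_t entry_exit_count;
	static int napping_threads;

	static void note_exit_and_kick(void)
	{
		atomic_inc(&entry_exit_count);
		smp_mb__after_atomic();	/* order the increment vs. the read below */
		if (READ_ONCE(napping_threads))
			;	/* send the IPI: our update is visible first */
	}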
|  | @ -129,29 +129,32 @@ kvm_start_lightweight: | ||||||
| 	 * R12      = exit handler id | 	 * R12      = exit handler id | ||||||
| 	 * R13      = PACA | 	 * R13      = PACA | ||||||
| 	 * SVCPU.*  = guest * | 	 * SVCPU.*  = guest * | ||||||
|  | 	 * MSR.EE   = 1 | ||||||
| 	 * | 	 * | ||||||
| 	 */ | 	 */ | ||||||
|  |  | ||||||
|  | 	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */ | ||||||
|  |  | ||||||
|  | 	/* | ||||||
|  | 	 * kvmppc_copy_from_svcpu can clobber volatile registers, save | ||||||
|  | 	 * the exit handler id to the vcpu and restore it from there later. | ||||||
|  | 	 */ | ||||||
|  | 	stw	r12, VCPU_TRAP(r3) | ||||||
|  |  | ||||||
| 	/* Transfer reg values from shadow vcpu back to vcpu struct */ | 	/* Transfer reg values from shadow vcpu back to vcpu struct */ | ||||||
| 	/* On 64-bit, interrupts are still off at this point */ | 	/* On 64-bit, interrupts are still off at this point */ | ||||||
| 	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */ |  | ||||||
| 	GET_SHADOW_VCPU(r4) | 	GET_SHADOW_VCPU(r4) | ||||||
| 	bl	FUNC(kvmppc_copy_from_svcpu) | 	bl	FUNC(kvmppc_copy_from_svcpu) | ||||||
| 	nop | 	nop | ||||||
|  |  | ||||||
| #ifdef CONFIG_PPC_BOOK3S_64 | #ifdef CONFIG_PPC_BOOK3S_64 | ||||||
| 	/* Re-enable interrupts */ |  | ||||||
| 	ld	r3, HSTATE_HOST_MSR(r13) |  | ||||||
| 	ori	r3, r3, MSR_EE |  | ||||||
| 	MTMSR_EERI(r3) |  | ||||||
|  |  | ||||||
| 	/* | 	/* | ||||||
| 	 * Reload kernel SPRG3 value. | 	 * Reload kernel SPRG3 value. | ||||||
| 	 * No need to save guest value as usermode can't modify SPRG3. | 	 * No need to save guest value as usermode can't modify SPRG3. | ||||||
| 	 */ | 	 */ | ||||||
| 	ld	r3, PACA_SPRG3(r13) | 	ld	r3, PACA_SPRG3(r13) | ||||||
| 	mtspr	SPRN_SPRG3, r3 | 	mtspr	SPRN_SPRG3, r3 | ||||||
|  |  | ||||||
| #endif /* CONFIG_PPC_BOOK3S_64 */ | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||||||
|  |  | ||||||
| 	/* R7 = vcpu */ | 	/* R7 = vcpu */ | ||||||
|  | @ -177,7 +180,7 @@ kvm_start_lightweight: | ||||||
| 	PPC_STL	r31, VCPU_GPR(R31)(r7) | 	PPC_STL	r31, VCPU_GPR(R31)(r7) | ||||||
|  |  | ||||||
| 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | ||||||
| 	mr	r5, r12 | 	lwz	r5, VCPU_TRAP(r7) | ||||||
|  |  | ||||||
| 	/* Restore r3 (kvm_run) and r4 (vcpu) */ | 	/* Restore r3 (kvm_run) and r4 (vcpu) */ | ||||||
| 	REST_2GPRS(3, r1) | 	REST_2GPRS(3, r1) | ||||||
|  |  | ||||||
|  | @ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | ||||||
| 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | ||||||
| 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | ||||||
| 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; | 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; | ||||||
|  | 	svcpu->in_use = 0; | ||||||
| 	svcpu_put(svcpu); | 	svcpu_put(svcpu); | ||||||
| #endif | #endif | ||||||
| 	vcpu->cpu = smp_processor_id(); | 	vcpu->cpu = smp_processor_id(); | ||||||
|  | @ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | ||||||
| { | { | ||||||
| #ifdef CONFIG_PPC_BOOK3S_64 | #ifdef CONFIG_PPC_BOOK3S_64 | ||||||
| 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | ||||||
|  | 	if (svcpu->in_use) { | ||||||
|  | 		kvmppc_copy_from_svcpu(vcpu, svcpu); | ||||||
|  | 	} | ||||||
| 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); | 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); | ||||||
| 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; | 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; | ||||||
| 	svcpu_put(svcpu); | 	svcpu_put(svcpu); | ||||||
|  | @ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | ||||||
| 	svcpu->ctr = vcpu->arch.ctr; | 	svcpu->ctr = vcpu->arch.ctr; | ||||||
| 	svcpu->lr  = vcpu->arch.lr; | 	svcpu->lr  = vcpu->arch.lr; | ||||||
| 	svcpu->pc  = vcpu->arch.pc; | 	svcpu->pc  = vcpu->arch.pc; | ||||||
|  | 	svcpu->in_use = true; | ||||||
| } | } | ||||||
|  |  | ||||||
| /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | ||||||
| void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | ||||||
| 			    struct kvmppc_book3s_shadow_vcpu *svcpu) | 			    struct kvmppc_book3s_shadow_vcpu *svcpu) | ||||||
| { | { | ||||||
|  | 	/* | ||||||
|  | 	 * vcpu_put would just call us again because in_use hasn't | ||||||
|  | 	 * been updated yet. | ||||||
|  | 	 */ | ||||||
|  | 	preempt_disable(); | ||||||
|  |  | ||||||
|  | 	/* | ||||||
|  | 	 * Maybe we were already preempted and synced the svcpu from | ||||||
|  | 	 * our preempt notifiers. Don't bother touching this svcpu then. | ||||||
|  | 	 */ | ||||||
|  | 	if (!svcpu->in_use) | ||||||
|  | 		goto out; | ||||||
|  |  | ||||||
| 	vcpu->arch.gpr[0] = svcpu->gpr[0]; | 	vcpu->arch.gpr[0] = svcpu->gpr[0]; | ||||||
| 	vcpu->arch.gpr[1] = svcpu->gpr[1]; | 	vcpu->arch.gpr[1] = svcpu->gpr[1]; | ||||||
| 	vcpu->arch.gpr[2] = svcpu->gpr[2]; | 	vcpu->arch.gpr[2] = svcpu->gpr[2]; | ||||||
|  | @ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | ||||||
| 	vcpu->arch.fault_dar   = svcpu->fault_dar; | 	vcpu->arch.fault_dar   = svcpu->fault_dar; | ||||||
| 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | ||||||
| 	vcpu->arch.last_inst   = svcpu->last_inst; | 	vcpu->arch.last_inst   = svcpu->last_inst; | ||||||
|  | 	svcpu->in_use = false; | ||||||
|  |  | ||||||
|  | out: | ||||||
|  | 	preempt_enable(); | ||||||
| } | } | ||||||
|  |  | ||||||
| static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | ||||||
|  |  | ||||||
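The in_use flag added above makes the shadow-vcpu copy-back idempotent: either the explicit call on the exit path or the vcpu_put preempt notifier performs it, whichever runs first, and preemption is disabled so the notifier cannot interleave with a copy already in progress. The dirty-flag pattern, with stand-in types:

	#include <linux/preempt.h>
	#include <linux/string.h>

	struct shadow { int in_use; unsigned long gpr[32]; };

	static void copy_back(unsigned long *gpr, struct shadow *s)
	{
		preempt_disable();	/* the put-notifier path may also call us */
		if (!s->in_use)
			goto out;	/* already synced; nothing left to copy */
		memcpy(gpr, s->gpr, sizeof(s->gpr));
		s->in_use = 0;
	out:
		preempt_enable();
	}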
|  | @ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) | ||||||
|  |  | ||||||
| 	li	r6, MSR_IR | MSR_DR | 	li	r6, MSR_IR | MSR_DR | ||||||
| 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */ | 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */ | ||||||
| #ifdef CONFIG_PPC_BOOK3S_32 |  | ||||||
| 	/* | 	/* | ||||||
| 	 * Set EE in HOST_MSR so that it's enabled when we get into our | 	 * Set EE in HOST_MSR so that it's enabled when we get into our | ||||||
| 	 * C exit handler function.  On 64-bit we delay enabling | 	 * C exit handler function. | ||||||
| 	 * interrupts until we have finished transferring stuff |  | ||||||
| 	 * to or from the PACA. |  | ||||||
| 	 */ | 	 */ | ||||||
| 	ori	r5, r5, MSR_EE | 	ori	r5, r5, MSR_EE | ||||||
| #endif |  | ||||||
| 	mtsrr0	r7 | 	mtsrr0	r7 | ||||||
| 	mtsrr1	r6 | 	mtsrr1	r6 | ||||||
| 	RFI | 	RFI | ||||||
|  |  | ||||||
|  | @ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | ||||||
| int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||||||
| { | { | ||||||
| 	int ret, s; | 	int ret, s; | ||||||
| 	struct thread_struct thread; | 	struct debug_reg debug; | ||||||
| #ifdef CONFIG_PPC_FPU | #ifdef CONFIG_PPC_FPU | ||||||
| 	struct thread_fp_state fp; | 	struct thread_fp_state fp; | ||||||
| 	int fpexc_mode; | 	int fpexc_mode; | ||||||
|  | @ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
| 	/* Switch to guest debug context */ | 	/* Switch to guest debug context */ | ||||||
| 	thread.debug = vcpu->arch.shadow_dbg_reg; | 	debug = vcpu->arch.shadow_dbg_reg; | ||||||
| 	switch_booke_debug_regs(&thread); | 	switch_booke_debug_regs(&debug); | ||||||
| 	thread.debug = current->thread.debug; | 	debug = current->thread.debug; | ||||||
| 	current->thread.debug = vcpu->arch.shadow_dbg_reg; | 	current->thread.debug = vcpu->arch.shadow_dbg_reg; | ||||||
|  |  | ||||||
| 	kvmppc_fix_ee_before_entry(); | 	kvmppc_fix_ee_before_entry(); | ||||||
|  | @ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||||||
| 	   We also get here with interrupts enabled. */ | 	   We also get here with interrupts enabled. */ | ||||||
|  |  | ||||||
| 	/* Switch back to user space debug context */ | 	/* Switch back to user space debug context */ | ||||||
| 	switch_booke_debug_regs(&thread); | 	switch_booke_debug_regs(&debug); | ||||||
| 	current->thread.debug = thread.debug; | 	current->thread.debug = debug; | ||||||
|  |  | ||||||
| #ifdef CONFIG_PPC_FPU | #ifdef CONFIG_PPC_FPU | ||||||
| 	kvmppc_save_guest_fp(vcpu); | 	kvmppc_save_guest_fp(vcpu); | ||||||
|  |  | ||||||
|  | @ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; | ||||||
| static u8 opal_lpc_inb(unsigned long port) | static u8 opal_lpc_inb(unsigned long port) | ||||||
| { | { | ||||||
| 	int64_t rc; | 	int64_t rc; | ||||||
| 	uint32_t data; | 	__be32 data; | ||||||
|  |  | ||||||
| 	if (opal_lpc_chip_id < 0 || port > 0xffff) | 	if (opal_lpc_chip_id < 0 || port > 0xffff) | ||||||
| 		return 0xff; | 		return 0xff; | ||||||
| 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); | 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); | ||||||
| 	return rc ? 0xff : data; | 	return rc ? 0xff : be32_to_cpu(data); | ||||||
| } | } | ||||||
|  |  | ||||||
| static __le16 __opal_lpc_inw(unsigned long port) | static __le16 __opal_lpc_inw(unsigned long port) | ||||||
| { | { | ||||||
| 	int64_t rc; | 	int64_t rc; | ||||||
| 	uint32_t data; | 	__be32 data; | ||||||
|  |  | ||||||
| 	if (opal_lpc_chip_id < 0 || port > 0xfffe) | 	if (opal_lpc_chip_id < 0 || port > 0xfffe) | ||||||
| 		return 0xffff; | 		return 0xffff; | ||||||
| 	if (port & 1) | 	if (port & 1) | ||||||
| 		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); | 		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); | ||||||
| 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); | 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); | ||||||
| 	return rc ? 0xffff : data; | 	return rc ? 0xffff : be32_to_cpu(data); | ||||||
| } | } | ||||||
| static u16 opal_lpc_inw(unsigned long port) | static u16 opal_lpc_inw(unsigned long port) | ||||||
| { | { | ||||||
|  | @ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) | ||||||
| static __le32 __opal_lpc_inl(unsigned long port) | static __le32 __opal_lpc_inl(unsigned long port) | ||||||
| { | { | ||||||
| 	int64_t rc; | 	int64_t rc; | ||||||
| 	uint32_t data; | 	__be32 data; | ||||||
|  |  | ||||||
| 	if (opal_lpc_chip_id < 0 || port > 0xfffc) | 	if (opal_lpc_chip_id < 0 || port > 0xfffc) | ||||||
| 		return 0xffffffff; | 		return 0xffffffff; | ||||||
|  | @ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) | ||||||
| 		       (__le32)opal_lpc_inb(port + 2) <<  8 | | 		       (__le32)opal_lpc_inb(port + 2) <<  8 | | ||||||
| 			       opal_lpc_inb(port + 3); | 			       opal_lpc_inb(port + 3); | ||||||
| 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); | 	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); | ||||||
| 	return rc ? 0xffffffff : data; | 	return rc ? 0xffffffff : be32_to_cpu(data); | ||||||
| } | } | ||||||
|  |  | ||||||
| static u32 opal_lpc_inl(unsigned long port) | static u32 opal_lpc_inl(unsigned long port) | ||||||
|  |  | ||||||
|  | @ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | ||||||
| { | { | ||||||
| 	struct opal_scom_map *m = map; | 	struct opal_scom_map *m = map; | ||||||
| 	int64_t rc; | 	int64_t rc; | ||||||
|  | 	__be64 v; | ||||||
|  |  | ||||||
| 	reg = opal_scom_unmangle(reg); | 	reg = opal_scom_unmangle(reg); | ||||||
| 	rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); | 	rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); | ||||||
|  | 	*value = be64_to_cpu(v); | ||||||
| 	return opal_xscom_err_xlate(rc); | 	return opal_xscom_err_xlate(rc); | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) | ||||||
| { | { | ||||||
| 	struct hvcall_ppp_data ppp_data; | 	struct hvcall_ppp_data ppp_data; | ||||||
| 	struct device_node *root; | 	struct device_node *root; | ||||||
| 	const int *perf_level; | 	const __be32 *perf_level; | ||||||
| 	int rc; | 	int rc; | ||||||
|  |  | ||||||
| 	rc = h_get_ppp(&ppp_data); | 	rc = h_get_ppp(&ppp_data); | ||||||
|  | @ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) | ||||||
| 		perf_level = of_get_property(root, | 		perf_level = of_get_property(root, | ||||||
| 				"ibm,partition-performance-parameters-level", | 				"ibm,partition-performance-parameters-level", | ||||||
| 					     NULL); | 					     NULL); | ||||||
| 		if (perf_level && (*perf_level >= 1)) { | 		if (perf_level && (be32_to_cpup(perf_level) >= 1)) { | ||||||
| 			seq_printf(m, | 			seq_printf(m, | ||||||
| 			    "physical_procs_allocated_to_virtualization=%d\n", | 			    "physical_procs_allocated_to_virtualization=%d\n", | ||||||
| 				   ppp_data.phys_platform_procs); | 				   ppp_data.phys_platform_procs); | ||||||
|  | @ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | ||||||
| 	int partition_potential_processors; | 	int partition_potential_processors; | ||||||
| 	int partition_active_processors; | 	int partition_active_processors; | ||||||
| 	struct device_node *rtas_node; | 	struct device_node *rtas_node; | ||||||
| 	const int *lrdrp = NULL; | 	const __be32 *lrdrp = NULL; | ||||||
|  |  | ||||||
| 	rtas_node = of_find_node_by_path("/rtas"); | 	rtas_node = of_find_node_by_path("/rtas"); | ||||||
| 	if (rtas_node) | 	if (rtas_node) | ||||||
|  | @ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | ||||||
| 	if (lrdrp == NULL) { | 	if (lrdrp == NULL) { | ||||||
| 		partition_potential_processors = vdso_data->processorCount; | 		partition_potential_processors = vdso_data->processorCount; | ||||||
| 	} else { | 	} else { | ||||||
| 		partition_potential_processors = *(lrdrp + 4); | 		partition_potential_processors = be32_to_cpup(lrdrp + 4); | ||||||
| 	} | 	} | ||||||
| 	of_node_put(rtas_node); | 	of_node_put(rtas_node); | ||||||
|  |  | ||||||
|  | @ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | ||||||
| 	const char *model = ""; | 	const char *model = ""; | ||||||
| 	const char *system_id = ""; | 	const char *system_id = ""; | ||||||
| 	const char *tmp; | 	const char *tmp; | ||||||
| 	const unsigned int *lp_index_ptr; | 	const __be32 *lp_index_ptr; | ||||||
| 	unsigned int lp_index = 0; | 	unsigned int lp_index = 0; | ||||||
|  |  | ||||||
| 	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); | 	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); | ||||||
|  | @ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | ||||||
| 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", | 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", | ||||||
| 					NULL); | 					NULL); | ||||||
| 		if (lp_index_ptr) | 		if (lp_index_ptr) | ||||||
| 			lp_index = *lp_index_ptr; | 			lp_index = be32_to_cpup(lp_index_ptr); | ||||||
| 		of_node_put(rootdn); | 		of_node_put(rootdn); | ||||||
| 	} | 	} | ||||||
| 	seq_printf(m, "serial_number=%s\n", system_id); | 	seq_printf(m, "serial_number=%s\n", system_id); | ||||||
|  |  | ||||||
|  | @ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | ||||||
| { | { | ||||||
| 	struct device_node *dn; | 	struct device_node *dn; | ||||||
| 	struct pci_dn *pdn; | 	struct pci_dn *pdn; | ||||||
| 	const u32 *req_msi; | 	const __be32 *p; | ||||||
|  | 	u32 req_msi; | ||||||
|  |  | ||||||
| 	pdn = pci_get_pdn(pdev); | 	pdn = pci_get_pdn(pdev); | ||||||
| 	if (!pdn) | 	if (!pdn) | ||||||
|  | @ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | ||||||
|  |  | ||||||
| 	dn = pdn->node; | 	dn = pdn->node; | ||||||
|  |  | ||||||
| 	req_msi = of_get_property(dn, prop_name, NULL); | 	p = of_get_property(dn, prop_name, NULL); | ||||||
| 	if (!req_msi) { | 	if (!p) { | ||||||
| 		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); | 		pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); | ||||||
| 		return -ENOENT; | 		return -ENOENT; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if (*req_msi < nvec) { | 	req_msi = be32_to_cpup(p); | ||||||
|  | 	if (req_msi < nvec) { | ||||||
| 		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); | 		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); | ||||||
|  |  | ||||||
| 		if (*req_msi == 0) /* Be paranoid */ | 		if (req_msi == 0) /* Be paranoid */ | ||||||
| 			return -ENOSPC; | 			return -ENOSPC; | ||||||
|  |  | ||||||
| 		return *req_msi; | 		return req_msi; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	return 0; | 	return 0; | ||||||
|  | @ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) | ||||||
| static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | ||||||
| { | { | ||||||
| 	struct device_node *dn; | 	struct device_node *dn; | ||||||
| 	const u32 *p; | 	const __be32 *p; | ||||||
|  |  | ||||||
| 	dn = of_node_get(pci_device_to_OF_node(dev)); | 	dn = of_node_get(pci_device_to_OF_node(dev)); | ||||||
| 	while (dn) { | 	while (dn) { | ||||||
|  | @ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | ||||||
| 		if (p) { | 		if (p) { | ||||||
| 			pr_debug("rtas_msi: found prop on dn %s\n", | 			pr_debug("rtas_msi: found prop on dn %s\n", | ||||||
| 				dn->full_name); | 				dn->full_name); | ||||||
| 			*total = *p; | 			*total = be32_to_cpup(p); | ||||||
| 			return dn; | 			return dn; | ||||||
| 		} | 		} | ||||||
|  |  | ||||||
|  | @ -232,13 +234,13 @@ struct msi_counts { | ||||||
| static void *count_non_bridge_devices(struct device_node *dn, void *data) | static void *count_non_bridge_devices(struct device_node *dn, void *data) | ||||||
| { | { | ||||||
| 	struct msi_counts *counts = data; | 	struct msi_counts *counts = data; | ||||||
| 	const u32 *p; | 	const __be32 *p; | ||||||
| 	u32 class; | 	u32 class; | ||||||
|  |  | ||||||
| 	pr_debug("rtas_msi: counting %s\n", dn->full_name); | 	pr_debug("rtas_msi: counting %s\n", dn->full_name); | ||||||
|  |  | ||||||
| 	p = of_get_property(dn, "class-code", NULL); | 	p = of_get_property(dn, "class-code", NULL); | ||||||
| 	class = p ? *p : 0; | 	class = p ? be32_to_cpup(p) : 0; | ||||||
|  |  | ||||||
| 	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) | 	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||||||
| 		counts->num_devices++; | 		counts->num_devices++; | ||||||
|  | @ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) | ||||||
| static void *count_spare_msis(struct device_node *dn, void *data) | static void *count_spare_msis(struct device_node *dn, void *data) | ||||||
| { | { | ||||||
| 	struct msi_counts *counts = data; | 	struct msi_counts *counts = data; | ||||||
| 	const u32 *p; | 	const __be32 *p; | ||||||
| 	int req; | 	int req; | ||||||
|  |  | ||||||
| 	if (dn == counts->requestor) | 	if (dn == counts->requestor) | ||||||
|  | @ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) | ||||||
| 		req = 0; | 		req = 0; | ||||||
| 		p = of_get_property(dn, "ibm,req#msi", NULL); | 		p = of_get_property(dn, "ibm,req#msi", NULL); | ||||||
| 		if (p) | 		if (p) | ||||||
| 			req = *p; | 			req = be32_to_cpup(p); | ||||||
|  |  | ||||||
| 		p = of_get_property(dn, "ibm,req#msi-x", NULL); | 		p = of_get_property(dn, "ibm,req#msi-x", NULL); | ||||||
| 		if (p) | 		if (p) | ||||||
| 			req = max(req, (int)*p); | 			req = max(req, (int)be32_to_cpup(p)); | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if (req < counts->quota) | 	if (req < counts->quota) | ||||||
|  |  | ||||||
|  | @ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT];	/* assume this is in the first 4GB */ | ||||||
| static DEFINE_SPINLOCK(nvram_lock); | static DEFINE_SPINLOCK(nvram_lock); | ||||||
|  |  | ||||||
| struct err_log_info { | struct err_log_info { | ||||||
| 	int error_type; | 	__be32 error_type; | ||||||
| 	unsigned int seq_num; | 	__be32 seq_num; | ||||||
| }; | }; | ||||||
|  |  | ||||||
| struct nvram_os_partition { | struct nvram_os_partition { | ||||||
|  | @ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { | ||||||
| }; | }; | ||||||
|  |  | ||||||
| struct oops_log_info { | struct oops_log_info { | ||||||
| 	u16 version; | 	__be16 version; | ||||||
| 	u16 report_length; | 	__be16 report_length; | ||||||
| 	u64 timestamp; | 	__be64 timestamp; | ||||||
| } __attribute__((packed)); | } __attribute__((packed)); | ||||||
|  |  | ||||||
| static void oops_to_nvram(struct kmsg_dumper *dumper, | static void oops_to_nvram(struct kmsg_dumper *dumper, | ||||||
|  | @ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | ||||||
| 		length = part->size; | 		length = part->size; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	info.error_type = err_type; | 	info.error_type = cpu_to_be32(err_type); | ||||||
| 	info.seq_num = error_log_cnt; | 	info.seq_num = cpu_to_be32(error_log_cnt); | ||||||
|  |  | ||||||
| 	tmp_index = part->index; | 	tmp_index = part->index; | ||||||
|  |  | ||||||
|  | @ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if (part->os_partition) { | 	if (part->os_partition) { | ||||||
| 		*error_log_cnt = info.seq_num; | 		*error_log_cnt = be32_to_cpu(info.seq_num); | ||||||
| 		*err_type = info.error_type; | 		*err_type = be32_to_cpu(info.error_type); | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	return 0; | 	return 0; | ||||||
|  | @ -529,9 +529,9 @@ static int zip_oops(size_t text_len) | ||||||
| 		pr_err("nvram: logging uncompressed oops/panic report\n"); | 		pr_err("nvram: logging uncompressed oops/panic report\n"); | ||||||
| 		return -1; | 		return -1; | ||||||
| 	} | 	} | ||||||
| 	oops_hdr->version = OOPS_HDR_VERSION; | 	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); | ||||||
| 	oops_hdr->report_length = (u16) zipped_len; | 	oops_hdr->report_length = cpu_to_be16(zipped_len); | ||||||
| 	oops_hdr->timestamp = get_seconds(); | 	oops_hdr->timestamp = cpu_to_be64(get_seconds()); | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, | ||||||
| 				clobbering_unread_rtas_event()) | 				clobbering_unread_rtas_event()) | ||||||
| 		return -1; | 		return -1; | ||||||
|  |  | ||||||
| 	oops_hdr->version = OOPS_HDR_VERSION; | 	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); | ||||||
| 	oops_hdr->report_length = (u16) size; | 	oops_hdr->report_length = cpu_to_be16(size); | ||||||
| 	oops_hdr->timestamp = get_seconds(); | 	oops_hdr->timestamp = cpu_to_be64(get_seconds()); | ||||||
|  |  | ||||||
| 	if (compressed) | 	if (compressed) | ||||||
| 		err_type = ERR_TYPE_KERNEL_PANIC_GZ; | 		err_type = ERR_TYPE_KERNEL_PANIC_GZ; | ||||||
|  | @ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | ||||||
| 		size_t length, hdr_size; | 		size_t length, hdr_size; | ||||||
|  |  | ||||||
| 		oops_hdr = (struct oops_log_info *)buff; | 		oops_hdr = (struct oops_log_info *)buff; | ||||||
| 		if (oops_hdr->version < OOPS_HDR_VERSION) { | 		if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { | ||||||
| 			/* Old format oops header had 2-byte record size */ | 			/* Old format oops header had 2-byte record size */ | ||||||
| 			hdr_size = sizeof(u16); | 			hdr_size = sizeof(u16); | ||||||
| 			length = oops_hdr->version; | 			length = be16_to_cpu(oops_hdr->version); | ||||||
| 			time->tv_sec = 0; | 			time->tv_sec = 0; | ||||||
| 			time->tv_nsec = 0; | 			time->tv_nsec = 0; | ||||||
| 		} else { | 		} else { | ||||||
| 			hdr_size = sizeof(*oops_hdr); | 			hdr_size = sizeof(*oops_hdr); | ||||||
| 			length = oops_hdr->report_length; | 			length = be16_to_cpu(oops_hdr->report_length); | ||||||
| 			time->tv_sec = oops_hdr->timestamp; | 			time->tv_sec = be64_to_cpu(oops_hdr->timestamp); | ||||||
| 			time->tv_nsec = 0; | 			time->tv_nsec = 0; | ||||||
| 		} | 		} | ||||||
| 		*buf = kmalloc(length, GFP_KERNEL); | 		*buf = kmalloc(length, GFP_KERNEL); | ||||||
|  | @ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, | ||||||
| 		kmsg_dump_get_buffer(dumper, false, | 		kmsg_dump_get_buffer(dumper, false, | ||||||
| 				     oops_data, oops_data_sz, &text_len); | 				     oops_data, oops_data_sz, &text_len); | ||||||
| 		err_type = ERR_TYPE_KERNEL_PANIC; | 		err_type = ERR_TYPE_KERNEL_PANIC; | ||||||
| 		oops_hdr->version = OOPS_HDR_VERSION; | 		oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); | ||||||
| 		oops_hdr->report_length = (u16) text_len; | 		oops_hdr->report_length = cpu_to_be16(text_len); | ||||||
| 		oops_hdr->timestamp = get_seconds(); | 		oops_hdr->timestamp = cpu_to_be64(get_seconds()); | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	(void) nvram_write_os_partition(&oops_log_partition, oops_buf, | 	(void) nvram_write_os_partition(&oops_log_partition, oops_buf, | ||||||
| 		(int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, | 		(int) (sizeof(*oops_hdr) + text_len), err_type, | ||||||
| 		++oops_count); | 		++oops_count); | ||||||
|  |  | ||||||
| 	spin_unlock_irqrestore(&lock, flags); | 	spin_unlock_irqrestore(&lock, flags); | ||||||
|  |  | ||||||
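The nvram hunks above fix a persistent-format bug: headers written to NVRAM must have one fixed byte order, so the struct fields become __be16/__be64, every store goes through cpu_to_be*() and every load through be*_to_cpu(). Note also that the last hunk writes sizeof(*oops_hdr) + text_len instead of re-reading the now byte-swapped report_length field. The pattern in miniature (the struct and version number are stand-ins):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct hdr_example {
		__be16 version;
		__be16 report_length;
		__be64 timestamp;
	} __attribute__((packed));

	static void fill_hdr(struct hdr_example *h, u16 len, u64 now)
	{
		h->version       = cpu_to_be16(3);	/* 3: stand-in version */
		h->report_length = cpu_to_be16(len);
		h->timestamp     = cpu_to_be64(now);
	}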
|  | @ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | ||||||
| { | { | ||||||
| 	struct device_node *dn, *pdn; | 	struct device_node *dn, *pdn; | ||||||
| 	struct pci_bus *bus; | 	struct pci_bus *bus; | ||||||
| 	const uint32_t *pcie_link_speed_stats; | 	const __be32 *pcie_link_speed_stats; | ||||||
|  |  | ||||||
| 	bus = bridge->bus; | 	bus = bridge->bus; | ||||||
|  |  | ||||||
|  | @ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | ||||||
| 		return 0; | 		return 0; | ||||||
|  |  | ||||||
| 	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | 	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | ||||||
| 		pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, | 		pcie_link_speed_stats = of_get_property(pdn, | ||||||
| 			"ibm,pcie-link-speed-stats", NULL); | 			"ibm,pcie-link-speed-stats", NULL); | ||||||
| 		if (pcie_link_speed_stats) | 		if (pcie_link_speed_stats) | ||||||
| 			break; | 			break; | ||||||
|  | @ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | ||||||
| 		return 0; | 		return 0; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	switch (pcie_link_speed_stats[0]) { | 	switch (be32_to_cpup(pcie_link_speed_stats)) { | ||||||
| 	case 0x01: | 	case 0x01: | ||||||
| 		bus->max_bus_speed = PCIE_SPEED_2_5GT; | 		bus->max_bus_speed = PCIE_SPEED_2_5GT; | ||||||
| 		break; | 		break; | ||||||
|  | @ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | ||||||
| 		break; | 		break; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	switch (pcie_link_speed_stats[1]) { | 	switch (be32_to_cpup(pcie_link_speed_stats + 1)) { | ||||||
| 	case 0x01: | 	case 0x01: | ||||||
| 		bus->cur_bus_speed = PCIE_SPEED_2_5GT; | 		bus->cur_bus_speed = PCIE_SPEED_2_5GT; | ||||||
| 		break; | 		break; | ||||||
|  |  | ||||||
|  | @ -6,7 +6,7 @@ lib-y  = delay.o memmove.o memchr.o \ | ||||||
| 	 checksum.o strlen.o div64.o div64-generic.o | 	 checksum.o strlen.o div64.o div64-generic.o | ||||||
|  |  | ||||||
| # Extracted from libgcc | # Extracted from libgcc | ||||||
| lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ | obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ | ||||||
| 	 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ | 	 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ | ||||||
| 	 udiv_qrnnd.o | 	 udiv_qrnnd.o | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) | ||||||
| } | } | ||||||
| 
|  |  | ||||||
| static inline unsigned long pte_accessible(pte_t a) | static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) | ||||||
| { | { | ||||||
| 	return pte_val(a) & _PAGE_VALID; | 	return pte_val(a) & _PAGE_VALID; | ||||||
| } | } | ||||||
|  | @ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | ||||||
| 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | 	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | ||||||
| 	 *             and SUN4V pte layout, so this inline test is fine. | 	 *             and SUN4V pte layout, so this inline test is fine. | ||||||
| 	 */ | 	 */ | ||||||
| 	if (likely(mm != &init_mm) && pte_accessible(orig)) | 	if (likely(mm != &init_mm) && pte_accessible(mm, orig)) | ||||||
| 		tlb_batch_add(mm, addr, ptep, orig, fullmm); | 		tlb_batch_add(mm, addr, ptep, orig, fullmm); | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @ -26,6 +26,7 @@ config X86 | ||||||
| 	select HAVE_AOUT if X86_32 | 	select HAVE_AOUT if X86_32 | ||||||
| 	select HAVE_UNSTABLE_SCHED_CLOCK | 	select HAVE_UNSTABLE_SCHED_CLOCK | ||||||
| 	select ARCH_SUPPORTS_NUMA_BALANCING | 	select ARCH_SUPPORTS_NUMA_BALANCING | ||||||
|  | 	select ARCH_SUPPORTS_INT128 if X86_64 | ||||||
| 	select ARCH_WANTS_PROT_NUMA_PROT_NONE | 	select ARCH_WANTS_PROT_NUMA_PROT_NONE | ||||||
| 	select HAVE_IDE | 	select HAVE_IDE | ||||||
| 	select HAVE_OPROFILE | 	select HAVE_OPROFILE | ||||||
|  |  | ||||||
|  | @ -452,9 +452,16 @@ static inline int pte_present(pte_t a) | ||||||
| } | } | ||||||
|  |  | ||||||
| #define pte_accessible pte_accessible | #define pte_accessible pte_accessible | ||||||
| static inline int pte_accessible(pte_t a) | static inline bool pte_accessible(struct mm_struct *mm, pte_t a) | ||||||
| { | { | ||||||
| 	return pte_flags(a) & _PAGE_PRESENT; | 	if (pte_flags(a) & _PAGE_PRESENT) | ||||||
|  | 		return true; | ||||||
|  |  | ||||||
|  | 	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && | ||||||
|  | 			mm_tlb_flush_pending(mm)) | ||||||
|  | 		return true; | ||||||
|  |  | ||||||
|  | 	return false; | ||||||
| } | } | ||||||
|  |  | ||||||
| static inline int pte_hidden(pte_t pte) | static inline int pte_hidden(pte_t pte) | ||||||
|  |  | ||||||
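The x86 pte_accessible() change above encodes a TLB subtlety: a PTE just cleared to PROTNONE/NUMA for hinting can still be live in a remote CPU's TLB until the pending flush completes, so batching code must keep treating it as accessible; that is why the helper now takes the mm and consults mm_tlb_flush_pending(). The predicate modeled in plain C (the flag values stand in for _PAGE_PRESENT and _PAGE_PROTNONE/_PAGE_NUMA):

	#include <stdbool.h>

	#define PRESENT		0x01u
	#define PROTNONE	0x02u

	static bool accessible(unsigned int flags, bool tlb_flush_pending)
	{
		if (flags & PRESENT)
			return true;
		/* cleared for NUMA hinting, possibly still cached remotely */
		return (flags & PROTNONE) && tlb_flush_pending;
	}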
|  | @ -7,6 +7,12 @@ | ||||||
|  |  | ||||||
| DECLARE_PER_CPU(int, __preempt_count); | DECLARE_PER_CPU(int, __preempt_count); | ||||||
|  |  | ||||||
|  | /* | ||||||
|  |  * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such | ||||||
|  |  * that a decrement hitting 0 means we can and should reschedule. | ||||||
|  |  */ | ||||||
|  | #define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED) | ||||||
|  |  | ||||||
| /* | /* | ||||||
|  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users |  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | ||||||
|  * that think a non-zero value indicates we cannot preempt. |  * that think a non-zero value indicates we cannot preempt. | ||||||
|  | @ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val) | ||||||
| 	__this_cpu_add_4(__preempt_count, -val); | 	__this_cpu_add_4(__preempt_count, -val); | ||||||
| } | } | ||||||
|  |  | ||||||
|  | /* | ||||||
|  |  * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule | ||||||
|  |  * a decrement which hits zero means we have no preempt_count and should | ||||||
|  |  * reschedule. | ||||||
|  |  */ | ||||||
| static __always_inline bool __preempt_count_dec_and_test(void) | static __always_inline bool __preempt_count_dec_and_test(void) | ||||||
| { | { | ||||||
| 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); | 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); | ||||||
|  |  | ||||||
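The PREEMPT_ENABLED definition above is the inverted-bit trick spelled out: PREEMPT_NEED_RESCHED stays set while no reschedule is wanted, so the enabled-and-idle count is non-zero, and a single decrement reaching exactly zero means both "outermost preempt_enable()" and "reschedule needed"; one test replaces two. A tiny userspace model of the arithmetic:

	#include <stdio.h>

	#define NEED_RESCHED_BIT 0x80000000u	/* stand-in for PREEMPT_NEED_RESCHED */

	int main(void)
	{
		unsigned int count = 0 + NEED_RESCHED_BIT;	/* "enabled" state */

		count &= ~NEED_RESCHED_BIT;	/* scheduler: resched now needed */
		count += 1;			/* preempt_disable() */
		count -= 1;			/* preempt_enable(): hits zero */
		printf("reschedule: %s\n", count == 0 ? "yes" : "no");
		return 0;
	}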
|  | @ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c) | ||||||
| 			set_cpu_cap(c, X86_FEATURE_PEBS); | 			set_cpu_cap(c, X86_FEATURE_PEBS); | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) | 	if (c->x86 == 6 && cpu_has_clflush && | ||||||
|  | 	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) | ||||||
| 		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | 		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | ||||||
|  |  | ||||||
| #ifdef CONFIG_X86_64 | #ifdef CONFIG_X86_64 | ||||||
|  |  | ||||||
|  | @ -262,11 +262,20 @@ struct cpu_hw_events { | ||||||
| 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | ||||||
| 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | ||||||
|  |  | ||||||
| #define EVENT_CONSTRAINT_END		\ | /* | ||||||
| 	EVENT_CONSTRAINT(0, 0, 0) |  * We define the end marker as having a weight of -1 | ||||||
|  |  * to enable blacklisting of events using a counter bitmask | ||||||
|  |  * of zero and thus a weight of zero. | ||||||
|  |  * The end marker has a weight that cannot possibly be | ||||||
|  |  * obtained from counting the bits in the bitmask. | ||||||
|  |  */ | ||||||
|  | #define EVENT_CONSTRAINT_END { .weight = -1 } | ||||||
|  |  | ||||||
|  | /* | ||||||
|  |  * Check for end marker with weight == -1 | ||||||
|  |  */ | ||||||
| #define for_each_event_constraint(e, c)	\ | #define for_each_event_constraint(e, c)	\ | ||||||
| 	for ((e) = (c); (e)->weight; (e)++) | 	for ((e) = (c); (e)->weight != -1; (e)++) | ||||||
|  |  | ||||||
| /* | /* | ||||||
|  * Extra registers for specific events. |  * Extra registers for specific events. | ||||||
|  |  | ||||||
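The EVENT_CONSTRAINT_END rework above is a sentinel redesign: terminating the walk on weight == 0 made it impossible to keep a deliberately empty counter bitmask (weight 0) as a blacklist entry, so the end marker becomes weight -1, a value no bit count can produce. The iteration in miniature:

	#include <stdio.h>

	struct constraint { int weight; };
	#define CONSTRAINT_END { .weight = -1 }	/* unreachable as a bit count */

	int main(void)
	{
		struct constraint tbl[] = { { 2 }, { 0 }, CONSTRAINT_END };
		struct constraint *c;

		/* the weight-0 "blacklist" entry now survives the walk */
		for (c = tbl; c->weight != -1; c++)
			printf("weight=%d\n", c->weight);
		return 0;
	}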
|  | @ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | ||||||
| 		pte_t pte = gup_get_pte(ptep); | 		pte_t pte = gup_get_pte(ptep); | ||||||
| 		struct page *page; | 		struct page *page; | ||||||
|  |  | ||||||
|  | 		/* Similar to the PMD case, NUMA hinting must take slow path */ | ||||||
|  | 		if (pte_numa(pte)) { | ||||||
|  | 			pte_unmap(ptep); | ||||||
|  | 			return 0; | ||||||
|  | 		} | ||||||
|  |  | ||||||
| 		if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { | 		if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { | ||||||
| 			pte_unmap(ptep); | 			pte_unmap(ptep); | ||||||
| 			return 0; | 			return 0; | ||||||
|  | @ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | ||||||
| 		if (pmd_none(pmd) || pmd_trans_splitting(pmd)) | 		if (pmd_none(pmd) || pmd_trans_splitting(pmd)) | ||||||
| 			return 0; | 			return 0; | ||||||
| 		if (unlikely(pmd_large(pmd))) { | 		if (unlikely(pmd_large(pmd))) { | ||||||
|  | 			/* | ||||||
|  | 			 * NUMA hinting faults need to be handled in the GUP | ||||||
|  | 			 * slowpath for accounting purposes and so that they | ||||||
|  | 			 * can be serialised against THP migration. | ||||||
|  | 			 */ | ||||||
|  | 			if (pmd_numa(pmd)) | ||||||
|  | 				return 0; | ||||||
| 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) | 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) | ||||||
| 				return 0; | 				return 0; | ||||||
| 		} else { | 		} else { | ||||||
|  |  | ||||||
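Both gup.c hunks above enforce the same rule: the lockless fast path must not satisfy a NUMA-hinting entry, because the hint only works if a real fault is taken (for accounting, and to serialize against THP migration), so pte_numa()/pmd_numa() entries fall back to the slow path. The gate reduced to a toy predicate:

	#include <stdbool.h>

	struct pte_bits { bool present, special, numa; };

	/* false => the caller falls back to the faulting slow path */
	static bool fast_gup_ok(struct pte_bits p)
	{
		if (p.numa)	/* the hinting fault must be taken, not skipped */
			return false;
		return p.present && !p.special;
	}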
|  | @ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = { | ||||||
| void blk_mq_unregister_disk(struct gendisk *disk) | void blk_mq_unregister_disk(struct gendisk *disk) | ||||||
| { | { | ||||||
| 	struct request_queue *q = disk->queue; | 	struct request_queue *q = disk->queue; | ||||||
|  | 	struct blk_mq_hw_ctx *hctx; | ||||||
|  | 	struct blk_mq_ctx *ctx; | ||||||
|  | 	int i, j; | ||||||
|  |  | ||||||
|  | 	queue_for_each_hw_ctx(q, hctx, i) { | ||||||
|  | 		hctx_for_each_ctx(hctx, ctx, j) { | ||||||
|  | 			kobject_del(&ctx->kobj); | ||||||
|  | 			kobject_put(&ctx->kobj); | ||||||
|  | 		} | ||||||
|  | 		kobject_del(&hctx->kobj); | ||||||
|  | 		kobject_put(&hctx->kobj); | ||||||
|  | 	} | ||||||
|  |  | ||||||
| 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); | 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); | ||||||
| 	kobject_del(&q->mq_kobj); | 	kobject_del(&q->mq_kobj); | ||||||
|  | 	kobject_put(&q->mq_kobj); | ||||||
|  |  | ||||||
| 	kobject_put(&disk_to_dev(disk)->kobj); | 	kobject_put(&disk_to_dev(disk)->kobj); | ||||||
| } | } | ||||||
|  |  | ||||||
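The blk-mq hunk above plugs a leak by tearing down every software and hardware context kobject on unregister: kobject_del() only removes the sysfs node, and the backing memory is not released until the last kobject_put() drops the reference and the ktype's release callback runs. The del-then-put pairing as a generic helper:

	#include <linux/kobject.h>

	/* Remove from sysfs first, then drop our reference. */
	static void destroy_kobj(struct kobject *kobj)
	{
		kobject_del(kobj);	/* invisible now, but still referenced */
		kobject_put(kobj);	/* may free via kobj->ktype->release() */
	}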
|  | @ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig" | ||||||
| config ACPI_EXTLOG | config ACPI_EXTLOG | ||||||
| 	tristate "Extended Error Log support" | 	tristate "Extended Error Log support" | ||||||
| 	depends on X86_MCE && X86_LOCAL_APIC | 	depends on X86_MCE && X86_LOCAL_APIC | ||||||
| 	select EFI |  | ||||||
| 	select UEFI_CPER | 	select UEFI_CPER | ||||||
| 	default n | 	default n | ||||||
| 	help | 	help | ||||||
|  |  | ||||||
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
 	{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
 	{ "INT33B2", },
+	{ "INT33FC", },

 	{ "INT3430", (unsigned long)&lpt_dev_desc },
 	{ "INT3431", (unsigned long)&lpt_dev_desc },

@@ -2,7 +2,6 @@ config ACPI_APEI
 	bool "ACPI Platform Error Interface (APEI)"
 	select MISC_FILESYSTEMS
 	select PSTORE
-	select EFI
 	select UEFI_CPER
 	depends on X86
 	help

@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info erst_info = {
 	.owner		= THIS_MODULE,
 	.name		= "erst",
+	.flags		= PSTORE_FLAGS_FRAGILE,
 	.open		= erst_open_pstore,
 	.close		= erst_close_pstore,
 	.read		= erst_reader,

@@ -1238,15 +1238,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;

-	/* AHCI controllers often implement SFF compatible interface.
-	 * Grab all PCI BARs just in case.
-	 */
-	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
-	if (rc == -EBUSY)
-		pcim_pin_device(pdev);
-	if (rc)
-		return rc;
-
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
 		u8 map;
@@ -1263,6 +1254,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}

+	/* AHCI controllers often implement SFF compatible interface.
+	 * Grab all PCI BARs just in case.
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+
 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
 	if (!hpriv)
 		return -ENOMEM;

@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
 	/*
 	 * set PHY Paremeters, two steps to configure the GPR13,
 	 * one write for rest of parameters, mask of first write
-	 * is 0x07fffffd, and the other one write for setting
+	 * is 0x07ffffff, and the other one write for setting
 	 * the mpll_clk_en.
 	 */
 	regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
 			| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
 			| IMX6Q_GPR13_SATA_TX_BOOST_MASK
 			| IMX6Q_GPR13_SATA_TX_LVL_MASK
+			| IMX6Q_GPR13_SATA_MPLL_CLK_EN
 			| IMX6Q_GPR13_SATA_TX_EDGE_RATE
 			, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
 			| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M

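Both parts of this fix come down to regmap_update_bits() semantics: a bit can only be written if it is also present in the mask argument, so a value bit outside the mask is silently dropped. A hedged userspace sketch of the read-modify-write this implies, using bit 1 (the bit by which 0x07fffffd and 0x07ffffff differ):

  #include <stdint.h>
  #include <stdio.h>

  /* The update rule regmap_update_bits() applies: only bits that are
   * set in @mask can be changed by @val. */
  static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
  {
      return (old & ~mask) | (val & mask);
  }

  int main(void)
  {
      uint32_t clk_en = 1u << 1;    /* bit 1, missing from the old mask */

      /* Old mask 0x07fffffd excludes bit 1: the enable bit is dropped. */
      printf("old mask: %#x\n", update_bits(0, 0x07fffffd, clk_en));
      /* Corrected mask 0x07ffffff includes bit 1: the write sticks. */
      printf("new mask: %#x\n", update_bits(0, 0x07ffffff, clk_en));
      return 0;
  }
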
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
 				    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
 				    err_mask);
 		} else {
+			u8 *cmds = dev->ncq_send_recv_cmds;
+
 			dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
-			memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
-				ATA_LOG_NCQ_SEND_RECV_SIZE);
+			memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+			if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+				ata_dev_dbg(dev, "disabling queued TRIM support\n");
+				cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+					~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+			}
 		}
 	}

@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
 						ATA_HORKAGE_FIRMWARE_WARN },

+	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+
 	/* Blacklist entries taken from Silicon Image 3124/3132
 	   Windows driver .inf file - also several Linux problem reports */
 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

+	/* devices that don't properly handle queued TRIM commands */
+	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Crucial_CT???M500SSD1",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+
 	/* End Marker */
 	{ }
 };
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
 		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
 		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
 		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
+		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
 	};
 	char *start = *cur, *p = *cur;
 	char *id, *val, *endp;

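The ATA_HORKAGE_NO_NCQ_TRIM path works by editing the device's own capability report: after copying the NCQ Send/Recv log page, the DSM TRIM support bit is masked out so upper layers never see, and therefore never issue, queued TRIM on the blacklisted drives. A small sketch of the same clear-a-capability-bit idea, with illustrative offset and flag values rather than the real log-page layout:

  #include <stdint.h>
  #include <stdio.h>

  #define LOG_DSM_OFFSET  0x04          /* illustrative, not the ATA spec value */
  #define LOG_DSM_TRIM    (1u << 0)     /* illustrative flag bit */

  int main(void)
  {
      uint8_t log_page[16] = { [LOG_DSM_OFFSET] = LOG_DSM_TRIM | 0x02 };

      /* Quirked device: hide queued-TRIM support before anyone reads it. */
      log_page[LOG_DSM_OFFSET] &= ~LOG_DSM_TRIM;

      printf("DSM byte after quirk: %#x\n", log_page[LOG_DSM_OFFSET]);
      return 0;
  }
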
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
 		return;
 	}

+	/*
+	 * XXX - UGLY HACK
+	 *
+	 * The block layer suspend/resume path is fundamentally broken due
+	 * to freezable kthreads and workqueue and may deadlock if a block
+	 * device gets removed while resume is in progress.  I don't know
+	 * what the solution is short of removing freezable kthreads and
+	 * workqueues altogether.
+	 *
+	 * The following is an ugly hack to avoid kicking off device
+	 * removal while freezer is active.  This is a joke but does avoid
+	 * this particular deadlock scenario.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+	 * http://marc.info/?l=linux-kernel&m=138695698516487
+	 */
+#ifdef CONFIG_FREEZER
+	while (pm_freezing)
+		msleep(10);
+#endif
+
 	DPRINTK("ENTER\n");
 	mutex_lock(&ap->scsi_scan_mutex);

@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
 	NULL_Q_MQ		= 2,
 };

-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");

@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

 static void put_tag(struct nullb_queue *nq, unsigned int tag)
 {
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)

 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
 {
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-				hctx_index);
+	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+	int tip = (reg->nr_hw_queues % nr_online_nodes);
+	int node = 0, i, n;
+
+	/*
+	 * Split submit queues evenly wrt to the number of nodes. If uneven,
+	 * fill the first buckets with one extra, until the rest is filled with
+	 * no extra.
+	 */
+	for (i = 0, n = 1; i < hctx_index; i++, n++) {
+		if (n % b_size == 0) {
+			n = 0;
+			node++;
+
+			tip--;
+			if (!tip)
+				b_size = reg->nr_hw_queues / nr_online_nodes;
+		}
+	}
+
+	/*
+	 * A node might not be online, therefore map the relative node id to the
+	 * real node id.
+	 */
+	for_each_online_node(n) {
+		if (!node)
+			break;
+		node--;
+	}
+
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }

 static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
 	kfree(hctx);
 }

+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int index)
 {
 	struct nullb *nullb = data;
 	struct nullb_queue *nq = &nullb->queues[index];

-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-	nullb->nr_queues++;
 	hctx->driver_data = nq;
+	null_init_queue(nullb, nq);
+	nullb->nr_queues++;

 	return 0;
 }
@@ -417,13 +455,13 @@ static int setup_commands(struct nullb_queue *nq)

 	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
 	if (!nq->cmds)
-		return 1;
+		return -ENOMEM;

 	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
 	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
 	if (!nq->tag_map) {
 		kfree(nq->cmds);
-		return 1;
+		return -ENOMEM;
 	}

 	for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)

 static int setup_queues(struct nullb *nullb)
 {
-	struct nullb_queue *nq;
-	int i;
-
-	nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+								GFP_KERNEL);
 	if (!nullb->queues)
-		return 1;
+		return -ENOMEM;

 	nullb->nr_queues = 0;
 	nullb->queue_depth = hw_queue_depth;

-	if (queue_mode == NULL_Q_MQ)
-		return 0;
+	return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+	struct nullb_queue *nq;
+	int i, ret = 0;

 	for (i = 0; i < submit_queues; i++) {
 		nq = &nullb->queues[i];
-		init_waitqueue_head(&nq->wait);
-		nq->queue_depth = hw_queue_depth;
-		if (setup_commands(nq))
-			break;
+
+		null_init_queue(nullb, nq);
+
+		ret = setup_commands(nq);
+		if (ret)
+			goto err_queue;
 		nullb->nr_queues++;
 	}

-	if (i == submit_queues)
-		return 0;
-
+	return 0;
+err_queue:
 	cleanup_queues(nullb);
-	return 1;
+	return ret;
 }

 static int null_add_dev(void)
@@ -518,11 +560,13 @@ static int null_add_dev(void)
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		blk_queue_make_request(nullb->q, null_queue_bio);
+		init_driver_queues(nullb);
 	} else {
 		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		if (nullb->q)
 			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		init_driver_queues(nullb);
 	}

 	if (!nullb->q)
@@ -579,7 +623,13 @@ static int __init null_init(void)
 	}
 #endif

-	if (submit_queues > nr_cpu_ids)
+	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+		if (submit_queues < nr_online_nodes) {
+			pr_warn("null_blk: submit_queues param is set to %u.",
+							nr_online_nodes);
+			submit_queues = nr_online_nodes;
+		}
+	} else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
 		submit_queues = 1;

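The bucket arithmetic in null_alloc_hctx() is easiest to see with numbers: for nr_hw_queues = 8 on 3 nodes, b_size starts at DIV_ROUND_UP(8, 3) = 3 and tip = 8 % 3 = 2, so the first two nodes take 3 queues each and the last takes 2. A standalone userspace mirror of the same walk (assuming all node ids are online, so the relative-to-real node mapping is the identity):

  #include <stdio.h>

  #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

  /* Mirror of the bucket walk in null_alloc_hctx(): returns which node
   * (0..nodes-1) submission queue 'index' lands on. */
  static int hctx_node(int nr_hw_queues, int nodes, int index)
  {
      int b_size = DIV_ROUND_UP(nr_hw_queues, nodes);
      int tip = nr_hw_queues % nodes;
      int node = 0, i, n;

      for (i = 0, n = 1; i < index; i++, n++) {
          if (n % b_size == 0) {
              n = 0;
              node++;
              tip--;
              if (!tip)    /* out of "one extra" buckets: shrink bucket size */
                  b_size = nr_hw_queues / nodes;
          }
      }
      return node;
  }

  int main(void)
  {
      int i;

      /* 8 queues over 3 nodes: expect buckets of 3, 3, 2. */
      for (i = 0; i < 8; i++)
          printf("hctx %d -> node %d\n", i, hctx_node(8, 3, i));
      return 0;
  }
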
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
 	}
 }

-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 {
 	switch (state) {
 	case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 	}
 }

-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
 {
 	switch (state) {
 	case SKD_REQ_STATE_IDLE:

@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
 	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
 	int ret;

-	ret = regmap_update_bits(s2mps11->iodev->regmap,
+	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
 				S2MPS11_REG_RTC_CTRL,
 				 s2mps11->mask, s2mps11->mask);
 	if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
 	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
 	int ret;

-	ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
 			   s2mps11->mask, ~s2mps11->mask);

 	if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
 		s2mps11_clk->hw.init = &s2mps11_clks_init[i];
 		s2mps11_clk->mask = 1 << i;

-		ret = regmap_read(s2mps11_clk->iodev->regmap,
+		ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
 				  S2MPS11_REG_RTC_CTRL, &val);
 		if (ret < 0)
 			goto err_reg;

@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 config CLKSRC_EFM32
 	bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
 	depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+	select CLKSRC_MMIO
 	default ARCH_EFM32
 	help
 	  Support to use the timers of EFM32 SoCs as clock source and clock

@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)

 		init_func = match->data;
 		init_func(np);
-		of_node_put(np);
 	}
 }

@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)

 static u64 read_sched_clock(void)
 {
-	return __raw_readl(sched_io_base);
+	return ~__raw_readl(sched_io_base);
 }

 static const struct of_device_id sptimer_ids[] __initconst = {
 	{ .compatible = "picochip,pc3x2-rtc" },
-	{ .compatible = "snps,dw-apb-timer-sp" },
 	{ /* Sentinel */ },
 };

@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
 	num_called++;
 }
 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);

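The one-character fix in read_sched_clock() is there because this APB timer counts down, while sched_clock() must be monotonically increasing; complementing the raw value turns a decreasing counter into an increasing one with no extra arithmetic. A quick demonstration of the identity for 32-bit values:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      /* A down-counter sampled at three successive points in time. */
      uint32_t raw[] = { 0xfffffff0, 0xffffff00, 0xfffff000 };
      int i;

      for (i = 0; i < 3; i++)
          printf("raw=%#010x  ~raw=%#010x\n", raw[i], ~raw[i]);
      /* ~raw increases (0xf, 0xff, 0xfff) as the hardware counts down,
       * which is exactly what a sched_clock source needs. */
      return 0;
  }
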
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
 	       timer_base + TIMER_CTL_REG(0));

+	/* Make sure timer is stopped before playing with interrupts */
+	sun4i_clkevt_time_stop(0);
+
 	ret = setup_irq(irq, &sun4i_timer_irq);
 	if (ret)
 		pr_warn("failed to setup irq %d\n", irq);

@@ -255,11 +255,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)

 	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

-	/*
-	 * Set scale and timer for sched_clock.
-	 */
-	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
 	/*
 	 * Setup free-running clocksource timer (interrupts
 	 * disabled).
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
 	timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
 			     TIMER0_DIV(TIMER_DIVIDER_SHIFT));

+	/*
+	 * Set scale and timer for sched_clock.
+	 */
+	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
 	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
 			      "armada_370_xp_clocksource",
 			      timer_clk, 300, 32, clocksource_mmio_readl_down);

@@ -828,6 +828,12 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 	int ret = 0;

 	memcpy(&new_policy, policy, sizeof(*policy));
+
+	/* Use the default policy if its valid. */
+	if (cpufreq_driver->setpolicy)
+		cpufreq_parse_governor(policy->governor->name,
+					&new_policy.policy, NULL);
+
 	/* assure that the starting sequence is run in cpufreq_set_policy */
 	policy->governor = NULL;

@@ -845,8 +851,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)

 #ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-				  unsigned int cpu, struct device *dev,
-				  bool frozen)
+				  unsigned int cpu, struct device *dev)
 {
 	int ret = 0;
 	unsigned long flags;
@@ -877,11 +882,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 		}
 	}

-	/* Don't touch sysfs links during light-weight init */
-	if (!frozen)
-		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
-	return ret;
+	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 }
 #endif

@@ -926,6 +927,27 @@ err_free_policy:
 	return NULL;
 }

+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+	struct kobject *kobj;
+	struct completion *cmp;
+
+	down_read(&policy->rwsem);
+	kobj = &policy->kobj;
+	cmp = &policy->kobj_unregister;
+	up_read(&policy->rwsem);
+	kobject_put(kobj);
+
+	/*
+	 * We need to make sure that the underlying kobj is
+	 * actually not referenced anymore by anybody before we
+	 * proceed with unloading.
+	 */
+	pr_debug("waiting for dropping of refcount\n");
+	wait_for_completion(cmp);
+	pr_debug("wait complete\n");
+}
+
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
 	free_cpumask_var(policy->related_cpus);
@@ -986,7 +1008,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
 		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
 			up_read(&cpufreq_rwsem);
 			return ret;
 		}
@@ -1096,7 +1118,10 @@ err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
+	if (frozen)
+		cpufreq_policy_put_kobj(policy);
 	cpufreq_policy_free(policy);
+
 nomem_out:
 	up_read(&cpufreq_rwsem);

@@ -1118,7 +1143,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 }

 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
-					   unsigned int old_cpu, bool frozen)
+					   unsigned int old_cpu)
 {
 	struct device *cpu_dev;
 	int ret;
@@ -1126,10 +1151,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	/* first sibling now owns the new sysfs dir */
 	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

-	/* Don't touch sysfs files during light-weight tear-down */
-	if (frozen)
-		return cpu_dev->id;
-
 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
 	if (ret) {
@@ -1196,7 +1217,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		if (!frozen)
 			sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
-		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);

@@ -1218,8 +1239,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 	int ret;
 	unsigned long flags;
 	struct cpufreq_policy *policy;
-	struct kobject *kobj;
-	struct completion *cmp;

 	read_lock_irqsave(&cpufreq_driver_lock, flags);
 	policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1268,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 			}
 		}

-		if (!frozen) {
-			down_read(&policy->rwsem);
-			kobj = &policy->kobj;
-			cmp = &policy->kobj_unregister;
-			up_read(&policy->rwsem);
-			kobject_put(kobj);
-
-			/*
-			 * We need to make sure that the underlying kobj is
-			 * actually not referenced anymore by anybody before we
-			 * proceed with unloading.
-			 */
-			pr_debug("waiting for dropping of refcount\n");
-			wait_for_completion(cmp);
-			pr_debug("wait complete\n");
-		}
+		if (!frozen)
+			cpufreq_policy_put_kobj(policy);

 		/*
 		 * Perform the ->exit() even during light-weight tear-down,

@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.

+config DMA_ENGINE_RAID
+	bool
+
 endif

@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}

 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)

@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;

-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;

@@ -539,9 +539,9 @@ static int dmatest_func(void *data)

 		um->len = params->buf_size;
 		for (i = 0; i < src_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->srcs[i];
+			void *buf = thread->srcs[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 		dsts = &um->addr[src_cnt];
 		for (i = 0; i < dst_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->dsts[i];
+			void *buf = thread->dsts[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

 			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 					       DMA_BIDIRECTIONAL);

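The dmatest change keeps the buffer as a pointer and only casts a copy for the offset arithmetic, which is the usual way to recover the offset-within-page of a virtual address: the low bits of the address below the page boundary. A userspace sketch of the same computation (PAGE_SIZE hardcoded to 4096 for illustration):

  #include <stdio.h>
  #include <stdlib.h>

  #define PAGE_SIZE  4096UL
  #define PAGE_MASK  (~(PAGE_SIZE - 1))

  int main(void)
  {
      void *buf = malloc(8192);

      /* Offset of buf within its page: the low bits of the address. */
      unsigned long pg_off = (unsigned long) buf & ~PAGE_MASK;

      printf("buf=%p offset-in-page=%lu\n", buf, pg_off);
      free(buf);
      return 0;
  }
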
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }

-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }

-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }

-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;

 	spin_lock_irqsave(&chan->desc_lock, flags);

@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 				      struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);

 	/* Run the link descriptor callback function */
 	if (txd->callback) {

@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 	hw_desc->desc_command = (1 << 31);
 }

-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000

 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;

-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;

-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}

 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;

 	dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}

-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}

-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+				 PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;
+
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				  PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;

 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}

 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}

 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;

-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}

 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}

-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);

 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}

-	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}

-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	/* test xor */
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}
+
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+				      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;

 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);

 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}

 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		int i = 0;

 		for_each_child_of_node(pdev->dev.of_node, np) {
+			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;

@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}

-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
-				xordev->channels[i] = NULL;
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
 				goto err_channel_add;
 			}

+			xordev->channels[i] = chan;
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
 			struct mv_xor_channel_data *cd;
+			struct mv_xor_chan *chan;
 			int irq;

 			cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}

-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cd->cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cd->cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				goto err_channel_add;
 			}
+
+			xordev->channels[i] = chan;
 		}
 	}

@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)

 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-	desc->pchan = NULL;
 	desc->req.x = &desc->px;
 	desc->req.token = desc;
 	desc->rqcfg.swap = SWAP_NO;
-	desc->rqcfg.privileged = 0;
-	desc->rqcfg.insnaccess = 0;
 	desc->rqcfg.scctl = SCCTRL0;
 	desc->rqcfg.dcctl = DCCTRL0;
 	desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 	if (!pdmac)
 		return 0;

-	desc = kmalloc(count * sizeof(*desc), flg);
+	desc = kcalloc(count, sizeof(*desc), flg);
 	if (!desc)
 		return 0;

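Switching add_desc() from kmalloc() to kcalloc() both guards the count * size multiplication against overflow and hands back zeroed memory, which is what lets _init_desc() drop the explicit pchan/privileged/insnaccess zero-initialisation above. The userspace analogue is calloc() versus malloc():

  #include <stdio.h>
  #include <stdlib.h>

  struct desc {
      void *pchan;        /* must start out NULL */
      int privileged;     /* must start out 0 */
      int insnaccess;     /* must start out 0 */
      int scctl;
  };

  int main(void)
  {
      size_t count = 16;

      /* calloc() zeroes the array and checks count*size for overflow,
       * so no per-field zeroing pass is needed afterwards. */
      struct desc *d = calloc(count, sizeof(*d));
      if (!d)
          return 1;

      printf("pchan=%p privileged=%d\n", d[0].pchan, d[0].privileged);
      free(d);
      return 0;
  }
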
@@ -532,29 +532,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
 	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
 }

-/**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
-					int value, unsigned long flags)
-{
-	struct dma_cdb *hw_desc = desc->hw_desc;
-
-	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
-	desc->hw_next = NULL;
-	desc->src_cnt = 1;
-	desc->dst_cnt = 1;
-
-	if (flags & DMA_PREP_INTERRUPT)
-		set_bit(PPC440SPE_DESC_INT, &desc->flags);
-	else
-		clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
-	hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
-	hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
-	hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
 /**
  * ppc440spe_desc_set_src_addr - set source address into the descriptor
  */
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 		struct ppc440spe_adma_chan *chan,
 		dma_cookie_t cookie)
 {
-	int i;
-
 	BUG_ON(desc->async_tx.cookie < 0);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 			ppc440spe_adma_prep_dma_interrupt;
 	}
 	pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
-	  "( %s%s%s%s%s%s%s)\n",
+	  "( %s%s%s%s%s%s)\n",
 	  dev_name(adev->dev),
 	  dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
 	  dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",

drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	dma_async_tx_callback callback;
 	void *param;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
-	struct txx9dmac_slave *ds = dc->chan.private;
 
 	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
 		 txd->cookie, desc);
 
drivers/firewire/sbp2.c
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
 	.cmd_per_lun		= 1,
 	.can_queue		= 1,
 	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
-	.no_write_same		= 1,
 };
 
 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
 
drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
+obj-$(CONFIG_UEFI_CPER)		+= efi/
 
drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
 	  backend for pstore by default. This setting can be overridden
 	  using the efivars module's pstore_disable parameter.
 
-config UEFI_CPER
-	def_bool n
-
 endmenu
+
+config UEFI_CPER
+	bool
 
drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for linux kernel
 #
-obj-y					+= efi.o vars.o
+obj-$(CONFIG_EFI)			+= efi.o vars.o
 obj-$(CONFIG_EFI_VARS)			+= efivars.o
 obj-$(CONFIG_EFI_VARS_PSTORE)		+= efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)			+= cper.o
 
drivers/firmware/efi/efi-pstore.c
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info efi_pstore_info = {
 	.owner		= THIS_MODULE,
 	.name		= "efi",
+	.flags		= PSTORE_FLAGS_FRAGILE,
 	.open		= efi_pstore_open,
 	.close		= efi_pstore_close,
 	.read		= efi_pstore_read,
 
drivers/gpio/gpio-msm-v2.c
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
-	clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+	clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
 	__clear_bit(gpio, msm_gpio.enabled_irqs);
 	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	__set_bit(gpio, msm_gpio.enabled_irqs);
-	set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+	set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
 	writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
 	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
 }
 
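INTR_RAW_STATUS_EN and INTR_ENABLE are bit numbers, not masks, so or'ing them together computed the wrong register value; BIT() turns a bit number into the corresponding mask. A standalone illustration of the confusion, using made-up bit positions rather than the driver's real register layout:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1U << (n))
#define INTR_ENABLE		0	/* illustrative bit numbers only */
#define INTR_RAW_STATUS_EN	4

int main(void)
{
	/* Wrong: 4 | 0 == 4, which is BIT(2), an unrelated bit. */
	uint32_t wrong = INTR_RAW_STATUS_EN | INTR_ENABLE;
	/* Right: bits 4 and 0 are set. */
	uint32_t right = BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE);

	printf("wrong=0x%x right=0x%x\n", wrong, right);
	return 0;
}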
drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 	u32 pending;
 	unsigned int offset, irqs_handled = 0;
 
-	while ((pending = gpio_rcar_read(p, INTDT))) {
+	while ((pending = gpio_rcar_read(p, INTDT) &
+			  gpio_rcar_read(p, INTMSK))) {
 		offset = __ffs(pending);
 		gpio_rcar_write(p, INTCLR, BIT(offset));
 		generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
 
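With the fix, only interrupts that are both pending (INTDT) and enabled (INTMSK) enter the dispatch loop, so masked sources can no longer be handled spuriously. A standalone model of that loop with simulated register contents in place of real MMIO reads:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	uint32_t status = 0x0000012c;	/* pending bits 2,3,5,8 (simulated INTDT) */
	uint32_t mask   = 0x00000104;	/* only bits 2 and 8 enabled (simulated INTMSK) */
	uint32_t pending;

	while ((pending = status & mask)) {
		int offset = ffs((int)pending) - 1;	/* lowest pending enabled bit */
		status &= ~(1U << offset);		/* ack: simulated INTCLR write */
		printf("dispatch irq for gpio %d\n", offset);
	}
	return 0;
}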
drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
 	if (offset < TWL4030_GPIO_MAX)
 		ret = twl4030_set_gpio_direction(offset, 1);
 	else
-		ret = -EINVAL;
+		ret = -EINVAL;	/* LED outputs can't be set as input */
 
 	if (!ret)
 		priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
 static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
 {
 	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
-	int ret = -EINVAL;
+	int ret = 0;
 
 	mutex_lock(&priv->mutex);
-	if (offset < TWL4030_GPIO_MAX)
+	if (offset < TWL4030_GPIO_MAX) {
 		ret = twl4030_set_gpio_direction(offset, 0);
+		if (ret) {
+			mutex_unlock(&priv->mutex);
+			return ret;
+		}
+	}
+
+	/*
+	 *  LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
+	 */
 
 	priv->direction |= BIT(offset);
 	mutex_unlock(&priv->mutex);
 
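The reworked twl_direction_out() now drops priv->mutex before its early error return instead of bailing out with the lock still held. A compact pthreads sketch of the rule that every exit path must release the lock; set_hw_direction() is a hypothetical stand-in for the TWL register call:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int direction;

static int set_hw_direction(unsigned offset) { return offset > 7 ? -1 : 0; }

static int direction_out(unsigned offset)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = set_hw_direction(offset);
	if (ret) {
		pthread_mutex_unlock(&lock);	/* never return with the lock held */
		return ret;
	}
	direction |= 1U << offset;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("ok=%d err=%d\n", direction_out(3), direction_out(9));
	return 0;
}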
drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
 extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
 
 int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);
 
 int armada_overlay_plane_create(struct drm_device *, unsigned long);
 
drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
 		DRM_UNLOCKED),
 };
 
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+	armada_fbdev_lastclose(dev);
+}
+
 static const struct file_operations armada_drm_fops = {
 	.owner			= THIS_MODULE,
 	.llseek			= no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
 	.open			= NULL,
 	.preclose		= NULL,
 	.postclose		= NULL,
-	.lastclose		= NULL,
+	.lastclose		= armada_drm_lastclose,
 	.unload			= armada_drm_unload,
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= armada_drm_enable_vblank,
 
drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
-	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
-		dfb->fb.width, dfb->fb.height,
-		dfb->fb.bits_per_pixel, obj->phys_addr);
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+		dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+		(unsigned long long)obj->phys_addr);
 
 	return 0;
 
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
 	return ret;
 }
 
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_modeset_lock_all(dev);
+	if (priv->fbdev)
+		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+	drm_modeset_unlock_all(dev);
+}
+
 void armada_fbdev_fini(struct drm_device *dev)
 {
 	struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
 			framebuffer_release(info);
 		}
 
+		drm_fb_helper_fini(fbh);
+
 		if (fbh->fb)
 			fbh->fb->funcs->destroy(fbh->fb);
 
-		drm_fb_helper_fini(fbh);
-
 		priv->fbdev = NULL;
 	}
 }
 
|  |  | ||||||
|  | @ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) | ||||||
| 		obj->dev_addr = obj->linear->start; | 		obj->dev_addr = obj->linear->start; | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n", | 	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, | ||||||
| 			 obj, obj->phys_addr, obj->dev_addr); | 			 (unsigned long long)obj->phys_addr, | ||||||
|  | 			 (unsigned long long)obj->dev_addr); | ||||||
| 
 | 
 | ||||||
| 	return 0; | 	return 0; | ||||||
| } | } | ||||||
|  | @ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) | ||||||
| 			 * refcount on the gem object itself. | 			 * refcount on the gem object itself. | ||||||
| 			 */ | 			 */ | ||||||
| 			drm_gem_object_reference(obj); | 			drm_gem_object_reference(obj); | ||||||
| 			dma_buf_put(buf); |  | ||||||
| 			return obj; | 			return obj; | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | @ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	dobj->obj.import_attach = attach; | 	dobj->obj.import_attach = attach; | ||||||
|  | 	get_dma_buf(buf); | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * Don't call dma_buf_map_attachment() here - it maps the | 	 * Don't call dma_buf_map_attachment() here - it maps the | ||||||
|  |  | ||||||
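The import fix makes the reference counting symmetric: the early-return path for an already-known object no longer drops the caller's reference with dma_buf_put(), and a successful import now takes its own reference with get_dma_buf() for the lifetime of the attachment. A toy refcount model of that ownership rule; all names here are illustrative, not the dma-buf API:

#include <stdio.h>

struct buf { int refcount; };

static void buf_get(struct buf *b) { b->refcount++; }
static void buf_put(struct buf *b) { b->refcount--; }

static void import(struct buf *b)
{
	/* Keeping the buffer: take a reference owned by the importer;
	 * never touch the caller's reference on its behalf. */
	buf_get(b);
}

int main(void)
{
	struct buf b = { .refcount = 1 };	/* caller's reference */

	import(&b);
	buf_put(&b);	/* caller drops its own ref when done */
	/* importer's ref still pins the buffer: refcount == 1 */
	printf("refcount=%d\n", b.refcount);
	return 0;
}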
drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
 #define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
 /* Force reduced-blanking timings for detailed modes */
 #define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC			(1 << 8)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
 
 	/* Medion MD 30217 PG */
 	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 
 	drm_add_display_info(edid, &connector->display_info);
 
+	if (quirks & EDID_QUIRK_FORCE_8BPC)
+		connector->display_info.bpc = 8;
+
 	return num_modes;
 }
 EXPORT_SYMBOL(drm_add_edid_modes);
 
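The quirk table maps an EDID vendor/product pair to flag bits, and drm_add_edid_modes() applies EDID_QUIRK_FORCE_8BPC after filling in the display info, overriding the 6bpc this panel wrongly reports. A standalone sketch of that lookup-and-override flow, reduced to the one entry shown above; everything else is illustrative:

#include <stdio.h>
#include <string.h>

#define QUIRK_FORCE_8BPC	(1 << 8)

struct quirk { const char *vendor; int product; unsigned flags; };

static const struct quirk quirks[] = {
	{ "SEC", 0xd033, QUIRK_FORCE_8BPC },
};

static unsigned lookup_quirks(const char *vendor, int product)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (!strcmp(quirks[i].vendor, vendor) &&
		    quirks[i].product == product)
			return quirks[i].flags;
	return 0;
}

int main(void)
{
	int bpc = 6;	/* what the panel's EDID (wrongly) reports */
	unsigned flags = lookup_quirks("SEC", 0xd033);

	if (flags & QUIRK_FORCE_8BPC)
		bpc = 8;	/* override, as drm_add_edid_modes() does */
	printf("bpc=%d\n", bpc);
	return 0;
}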
drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 err_primary_node:
-	drm_put_minor(dev->primary);
+	drm_unplug_minor(dev->primary);
 err_render_node:
-	drm_put_minor(dev->render);
+	drm_unplug_minor(dev->render);
 err_control_node:
-	drm_put_minor(dev->control);
+	drm_unplug_minor(dev->control);
 err_agp:
 	if (dev->driver->bus->agp_destroy)
 		dev->driver->bus->agp_destroy(dev);
 
drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
 
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 
drivers/gpu/drm/i915/i915_drv.c
@@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
+		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
 
drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
+				 (((dev)->pdev->device & 0xf) == 0x2  || \
+				 ((dev)->pdev->device & 0xf) == 0x6 || \
+				 ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
 
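The new macros classify ULT parts purely by masking the PCI device ID: Haswell ULT by its 0x0A00 high byte, Broadwell ULT by a low nibble of 2, 6 or 0xe. A standalone rendering of those mask tests; the sample IDs are illustrative, and the real IS_BDW_ULT() additionally requires the Broadwell platform check:

#include <stdio.h>
#include <stdbool.h>

static bool is_hsw_ult(unsigned short dev) { return (dev & 0xFF00) == 0x0A00; }

static bool bdw_ult_nibble(unsigned short dev)
{
	unsigned short n = dev & 0xf;	/* nibble test only; see note above */
	return n == 0x2 || n == 0x6 || n == 0xe;
}

int main(void)
{
	unsigned short ids[] = { 0x0A16, 0x1616, 0x0412 };

	for (int i = 0; i < 3; i++)
		printf("0x%04x: hsw_ult=%d bdw_ult_nibble=%d\n",
		       ids[i], is_hsw_ult(ids[i]), bdw_ult_nibble(ids[i]));
	return 0;
}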
drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-				      struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+				       struct intel_ring_buffer *ring)
 {
-	u32 completed_seqno;
-	u32 acthd;
+	u32 completed_seqno = ring->get_seqno(ring, false);
+	u32 acthd = intel_ring_get_active_head(ring);
+	struct drm_i915_gem_request *request;
 
-	acthd = intel_ring_get_active_head(ring);
-	completed_seqno = ring->get_seqno(ring, false);
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_seqno_passed(completed_seqno, request->seqno))
+			continue;
 
+		i915_set_reset_status(ring, request, acthd);
+	}
+}
+
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_ring_buffer *ring)
+{
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (request->seqno > completed_seqno)
-			i915_set_reset_status(ring, request, acthd);
-
 		i915_gem_free_request(request);
 	}
 
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
 	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_lists(dev_priv, ring);
+		i915_gem_reset_ring_status(dev_priv, ring);
+
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_cleanup_ringbuffer(dev);
 
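The reset path is now two passes because the requests only borrow their references to the objects they track: the inspect pass (i915_gem_reset_ring_status) must read everything it needs before the cleanup pass (i915_gem_reset_ring_cleanup) frees the requests. A generic inspect-then-free walk over a linked list showing the same ordering constraint; the guilty-request rule here is made up:

#include <stdio.h>
#include <stdlib.h>

struct req { int seqno; struct req *next; };

static void inspect(struct req *head, int completed)
{
	for (struct req *r = head; r; r = r->next)
		if (r->seqno > completed)	/* not yet completed: a guilty candidate */
			printf("mark seqno %d\n", r->seqno);
}

static void cleanup(struct req **head)
{
	while (*head) {
		struct req *r = *head;
		*head = r->next;
		free(r);	/* safe: inspection already happened */
	}
}

int main(void)
{
	struct req *head = NULL;

	for (int s = 3; s >= 1; s--) {	/* build list 1,2,3 */
		struct req *r = malloc(sizeof(*r));
		if (!r)
			return 1;
		r->seqno = s;
		r->next = head;
		head = r;
	}
	inspect(head, 1);	/* pass 1: read everything first */
	cleanup(&head);		/* pass 2: only now tear it down */
	return 0;
}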
drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
 
drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
 
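Eviction now retries the scan exactly once: on the first miss it idles the GPU and retires requests, sets nonblocking, and jumps back to search_again; a second miss returns -ENOSPC. A skeleton of that retry-once idiom; try_alloc() and make_room() are hypothetical stand-ins:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static int room;
static int try_alloc(void) { return room >= 1 ? 0 : -ENOSPC; }
static void make_room(void) { room = 1; }	/* e.g. idle the GPU and retire requests */

static int alloc_with_retry(void)
{
	bool retried = false;

search_again:
	if (try_alloc() == 0)
		return 0;

	if (retried)
		return -ENOSPC;	/* second failure: give up */

	make_room();
	retried = true;
	goto search_again;	/* repeat the search exactly once */
}

int main(void)
{
	printf("ret=%d\n", alloc_with_retry());
	return 0;
}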
Some files were not shown because too many files have changed in this diff.