Merge branches 'doc.2015.01.07a', 'fixes.2015.01.15a', 'preempt.2015.01.06a', 'srcu.2015.01.06a', 'stall.2015.01.16a' and 'torture.2015.01.11a' into HEAD
doc.2015.01.07a: Documentation updates.
fixes.2015.01.15a: Miscellaneous fixes.
preempt.2015.01.06a: Changes to handling of lists of preempted tasks.
srcu.2015.01.06a: SRCU updates.
stall.2015.01.16a: RCU CPU stall-warning updates and fixes.
torture.2015.01.11a: RCU torture-test updates and fixes.
commit 78e691f4ae
47 changed files with 563 additions and 568 deletions
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
 "D" indicates that dyntick-idle processing is enabled ("." is printed
 otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
 
+If the relevant grace-period kthread has been unable to run prior to
+the stall warning, the following additional line is printed:
+
+	rcu_preempt kthread starved for 2023 jiffies!
+
+Starving the grace-period kthreads of CPU time can of course result in
+RCU CPU stall warnings even when all CPUs and tasks have passed through
+the required quiescent states.
+
 
 Multiple Warnings From One Stall
 
@@ -187,6 +196,11 @@ o	For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
 	behavior, you might need to replace some of the cond_resched()
 	calls with calls to cond_resched_rcu_qs().
 
+o	Anything that prevents RCU's grace-period kthreads from running.
+	This can result in the "All QSes seen" console-log message.
+	This message will include information on when the kthread last
+	ran and how often it should be expected to run.
+
 o	A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
 	happen to preempt a low-priority task in the middle of an RCU
 	read-side critical section.   This is especially damaging if
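For context, cond_resched_rcu_qs() is meant to be dropped into long-running kernel loops that would otherwise never report a quiescent state; a minimal sketch (process_one_item() is a hypothetical placeholder, not part of this series):

	#include <linux/kthread.h>
	#include <linux/rcupdate.h>

	static int example_loop(void *unused)
	{
		while (!kthread_should_stop()) {
			process_one_item();	/* hypothetical unit of work */
			cond_resched_rcu_qs();	/* reschedule if needed and note a quiescent state */
		}
		return 0;
	}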
@@ -56,14 +56,14 @@ rcuboost:
 
 The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
 
-  0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
-  1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
-  2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
-  3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
-  4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
-  5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
-  6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
-  7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
+  0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
+  1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
+  2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
+  3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
+  4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
+  5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
+  6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
+  7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
 
 This file has one line per CPU, or eight for this 8-CPU system.
 The fields are as follows:
@@ -188,14 +188,14 @@ o	"ca" is the number of RCU callbacks that have been adopted by this
 Kernels compiled with CONFIG_RCU_BOOST=y display the following from
 /debug/rcu/rcu_preempt/rcudata:
 
-  0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
-  1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
-  2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
-  3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
-  4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
-  5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
-  6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
-  7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
+  0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
+  1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
+  2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
+  3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
+  4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
+  5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
+  6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
+  7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
 
 This is similar to the output discussed above, but contains the following
 additional fields:
@@ -23,6 +23,7 @@ config KVM
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_ARM_HOST
+	select SRCU
 	depends on ARM_VIRT_EXT && ARM_LPAE
 	---help---
 	  Support hosting virtualized guest machines. You will also
@@ -26,6 +26,7 @@ config KVM
 	select KVM_ARM_HOST
 	select KVM_ARM_VGIC
 	select KVM_ARM_TIMER
+	select SRCU
 	---help---
 	  Support hosting virtualized guest machines.
 
@@ -20,6 +20,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select KVM_MMIO
+	select SRCU
 	---help---
 	  Support for hosting Guest kernels.
 	  Currently supported on MIPS32 processors.
@@ -21,6 +21,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_EVENTFD
+	select SRCU
 
 config KVM_BOOK3S_HANDLER
 	bool
@@ -28,6 +28,7 @@ config KVM
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQ_ROUTING
+	select SRCU
 	---help---
 	  Support hosting paravirtualized guest machines using the SIE
 	  virtualization capability on the mainframe. This should work
@@ -21,6 +21,7 @@ config KVM
 	depends on HAVE_KVM && MODULES
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select SRCU
 	---help---
 	  Support hosting paravirtualized guest machines.
 
@@ -138,6 +138,7 @@ config X86
 	select HAVE_ACPI_APEI_NMI if ACPI
 	select ACPI_LEGACY_TABLES_LOOKUP if ACPI
 	select X86_FEATURE_NAMES if PROC_FS
+	select SRCU
 
 config INSTRUCTION_DECODER
 	def_bool y
@@ -40,6 +40,7 @@ config KVM
 	select HAVE_KVM_MSI
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_VFIO
+	select SRCU
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
@@ -13,6 +13,7 @@ config COMMON_CLK
 	bool
 	select HAVE_CLK_PREPARE
 	select CLKDEV_LOOKUP
+	select SRCU
 	---help---
 	  The common clock framework is a single definition of struct
 	  clk, useful across many platforms, as well as an
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
 
 config CPU_FREQ
 	bool "CPU Frequency scaling"
+	select SRCU
 	help
 	  CPU Frequency scaling allows you to change the clock speed of
 	  CPUs on the fly. This is a nice method to save power, because
@@ -1,5 +1,6 @@
 menuconfig PM_DEVFREQ
 	bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+	select SRCU
 	help
 	  A device may have a list of frequencies and voltages available.
 	  devfreq, a generic DVFS framework can be registered for a device
@@ -5,6 +5,7 @@
 menuconfig MD
 	bool "Multiple devices driver support (RAID and LVM)"
 	depends on BLOCK
+	select SRCU
 	help
 	  Support multiple physical spindles through a single logical device.
 	  Required for RAID and logical volume management.
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
 
 config NETPOLL
 	def_bool NETCONSOLE
+	select SRCU
 
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
@@ -8,6 +8,7 @@ config BTRFS_FS
 	select LZO_DECOMPRESS
 	select RAID6_PQ
 	select XOR_BLOCKS
+	select SRCU
 
 	help
 	  Btrfs is a general purpose copy-on-write filesystem with extents,
@@ -1,5 +1,6 @@
 config FSNOTIFY
 	def_bool n
+	select SRCU
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
@@ -5,6 +5,7 @@
 config QUOTA
 	bool "Quota support"
 	select QUOTACTL
+	select SRCU
 	help
 	  If you say Y here, you will be able to set per user limits for disk
 	  usage (also called disk quotas). Currently, it works for the
@@ -385,7 +385,7 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
 /* Compile time object size, -1 for unknown */
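As a quick sanity check of the widened __native_word() test above, a standalone userspace sketch (not kernel code) that evaluates the new definition for each size class:

	#include <stdio.h>

	#define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
				  sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))

	int main(void)
	{
		printf("char  %d\n", (int)__native_word(char));   /* 1 with the new definition */
		printf("short %d\n", (int)__native_word(short));  /* 1 with the new definition */
		printf("int   %d\n", (int)__native_word(int));    /* 1, as before */
		printf("long  %d\n", (int)__native_word(long));   /* 1, as before */
		return 0;
	}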
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member:	the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu(pos, member)			\
-	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-			typeof(*(pos)), member);			\
+	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+			&(pos)->member)), typeof(*(pos)), member);	\
 	     pos;							\
-	     pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-			typeof(*(pos)), member))
+	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
+			&(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member:	the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
-	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-			typeof(*(pos)), member);			\
+	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
+			&(pos)->member)), typeof(*(pos)), member);	\
 	     pos;							\
-	     pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-			typeof(*(pos)), member))
+	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
+			&(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
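The two iterators above resume a traversal from an existing element rather than from the list head; a minimal usage sketch (struct foo and its ->node member are hypothetical), with the walk done under rcu_read_lock():

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct foo {
		int key;
		struct hlist_node node;
	};

	/* Count matching entries that follow @pos on an RCU-protected hlist. */
	static int count_after(struct foo *pos, int key)
	{
		int n = 0;

		rcu_read_lock();
		hlist_for_each_entry_continue_rcu(pos, node)
			if (pos->key == key)
				n++;
		rcu_read_unlock();
		return n;
	}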
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
 extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
+		rcu_all_qs(); \
 		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
 			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
 	} while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t)	do { } while (0)
+#define rcu_note_voluntary_context_switch(t)	rcu_all_qs()
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_check(p, c, space) \
 ({ \
-	typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+	/* Dependency order vs. p above. */ \
+	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
 	rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
 	rcu_dereference_sparse(p, space); \
-	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
-	((typeof(*p) __force __kernel *)(_________p1)); \
+	((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_index_check(p, c) \
 ({ \
-	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	/* Dependency order vs. p above. */ \
+	typeof(p) _________p1 = lockless_dereference(p); \
 	rcu_lockdep_assert(c, \
 			   "suspicious rcu_dereference_index_check() usage"); \
-	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
 	(_________p1); \
 })
 
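The two hunks above fold the ACCESS_ONCE()/smp_read_barrier_depends() pair into a single helper; lockless_dereference(), added to compiler.h in this same time frame, looks roughly like the following, so the dependency-ordering barrier still runs, just in one place:

	#define lockless_dereference(p) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
		(_________p1); \
	})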
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
 }
 
 /*
- * Return the number of grace periods.
+ * Return the number of grace periods started.
  */
-static inline long rcu_batches_completed(void)
+static inline unsigned long rcu_batches_started(void)
 {
 	return 0;
 }
 
 /*
- * Return the number of bottom-half grace periods.
+ * Return the number of bottom-half grace periods started.
  */
-static inline long rcu_batches_completed_bh(void)
+static inline unsigned long rcu_batches_started_bh(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
 {
 	return 0;
 }
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
 	return true;
 }
 
-
 #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
+static inline void rcu_all_qs(void)
+{
+}
+
 #endif /* __LINUX_RCUTINY_H */
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
-long rcu_batches_completed(void);
-long rcu_batches_completed_bh(void);
-long rcu_batches_completed_sched(void);
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
 
 bool rcu_is_watching(void);
 
+void rcu_all_qs(void);
+
 #endif /* __LINUX_RCUTREE_H */
@@ -45,7 +45,7 @@ struct rcu_batch {
 #define RCU_BATCH_INIT(name) { NULL, &(name.head) }
 
 struct srcu_struct {
-	unsigned completed;
+	unsigned long completed;
 	struct srcu_struct_array __percpu *per_cpu_ref;
 	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
 	bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
  * define and init a srcu struct at build time.
  * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
  */
-#define DEFINE_SRCU(name)						\
+#define __DEFINE_SRCU(name, is_static)					\
 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-	struct srcu_struct name = __SRCU_STRUCT_INIT(name);
-
-#define DEFINE_STATIC_SRCU(name)					\
-	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-	static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)
 
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
-long srcu_batches_completed(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void srcu_barrier(struct srcu_struct *sp);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
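With the refactoring above, DEFINE_SRCU() and DEFINE_STATIC_SRCU() still expand to a per-CPU array plus a srcu_struct; a minimal usage sketch of the static form (struct my_data, my_srcu, and the update-side serialization are hypothetical):

	#include <linux/slab.h>
	#include <linux/srcu.h>

	struct my_data {
		int val;
	};

	static struct my_data __rcu *cur;
	DEFINE_STATIC_SRCU(my_srcu);

	static int read_val(void)
	{
		struct my_data *p;
		int idx, val = -1;

		idx = srcu_read_lock(&my_srcu);	/* read side may block */
		p = srcu_dereference(cur, &my_srcu);
		if (p)
			val = p->val;
		srcu_read_unlock(&my_srcu, idx);
		return val;
	}

	static void update_val(struct my_data *newp)	/* caller serializes updaters */
	{
		struct my_data *oldp = rcu_access_pointer(cur);

		rcu_assign_pointer(cur, newp);
		synchronize_srcu(&my_srcu);	/* wait for pre-existing SRCU readers */
		kfree(oldp);
	}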

init/Kconfig | 18

@@ -470,7 +470,6 @@ choice
 config TREE_RCU
 	bool "Tree-based hierarchical RCU"
 	depends on !PREEMPT && SMP
-	select IRQ_WORK
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP system with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
 config PREEMPT_RCU
 	bool "Preemptible tree-based hierarchical RCU"
 	depends on PREEMPT
-	select IRQ_WORK
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
 
 endchoice
 
+config SRCU
+	bool
+	help
+	  This option selects the sleepable version of RCU. This version
+	  permits arbitrary sleeping or blocking within RCU read-side critical
+	  sections.
+
 config TASKS_RCU
 	bool "Task_based RCU implementation using voluntary context switch"
 	default n
+	select SRCU
 	help
 	  This option enables a task-based RCU implementation that uses
 	  only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
 
 config RCU_KTHREAD_PRIO
 	int "Real-time priority to use for RCU worker threads"
-	range 1 99
-	depends on RCU_BOOST
-	default 1
+	range 1 99 if RCU_BOOST
+	range 0 99 if !RCU_BOOST
+	default 1 if RCU_BOOST
+	default 0 if !RCU_BOOST
 	help
 	  This option specifies the SCHED_FIFO priority value that will be
 	  assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1595,6 +1602,7 @@ config PERF_EVENTS
 	depends on HAVE_PERF_EVENTS
 	select ANON_INODES
 	select IRQ_WORK
+	select SRCU
 	help
 	  Enable kernel support for various performance events provided
 	  by software and hardware.

kernel/cpu.c | 56

@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
 
 static struct {
 	struct task_struct *active_writer;
-	struct mutex lock; /* Synchronizes accesses to refcount, */
+	/* wait queue to wake up the active_writer */
+	wait_queue_head_t wq;
+	/* verifies that no writer will get active while readers are active */
+	struct mutex lock;
 	/*
 	 * Also blocks the new readers during
 	 * an ongoing cpu hotplug operation.
 	 */
-	int refcount;
-	/* And allows lockless put_online_cpus(). */
-	atomic_t puts_pending;
+	atomic_t refcount;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
 } cpu_hotplug = {
 	.active_writer = NULL,
+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-	.refcount = 0,
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	.dep_map = {.name = "cpu_hotplug.lock" },
 #endif
 | 
				
			||||||
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 | 
					#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 | 
				
			||||||
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 | 
					#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void apply_puts_pending(int max)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	int delta;
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
 | 
					 | 
				
			||||||
		delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
 | 
					 | 
				
			||||||
		cpu_hotplug.refcount -= delta;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
void get_online_cpus(void)
 | 
					void get_online_cpus(void)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
| 
						 | 
@@ -103,8 +95,7 @@ void get_online_cpus(void)
 		return;
 	cpuhp_lock_acquire_read();
 	mutex_lock(&cpu_hotplug.lock);
-	apply_puts_pending(65536);
-	cpu_hotplug.refcount++;
+	atomic_inc(&cpu_hotplug.refcount);
 	mutex_unlock(&cpu_hotplug.lock);
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
 	if (!mutex_trylock(&cpu_hotplug.lock))
 		return false;
 	cpuhp_lock_acquire_tryread();
-	apply_puts_pending(65536);
-	cpu_hotplug.refcount++;
+	atomic_inc(&cpu_hotplug.refcount);
 	mutex_unlock(&cpu_hotplug.lock);
 	return true;
 }
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
 
 void put_online_cpus(void)
 {
+	int refcount;
+
 	if (cpu_hotplug.active_writer == current)
 		return;
-	if (!mutex_trylock(&cpu_hotplug.lock)) {
-		atomic_inc(&cpu_hotplug.puts_pending);
-		cpuhp_lock_release();
-		return;
-	}
 
-	if (WARN_ON(!cpu_hotplug.refcount))
-		cpu_hotplug.refcount++; /* try to fix things up */
+	refcount = atomic_dec_return(&cpu_hotplug.refcount);
+	if (WARN_ON(refcount < 0)) /* try to fix things up */
+		atomic_inc(&cpu_hotplug.refcount);
+
+	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
+		wake_up(&cpu_hotplug.wq);
 
-	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
-		wake_up_process(cpu_hotplug.active_writer);
-	mutex_unlock(&cpu_hotplug.lock);
 	cpuhp_lock_release();
 
 }
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
  */
 void cpu_hotplug_begin(void)
 {
-	cpu_hotplug.active_writer = current;
+	DEFINE_WAIT(wait);
 
+	cpu_hotplug.active_writer = current;
 	cpuhp_lock_acquire();
+
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
-		apply_puts_pending(1);
-		if (likely(!cpu_hotplug.refcount))
+		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (likely(!atomic_read(&cpu_hotplug.refcount)))
 			break;
-		__set_current_state(TASK_UNINTERRUPTIBLE);
 		mutex_unlock(&cpu_hotplug.lock);
 		schedule();
 	}
+	finish_wait(&cpu_hotplug.wq, &wait);
 }
 
 void cpu_hotplug_done(void)
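The refcount/waitqueue conversion above keeps the usual pattern on the reader side; a minimal sketch of a caller (tally_cpu() is a hypothetical per-CPU operation):

	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static void walk_online_cpus(void)
	{
		int cpu;

		get_online_cpus();	/* now just atomic_inc(&cpu_hotplug.refcount) under the mutex */
		for_each_online_cpu(cpu)
			tally_cpu(cpu);	/* the set of online CPUs is stable here */
		put_online_cpus();	/* atomic_dec_return(); wakes a waiting writer at zero */
	}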
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
 }
 EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
 
+#ifdef CONFIG_SRCU
 /*
  *	SRCU notifier chain routines.    Registration and unregistration
  *	use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
 }
 EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
 
+#endif /* CONFIG_SRCU */
+
 static ATOMIC_NOTIFIER_HEAD(die_chain);
 
 int notrace notify_die(enum die_val val, const char *str,
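For reference, the code that the new #ifdef brackets is the blocking, SRCU-protected notifier-chain variant; a minimal usage sketch of that API (the chain name, callback, and event values are hypothetical):

	#include <linux/notifier.h>

	static struct srcu_notifier_head my_chain;

	static int my_callback(struct notifier_block *nb, unsigned long event, void *data)
	{
		/* Runs inside srcu_read_lock(), so it is allowed to block. */
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_callback,
	};

	static int my_setup(void)
	{
		srcu_init_notifier_head(&my_chain);	/* built only when CONFIG_SRCU=y */
		return srcu_notifier_chain_register(&my_chain, &my_nb);
	}

	static void my_fire(unsigned long event, void *data)
	{
		srcu_notifier_call_chain(&my_chain, event, data);
	}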
						 | 
					@ -251,6 +251,7 @@ config APM_EMULATION
 | 
				
			||||||
 | 
					
 | 
				
			||||||
config PM_OPP
 | 
					config PM_OPP
 | 
				
			||||||
	bool
 | 
						bool
 | 
				
			||||||
 | 
						select SRCU
 | 
				
			||||||
	---help---
 | 
						---help---
 | 
				
			||||||
	  SOCs have a standard set of tuples consisting of frequency and
 | 
						  SOCs have a standard set of tuples consisting of frequency and
 | 
				
			||||||
	  voltage pairs that the device will support per voltage domain. This
 | 
						  voltage pairs that the device will support per voltage domain. This
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -1,4 +1,5 @@
-obj-y += update.o srcu.o
+obj-y += update.o
+obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
 
 void rcu_early_boot_tests(void);
 
+/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
 #endif /* __LINUX_RCU_H */
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
 	int (*readlock)(void);
 	void (*read_delay)(struct torture_random_state *rrsp);
 	void (*readunlock)(int idx);
-	int (*completed)(void);
+	unsigned long (*started)(void);
+	unsigned long (*completed)(void);
 	void (*deferred_free)(struct rcu_torture *p);
 	void (*sync)(void);
 	void (*exp_sync)(void);
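Splitting the old ->completed hook into ->started and ->completed lets the torture test sample the grace-period counters on both sides of a read-side critical section; a hedged sketch of the arithmetic this enables (wraparound-safe because the counters are unsigned long):

	/*
	 * Number of grace periods that both began after @started was sampled
	 * and ended before @completed was sampled -- that is, grace periods
	 * that elapsed entirely while the reader was running.
	 */
	static unsigned long gps_elapsed(unsigned long started, unsigned long completed)
	{
		return completed - started;
	}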
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
 	rcu_read_unlock();
 }
 
-static int rcu_torture_completed(void)
-{
-	return rcu_batches_completed();
-}
-
 /*
  * Update callback in the pipe.  This should be invoked after a grace period.
  */
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
 		cur_ops->deferred_free(rp);
 }
 
-static int rcu_no_completed(void)
+static unsigned long rcu_no_completed(void)
 {
 	return 0;
 }
 | 
				
			||||||
	.readlock	= rcu_torture_read_lock,
 | 
						.readlock	= rcu_torture_read_lock,
 | 
				
			||||||
	.read_delay	= rcu_read_delay,
 | 
						.read_delay	= rcu_read_delay,
 | 
				
			||||||
	.readunlock	= rcu_torture_read_unlock,
 | 
						.readunlock	= rcu_torture_read_unlock,
 | 
				
			||||||
	.completed	= rcu_torture_completed,
 | 
						.started	= rcu_batches_started,
 | 
				
			||||||
 | 
						.completed	= rcu_batches_completed,
 | 
				
			||||||
	.deferred_free	= rcu_torture_deferred_free,
 | 
						.deferred_free	= rcu_torture_deferred_free,
 | 
				
			||||||
	.sync		= synchronize_rcu,
 | 
						.sync		= synchronize_rcu,
 | 
				
			||||||
	.exp_sync	= synchronize_rcu_expedited,
 | 
						.exp_sync	= synchronize_rcu_expedited,
 | 
				
			||||||
| 
						 | 
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
 	rcu_read_unlock_bh();
 }
 
-static int rcu_bh_torture_completed(void)
-{
-	return rcu_batches_completed_bh();
-}
-
 static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
 {
 	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
 	.readlock	= rcu_bh_torture_read_lock,
 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 	.readunlock	= rcu_bh_torture_read_unlock,
-	.completed	= rcu_bh_torture_completed,
+	.started	= rcu_batches_started_bh,
+	.completed	= rcu_batches_completed_bh,
 	.deferred_free	= rcu_bh_torture_deferred_free,
 	.sync		= synchronize_rcu_bh,
 	.exp_sync	= synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
 	.readlock	= rcu_torture_read_lock,
 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 	.readunlock	= rcu_torture_read_unlock,
+	.started	= rcu_no_completed,
 	.completed	= rcu_no_completed,
 	.deferred_free	= rcu_busted_torture_deferred_free,
 	.sync		= synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
 	srcu_read_unlock(&srcu_ctl, idx);
 }
 
-static int srcu_torture_completed(void)
+static unsigned long srcu_torture_completed(void)
 {
 	return srcu_batches_completed(&srcu_ctl);
 }
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
 	.readlock	= srcu_torture_read_lock,
 	.read_delay	= srcu_read_delay,
 	.readunlock	= srcu_torture_read_unlock,
+	.started	= NULL,
 	.completed	= srcu_torture_completed,
 	.deferred_free	= srcu_torture_deferred_free,
 	.sync		= srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
 	.readlock	= sched_torture_read_lock,
 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 	.readunlock	= sched_torture_read_unlock,
-	.completed	= rcu_no_completed,
+	.started	= rcu_batches_started_sched,
+	.completed	= rcu_batches_completed_sched,
 	.deferred_free	= rcu_sched_torture_deferred_free,
 	.sync		= synchronize_sched,
 	.exp_sync	= synchronize_sched_expedited,
					@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
 | 
				
			||||||
	.readlock	= tasks_torture_read_lock,
 | 
						.readlock	= tasks_torture_read_lock,
 | 
				
			||||||
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 | 
						.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
 | 
				
			||||||
	.readunlock	= tasks_torture_read_unlock,
 | 
						.readunlock	= tasks_torture_read_unlock,
 | 
				
			||||||
 | 
						.started	= rcu_no_completed,
 | 
				
			||||||
	.completed	= rcu_no_completed,
 | 
						.completed	= rcu_no_completed,
 | 
				
			||||||
	.deferred_free	= rcu_tasks_torture_deferred_free,
 | 
						.deferred_free	= rcu_tasks_torture_deferred_free,
 | 
				
			||||||
	.sync		= synchronize_rcu_tasks,
 | 
						.sync		= synchronize_rcu_tasks,
 | 
				
			||||||
| 
						 | 
					@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
 | 
				
			||||||
static void rcu_torture_timer(unsigned long unused)
 | 
					static void rcu_torture_timer(unsigned long unused)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	int idx;
 | 
						int idx;
 | 
				
			||||||
	int completed;
 | 
						unsigned long started;
 | 
				
			||||||
	int completed_end;
 | 
						unsigned long completed;
 | 
				
			||||||
	static DEFINE_TORTURE_RANDOM(rand);
 | 
						static DEFINE_TORTURE_RANDOM(rand);
 | 
				
			||||||
	static DEFINE_SPINLOCK(rand_lock);
 | 
						static DEFINE_SPINLOCK(rand_lock);
 | 
				
			||||||
	struct rcu_torture *p;
 | 
						struct rcu_torture *p;
 | 
				
			||||||
| 
						 | 
					@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
 | 
				
			||||||
	unsigned long long ts;
 | 
						unsigned long long ts;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	idx = cur_ops->readlock();
 | 
						idx = cur_ops->readlock();
 | 
				
			||||||
	completed = cur_ops->completed();
 | 
						if (cur_ops->started)
 | 
				
			||||||
 | 
							started = cur_ops->started();
 | 
				
			||||||
 | 
						else
 | 
				
			||||||
 | 
							started = cur_ops->completed();
 | 
				
			||||||
	ts = rcu_trace_clock_local();
 | 
						ts = rcu_trace_clock_local();
 | 
				
			||||||
	p = rcu_dereference_check(rcu_torture_current,
 | 
						p = rcu_dereference_check(rcu_torture_current,
 | 
				
			||||||
				  rcu_read_lock_bh_held() ||
 | 
									  rcu_read_lock_bh_held() ||
 | 
				
			||||||
| 
						 | 
					@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
 | 
				
			||||||
		/* Should not happen, but... */
 | 
							/* Should not happen, but... */
 | 
				
			||||||
		pipe_count = RCU_TORTURE_PIPE_LEN;
 | 
							pipe_count = RCU_TORTURE_PIPE_LEN;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	completed_end = cur_ops->completed();
 | 
						completed = cur_ops->completed();
 | 
				
			||||||
	if (pipe_count > 1) {
 | 
						if (pipe_count > 1) {
 | 
				
			||||||
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
 | 
							do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
 | 
				
			||||||
					  completed, completed_end);
 | 
										  started, completed);
 | 
				
			||||||
		rcutorture_trace_dump();
 | 
							rcutorture_trace_dump();
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	__this_cpu_inc(rcu_torture_count[pipe_count]);
 | 
						__this_cpu_inc(rcu_torture_count[pipe_count]);
 | 
				
			||||||
	completed = completed_end - completed;
 | 
						completed = completed - started;
 | 
				
			||||||
 | 
						if (cur_ops->started)
 | 
				
			||||||
 | 
							completed++;
 | 
				
			||||||
	if (completed > RCU_TORTURE_PIPE_LEN) {
 | 
						if (completed > RCU_TORTURE_PIPE_LEN) {
 | 
				
			||||||
		/* Should not happen, but... */
 | 
							/* Should not happen, but... */
 | 
				
			||||||
		completed = RCU_TORTURE_PIPE_LEN;
 | 
							completed = RCU_TORTURE_PIPE_LEN;
 | 
				
			||||||
| 
						 | 
					@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
 | 
				
			||||||
static int
 | 
					static int
 | 
				
			||||||
rcu_torture_reader(void *arg)
 | 
					rcu_torture_reader(void *arg)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	int completed;
 | 
						unsigned long started;
 | 
				
			||||||
	int completed_end;
 | 
						unsigned long completed;
 | 
				
			||||||
	int idx;
 | 
						int idx;
 | 
				
			||||||
	DEFINE_TORTURE_RANDOM(rand);
 | 
						DEFINE_TORTURE_RANDOM(rand);
 | 
				
			||||||
	struct rcu_torture *p;
 | 
						struct rcu_torture *p;
 | 
				
			||||||
| 
						 | 
					@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
 | 
				
			||||||
				mod_timer(&t, jiffies + 1);
 | 
									mod_timer(&t, jiffies + 1);
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		idx = cur_ops->readlock();
 | 
							idx = cur_ops->readlock();
 | 
				
			||||||
		completed = cur_ops->completed();
 | 
							if (cur_ops->started)
 | 
				
			||||||
 | 
								started = cur_ops->started();
 | 
				
			||||||
 | 
							else
 | 
				
			||||||
 | 
								started = cur_ops->completed();
 | 
				
			||||||
		ts = rcu_trace_clock_local();
 | 
							ts = rcu_trace_clock_local();
 | 
				
			||||||
		p = rcu_dereference_check(rcu_torture_current,
 | 
							p = rcu_dereference_check(rcu_torture_current,
 | 
				
			||||||
					  rcu_read_lock_bh_held() ||
 | 
										  rcu_read_lock_bh_held() ||
 | 
				
			||||||
| 
						 | 
					@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
 | 
				
			||||||
			/* Should not happen, but... */
 | 
								/* Should not happen, but... */
 | 
				
			||||||
			pipe_count = RCU_TORTURE_PIPE_LEN;
 | 
								pipe_count = RCU_TORTURE_PIPE_LEN;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		completed_end = cur_ops->completed();
 | 
							completed = cur_ops->completed();
 | 
				
			||||||
		if (pipe_count > 1) {
 | 
							if (pipe_count > 1) {
 | 
				
			||||||
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
 | 
								do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
 | 
				
			||||||
						  ts, completed, completed_end);
 | 
											  ts, started, completed);
 | 
				
			||||||
			rcutorture_trace_dump();
 | 
								rcutorture_trace_dump();
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		__this_cpu_inc(rcu_torture_count[pipe_count]);
 | 
							__this_cpu_inc(rcu_torture_count[pipe_count]);
 | 
				
			||||||
		completed = completed_end - completed;
 | 
							completed = completed - started;
 | 
				
			||||||
 | 
							if (cur_ops->started)
 | 
				
			||||||
 | 
								completed++;
 | 
				
			||||||
		if (completed > RCU_TORTURE_PIPE_LEN) {
 | 
							if (completed > RCU_TORTURE_PIPE_LEN) {
 | 
				
			||||||
			/* Should not happen, but... */
 | 
								/* Should not happen, but... */
 | 
				
			||||||
			completed = RCU_TORTURE_PIPE_LEN;
 | 
								completed = RCU_TORTURE_PIPE_LEN;
 | 
				
			||||||
| 
						 | 
					@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
 | 
				
			||||||
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
 | 
							cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
 | 
				
			||||||
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
 | 
							if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
 | 
				
			||||||
			n_rcu_torture_barrier_error++;
 | 
								n_rcu_torture_barrier_error++;
 | 
				
			||||||
 | 
								pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
 | 
				
			||||||
 | 
								       atomic_read(&barrier_cbs_invoked),
 | 
				
			||||||
 | 
								       n_barrier_cbs);
 | 
				
			||||||
			WARN_ON_ONCE(1);
 | 
								WARN_ON_ONCE(1);
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		n_barrier_successes++;
 | 
							n_barrier_successes++;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
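The accounting above brackets each rcutorture read-side critical section with two counter samples. What follows is a minimal illustrative sketch of that idea, not part of this patch: it assumes the ->readlock()/->readunlock(), ->started() and ->completed() hooks of struct rcu_torture_ops shown in the hunks above, and the helper name reader_gp_window() is hypothetical.

	/*
	 * Illustrative sketch only: sample the "started" counter before
	 * entering the read-side critical section and the "completed"
	 * counter after the traversal, giving an approximate count of
	 * grace periods that overlapped the reader.
	 */
	static unsigned long reader_gp_window(struct rcu_torture_ops *ops)
	{
		unsigned long started;
		unsigned long completed;
		int idx;

		idx = ops->readlock();
		/* Prefer ->started() when the flavor provides it. */
		started = ops->started ? ops->started() : ops->completed();
		/* ... dereference and validate the protected structure here ... */
		completed = ops->completed();
		ops->readunlock(idx);

		completed = completed - started;
		if (ops->started)
			completed++;	/* mirror the adjustment made in the hunks above */
		return completed;
	}

The hunks above clamp this value to RCU_TORTURE_PIPE_LEN before using it, so an unexpectedly large window shows up as a distinct bucket rather than an out-of-range index.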
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
  * Report the number of batches, correlated with, but not necessarily
  * precisely the same as, the number of grace periods that have elapsed.
  */
-long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *sp)
 {
 	return sp->completed;
 }
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
 
-static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-
 #include "tiny_plugin.h"
 
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
-static void rcu_idle_enter_common(long long newval)
-{
-	if (newval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
-					    rcu_dynticks_nesting, newval));
-		rcu_dynticks_nesting = newval;
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
-				    rcu_dynticks_nesting, newval));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
-					    rcu_dynticks_nesting, newval));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-	rcu_sched_qs(); /* implies rcu_bh_inc() */
-	barrier();
-	rcu_dynticks_nesting = newval;
-}
-
 /*
  * Enter idle, which is an extended quiescent state if we have fully
- * entered that mode (i.e., if the new value of dynticks_nesting is zero).
+ * entered that mode.
  */
 void rcu_idle_enter(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
-	    DYNTICK_TASK_NEST_VALUE)
-		newval = 0;
-	else
-		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 

@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_irq_exit(void)
 {
-	unsigned long flags;
-	long long newval;
-
-	local_irq_save(flags);
-	newval = rcu_dynticks_nesting - 1;
-	WARN_ON_ONCE(newval < 0);
-	rcu_idle_enter_common(newval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_exit);
 
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
-static void rcu_idle_exit_common(long long oldval)
-{
-	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
-					    oldval, rcu_dynticks_nesting));
-		return;
-	}
-	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
-	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
-			  oldval, rcu_dynticks_nesting));
-		ftrace_dump(DUMP_ALL);
-		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-			  current->pid, current->comm,
-			  idle->pid, idle->comm); /* must be idle task! */
-	}
-}
-
 /*
  * Exit idle, so that we are no longer in an extended quiescent state.
  */
 void rcu_idle_exit(void)
 {
-	unsigned long flags;
-	long long oldval;
-
-	local_irq_save(flags);
-	oldval = rcu_dynticks_nesting;
-	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
-		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
-		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_idle_exit_common(oldval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 

@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_irq_enter(void)
 {
-	unsigned long flags;
-	long long oldval;
-
-	local_irq_save(flags);
-	oldval = rcu_dynticks_nesting;
-	rcu_dynticks_nesting++;
-	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
-	rcu_idle_exit_common(oldval);
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 

@@ -179,22 +89,12 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
  */
 bool notrace __rcu_is_watching(void)
 {
-	return rcu_dynticks_nesting;
+	return true;
 }
 EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
-/*
- * Test whether the current CPU was interrupted from idle.  Nested
- * interrupts don't count, we must be running at the first interrupt
- * level.
- */
-static int rcu_is_cpu_rrupt_from_idle(void)
-{
-	return rcu_dynticks_nesting <= 1;
-}
-
 /*
  * Helper function for rcu_sched_qs() and rcu_bh_qs().
  * Also irqs are disabled to avoid confusion due to interrupt handlers

@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
 void rcu_check_callbacks(int user)
 {
 	RCU_TRACE(check_cpu_stalls());
-	if (user || rcu_is_cpu_rrupt_from_idle())
+	if (user)
 		rcu_sched_qs();
 	else if (!in_softirq())
 		rcu_bh_qs();

@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
 	rcp->curtail = &head->next;
 	RCU_TRACE(rcp->qlen++);
 	local_irq_restore(flags);
+
+	if (unlikely(is_idle_task(current))) {
+		/* force scheduling for rcu_sched_qs() */
+		resched_cpu(0);
+	}
 }
 
 /*

@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
 void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+	RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
+	RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
 
 	rcu_early_boot_tests();
 }
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 	rcp->ticks_this_gp++;
 	j = jiffies;
 	js = ACCESS_ONCE(rcp->jiffies_stall);
-	if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
+	if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
 		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
-		       rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
+		       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
 		       jiffies - rcp->gp_start, rcp->qlen);
 		dump_stack();
-	}
-	if (*rcp->curtail && ULONG_CMP_GE(j, js))
 		ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
 			3 * rcu_jiffies_till_stall_check() + 3;
-	else if (ULONG_CMP_GE(j, js))
+	} else if (ULONG_CMP_GE(j, js)) {
 		ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+	}
 }
 
 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
 /*
  * Track the rcutorture test sequence number and the update version
  * number within a given test.  The rcutorture_testseq is incremented

@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
+DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
+
 /*
  * Let the RCU core know that this CPU has gone through the scheduler,
  * which is a quiescent state.  This is called when the need for a

@@ -284,6 +291,22 @@ void rcu_note_context_switch(void)
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
+/*
+ * Register a quiesecent state for all RCU flavors.  If there is an
+ * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * dyntick-idle quiescent state visible to other CPUs (but only for those
+ * RCU flavors in desparate need of a quiescent state, which will normally
+ * be none of them).  Either way, do a lightweight quiescent state for
+ * all RCU flavors.
+ */
+void rcu_all_qs(void)
+{
+	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+		rcu_momentary_dyntick_idle();
+	this_cpu_inc(rcu_qs_ctr);
+}
+EXPORT_SYMBOL_GPL(rcu_all_qs);
+
 static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
 static long qhimark = 10000;	/* If this many pending, ignore blimit. */
 static long qlowmark = 100;	/* Once only this many pending, use blimit. */
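rcu_all_qs() above gives long-running kernel loops a cheap way to note a quiescent state for all RCU flavors by bumping the per-CPU rcu_qs_ctr introduced earlier in this hunk group. Below is a hedged sketch of a caller, not from this patch: example_long_loop() is hypothetical, and in-tree code would normally reach rcu_all_qs() through cond_resched_rcu_qs() rather than calling it directly.

	/*
	 * Hypothetical caller, for illustration only: periodically report a
	 * lightweight quiescent state while walking a long list.
	 */
	static void example_long_loop(struct list_head *head)
	{
		struct list_head *pos;
		unsigned long n = 0;

		list_for_each(pos, head) {
			/* ... per-element processing ... */
			if (!(++n % 1024))
				rcu_all_qs();	/* increments this CPU's rcu_qs_ctr */
		}
	}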
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
 /*
- * Return the number of RCU-sched batches processed thus far for debug & stats.
+ * Return the number of RCU batches started thus far for debug & stats.
  */
-long rcu_batches_completed_sched(void)
+unsigned long rcu_batches_started(void)
+{
+	return rcu_state_p->gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started);
+
+/*
+ * Return the number of RCU-sched batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started_sched(void)
+{
+	return rcu_sched_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+
+/*
+ * Return the number of RCU BH batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started_bh(void)
+{
+	return rcu_bh_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
+
+/*
+ * Return the number of RCU batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed(void)
+{
+	return rcu_state_p->completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU-sched batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed_sched(void)
 {
 	return rcu_sched_state.completed;
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
 /*
- * Return the number of RCU BH batches processed thus far for debug & stats.
+ * Return the number of RCU BH batches completed thus far for debug & stats.
  */
-long rcu_batches_completed_bh(void)
+unsigned long rcu_batches_completed_bh(void)
 {
 	return rcu_bh_state.completed;
 }
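The accessors above export free-running, unsigned-long grace-period counters for debug and statistics. As a hedged sketch, not part of this patch, a diagnostic consumer could snapshot the started counter and later ask, with the existing wrap-safe helper ULONG_CMP_GE(), whether every grace period that had started at snapshot time has since completed; example_gp_elapsed() is a hypothetical name, and production code should use synchronize_rcu() and friends rather than open-coding this.

	/*
	 * Illustration only: true once the completed counter has caught up
	 * to a snapshot taken from rcu_batches_started().
	 */
	static bool example_gp_elapsed(unsigned long started_snap)
	{
		return ULONG_CMP_GE(rcu_batches_completed(), started_snap);
	}

A caller would take "unsigned long snap = rcu_batches_started();", do other work, and then poll example_gp_elapsed(snap).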
@@ -930,16 +989,13 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		return 1;
 	} else {
+		if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+				 rdp->mynode->gpnum))
+			ACCESS_ONCE(rdp->gpwrap) = true;
 		return 0;
 	}
 }
 
-/*
- * This function really isn't for public consumption, but RCU is special in
- * that context switches can allow the state machine to make progress.
- */
-extern void resched_cpu(int cpu);
-
 /*
  * Return true if the specified CPU has passed through a quiescent
  * state by virtue of being in or having passed through an dynticks
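The ->gpwrap handling above guards against a CPU's ->gpnum snapshot silently going stale when that CPU sleeps through a very large number of grace periods. A hedged sketch of the heuristic follows, with a hypothetical helper name: the snapshot is treated as stale once the rcu_node's counter has advanced roughly a quarter of the unsigned-long space beyond it, at which point ->gpwrap forces the per-CPU bookkeeping to be refreshed.

	/*
	 * Illustration only: wrap-safe staleness test matching the
	 * condition added in the hunk above.
	 */
	static bool example_snapshot_is_stale(unsigned long snap_gpnum,
					      unsigned long node_gpnum)
	{
		return ULONG_CMP_LT(snap_gpnum + ULONG_MAX / 4, node_gpnum);
	}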
@@ -1043,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	j1 = rcu_jiffies_till_stall_check();
 	ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
 	rsp->jiffies_resched = j + j1 / 2;
+	rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+}
+
+/*
+ * Complain about starvation of grace-period kthread.
+ */
+static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+{
+	unsigned long gpa;
+	unsigned long j;
+
+	j = jiffies;
+	gpa = ACCESS_ONCE(rsp->gp_activity);
+	if (j - gpa > 2 * HZ)
+		pr_err("%s kthread starved for %ld jiffies!\n",
+		       rsp->name, j - gpa);
 }
 
 /*
@@ -1065,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 	}
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 {
 	int cpu;
 	long delta;
 	unsigned long flags;
+	unsigned long gpa;
+	unsigned long j;
 	int ndetected = 0;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	long totqlen = 0;

@@ -1107,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
-	/*
-	 * Now rat on any tasks that got kicked up to the root rcu_node
-	 * due to CPU offlining.
-	 */
-	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	ndetected += rcu_print_task_stall(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
 	print_cpu_stall_info_end();
 	for_each_possible_cpu(cpu)
 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
 	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
-	if (ndetected == 0)
-		pr_err("INFO: Stall ended before state dump start\n");
-	else
+	if (ndetected) {
 		rcu_dump_cpu_stacks(rsp);
+	} else {
+		if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
+		    ACCESS_ONCE(rsp->completed) == gpnum) {
+			pr_err("INFO: Stall ended before state dump start\n");
+		} else {
+			j = jiffies;
+			gpa = ACCESS_ONCE(rsp->gp_activity);
+			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
+			       rsp->name, j - gpa, j, gpa,
+			       jiffies_till_next_fqs);
+			/* In this case, the current CPU might be at fault. */
+			sched_show_task(current);
+		}
+	}
+
 	/* Complain about tasks blocking the grace period. */
-
 	rcu_print_detail_task_stall(rsp);
 
+	rcu_check_gp_kthread_starvation(rsp);
+
 	force_quiescent_state(rsp);  /* Kick them all. */
 }
 
@@ -1155,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
 		jiffies - rsp->gp_start,
 		(long)rsp->gpnum, (long)rsp->completed, totqlen);
+
+	rcu_check_gp_kthread_starvation(rsp);
+
 	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);

@@ -1225,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 
 		/* They had a few time units to dump stack, so complain. */
-		print_other_cpu_stall(rsp);
+		print_other_cpu_stall(rsp, gpnum);
 	}
 }
 

@@ -1562,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 	bool ret;
 
 	/* Handle the ends of any preceding grace periods first. */
-	if (rdp->completed == rnp->completed) {
+	if (rdp->completed == rnp->completed &&
+	    !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
 
 		/* No grace period end, so just accelerate recent callbacks. */
 		ret = rcu_accelerate_cbs(rsp, rnp, rdp);

@@ -1577,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
 	}
 
-	if (rdp->gpnum != rnp->gpnum) {
+	if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
 		/*
 		 * If the current grace period is waiting for this CPU,
 		 * set up to detect a quiescent state, otherwise don't

@@ -1586,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 		rdp->gpnum = rnp->gpnum;
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
 		rdp->passed_quiesce = 0;
+		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
 		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
 		zero_cpu_stall_ticks(rdp);
+		ACCESS_ONCE(rdp->gpwrap) = false;
 	}
 	return ret;
 }

@@ -1601,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 	local_irq_save(flags);
 	rnp = rdp->mynode;
 	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
-	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
+	     rdp->completed == ACCESS_ONCE(rnp->completed) &&
+	     !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
 		return;
@@ -1621,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
+	ACCESS_ONCE(rsp->gp_activity) = jiffies;
 	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();

@@ -1681,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
+		ACCESS_ONCE(rsp->gp_activity) = jiffies;
 	}
 
 	mutex_unlock(&rsp->onoff_mutex);

@@ -1697,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 	unsigned long maxj;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
+	ACCESS_ONCE(rsp->gp_activity) = jiffies;
 	rsp->n_force_qs++;
 	if (fqs_state == RCU_SAVE_DYNTICK) {
 		/* Collect dyntick-idle snapshots. */

@@ -1735,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
+	ACCESS_ONCE(rsp->gp_activity) = jiffies;
 	raw_spin_lock_irq(&rnp->lock);
 	smp_mb__after_unlock_lock();
 	gp_duration = jiffies - rsp->gp_start;

@@ -1771,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched_rcu_qs();
+		ACCESS_ONCE(rsp->gp_activity) = jiffies;
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);

@@ -1820,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			if (rcu_gp_init(rsp))
 				break;
 			cond_resched_rcu_qs();
+			ACCESS_ONCE(rsp->gp_activity) = jiffies;
 			WARN_ON(signal_pending(current));
 			trace_rcu_grace_period(rsp->name,
 					       ACCESS_ONCE(rsp->gpnum),

@@ -1863,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
 						       ACCESS_ONCE(rsp->gpnum),
 						       TPS("fqsend"));
 				cond_resched_rcu_qs();
+				ACCESS_ONCE(rsp->gp_activity) = jiffies;
 			} else {
 				/* Deal with stray signal. */
 				cond_resched_rcu_qs();
+				ACCESS_ONCE(rsp->gp_activity) = jiffies;
 				WARN_ON(signal_pending(current));
 				trace_rcu_grace_period(rsp->name,
 						       ACCESS_ONCE(rsp->gpnum),
@@ -2042,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	smp_mb__after_unlock_lock();
-	if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
-	    rnp->completed == rnp->gpnum) {
+	if ((rdp->passed_quiesce == 0 &&
+	     rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
+	    rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
+	    rdp->gpwrap) {
 
 		/*
 		 * The grace period in which this quiescent state was

@@ -2052,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 * within the current grace period.
 		 */
 		rdp->passed_quiesce = 0;	/* need qs for new gp. */
+		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}

@@ -2096,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * Was there a quiescent state since the beginning of the grace
 	 * period? If no, then exit and wait for the next call.
 	 */
-	if (!rdp->passed_quiesce)
+	if (!rdp->passed_quiesce &&
+	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
 		return;
 
 	/*
					@ -2226,6 +2323,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 | 
				
			||||||
			       TPS("cpuofl"));
 | 
								       TPS("cpuofl"));
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					/*
 | 
				
			||||||
 | 
					 * All CPUs for the specified rcu_node structure have gone offline,
 | 
				
			||||||
 | 
					 * and all tasks that were preempted within an RCU read-side critical
 | 
				
			||||||
 | 
					 * section while running on one of those CPUs have since exited their RCU
 | 
				
			||||||
 | 
					 * read-side critical section.  Some other CPU is reporting this fact with
 | 
				
			||||||
 | 
					 * the specified rcu_node structure's ->lock held and interrupts disabled.
 | 
				
			||||||
 | 
					 * This function therefore goes up the tree of rcu_node structures,
 | 
				
			||||||
 | 
					 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
 | 
				
			||||||
 | 
					 * the leaf rcu_node structure's ->qsmaskinit field has already been
 | 
				
			||||||
 | 
					 * updated
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * This function does check that the specified rcu_node structure has
 | 
				
			||||||
 | 
					 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 | 
				
			||||||
 | 
					 * prematurely.  That said, invoking it after the fact will cost you
 | 
				
			||||||
 | 
					 * a needless lock acquisition.  So once it has done its work, don't
 | 
				
			||||||
 | 
					 * invoke it again.
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						long mask;
 | 
				
			||||||
 | 
						struct rcu_node *rnp = rnp_leaf;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
 | 
				
			||||||
 | 
							return;
 | 
				
			||||||
 | 
						for (;;) {
 | 
				
			||||||
 | 
							mask = rnp->grpmask;
 | 
				
			||||||
 | 
							rnp = rnp->parent;
 | 
				
			||||||
 | 
							if (!rnp)
 | 
				
			||||||
 | 
								break;
 | 
				
			||||||
 | 
							raw_spin_lock(&rnp->lock); /* irqs already disabled. */
 | 
				
			||||||
 | 
							smp_mb__after_unlock_lock(); /* GP memory ordering. */
 | 
				
			||||||
 | 
							rnp->qsmaskinit &= ~mask;
 | 
				
			||||||
 | 
							if (rnp->qsmaskinit) {
 | 
				
			||||||
 | 
								raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 | 
				
			||||||
 | 
								return;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/*
 | 
					/*
 | 
				
			||||||
 * The CPU has been completely removed, and some other CPU is reporting
 | 
					 * The CPU has been completely removed, and some other CPU is reporting
 | 
				
			||||||
 * this fact from process context.  Do the remainder of the cleanup,
 | 
					 * this fact from process context.  Do the remainder of the cleanup,
 | 
				
			||||||
| 
						 | 
					@ -2236,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 | 
				
			||||||
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 | 
					static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	unsigned long flags;
 | 
						unsigned long flags;
 | 
				
			||||||
	unsigned long mask;
 | 
					 | 
				
			||||||
	int need_report = 0;
 | 
					 | 
				
			||||||
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 | 
						struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 | 
				
			||||||
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 | 
						struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -2251,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 | 
				
			||||||
	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 | 
						/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 | 
				
			||||||
	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
 | 
						rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
 | 
				
			||||||
	rcu_adopt_orphan_cbs(rsp, flags);
 | 
						rcu_adopt_orphan_cbs(rsp, flags);
 | 
				
			||||||
 | 
						raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
 | 
						/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
 | 
				
			||||||
	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 | 
						raw_spin_lock_irqsave(&rnp->lock, flags);
 | 
				
			||||||
	do {
 | 
						smp_mb__after_unlock_lock();	/* Enforce GP memory-order guarantee. */
 | 
				
			||||||
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
 | 
						rnp->qsmaskinit &= ~rdp->grpmask;
 | 
				
			||||||
		smp_mb__after_unlock_lock();
 | 
						if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
 | 
				
			||||||
		rnp->qsmaskinit &= ~mask;
 | 
							rcu_cleanup_dead_rnp(rnp);
 | 
				
			||||||
		if (rnp->qsmaskinit != 0) {
 | 
						rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
 | 
				
			||||||
			if (rnp != rdp->mynode)
 | 
					 | 
				
			||||||
				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 | 
					 | 
				
			||||||
			break;
 | 
					 | 
				
			||||||
		}
 | 
					 | 
				
			||||||
		if (rnp == rdp->mynode)
 | 
					 | 
				
			||||||
			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
 | 
					 | 
				
			||||||
		else
 | 
					 | 
				
			||||||
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 | 
					 | 
				
			||||||
		mask = rnp->grpmask;
 | 
					 | 
				
			||||||
		rnp = rnp->parent;
 | 
					 | 
				
			||||||
	} while (rnp != NULL);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	/*
 | 
					 | 
				
			||||||
	 * We still hold the leaf rcu_node structure lock here, and
 | 
					 | 
				
			||||||
	 * irqs are still disabled.  The reason for this subterfuge is
 | 
					 | 
				
			||||||
	 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
 | 
					 | 
				
			||||||
	 * held leads to deadlock.
 | 
					 | 
				
			||||||
	 */
 | 
					 | 
				
			||||||
	raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
 | 
					 | 
				
			||||||
	rnp = rdp->mynode;
 | 
					 | 
				
			||||||
	if (need_report & RCU_OFL_TASKS_NORM_GP)
 | 
					 | 
				
			||||||
		rcu_report_unblock_qs_rnp(rnp, flags);
 | 
					 | 
				
			||||||
	else
 | 
					 | 
				
			||||||
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 | 
					 | 
				
			||||||
	if (need_report & RCU_OFL_TASKS_EXP_GP)
 | 
					 | 
				
			||||||
		rcu_report_exp_rnp(rsp, rnp, true);
 | 
					 | 
				
			||||||
	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
 | 
						WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
 | 
				
			||||||
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
 | 
							  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
 | 
				
			||||||
		  cpu, rdp->qlen, rdp->nxtlist);
 | 
							  cpu, rdp->qlen, rdp->nxtlist);
 | 
				
			||||||
| 
						 | 
					@ -2300,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 | 
					static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
@@ -2496,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
-	rnp = rcu_get_root(rsp);
-	if (rnp->qsmask == 0) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
-		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-	}
 }
 
 /*
@@ -2601,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  * Schedule RCU callback invocation.  If the specified type of RCU
  * does not support RCU priority boosting, just do a direct call,
  * otherwise wake up the per-CPU kernel kthread.  Note that because we
- * are running on the current CPU with interrupts disabled, the
+ * are running on the current CPU with softirqs disabled, the
  * rcu_cpu_kthread_task cannot disappear out from under us.
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -3141,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
 	if (rcu_scheduler_fully_active &&
-	    rdp->qs_pending && !rdp->passed_quiesce) {
+	    rdp->qs_pending && !rdp->passed_quiesce &&
+	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
 		rdp->n_rp_qs_pending++;
-	} else if (rdp->qs_pending && rdp->passed_quiesce) {
+	} else if (rdp->qs_pending &&
+		   (rdp->passed_quiesce ||
+		    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
 		rdp->n_rp_report_qs++;
 		return 1;
 	}
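Note (not part of the patch): the hunk above compares a per-CPU quiescent-state counter against a snapshot taken at grace-period start. A minimal stand-alone sketch of that comparison follows; the names qs_ctr, cpu_state, and qs_already_reported are invented for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long qs_ctr;		/* bumped each time this CPU passes a QS */

	struct cpu_state {
		bool qs_pending;
		bool passed_quiesce;
		unsigned long qs_ctr_snap;	/* value of qs_ctr when the GP began */
	};

	static bool qs_already_seen(const struct cpu_state *c)
	{
		return c->qs_pending &&
		       (c->passed_quiesce || c->qs_ctr_snap != qs_ctr);
	}

	int main(void)
	{
		struct cpu_state c = { true, false, qs_ctr };

		qs_ctr++;	/* e.g. a cond_resched_rcu_qs()-style quiescent state */
		printf("QS visible: %d\n", qs_already_seen(&c));
		return 0;
	}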
@@ -3167,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has a new RCU grace period started? */
-	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
+	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
+	    unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
 		rdp->n_rp_gp_started++;
 		return 1;
 	}
@@ -3350,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 			} else {
 				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
 						   rsp->n_barrier_done);
+				smp_mb__before_atomic();
 				atomic_inc(&rsp->barrier_cpu_count);
 				__call_rcu(&rdp->barrier_head,
 					   rcu_barrier_callback, rsp, cpu, 0);
@@ -3417,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	/* Set up local state, ensuring consistent view of global state. */
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-	init_callback_list(rdp);
-	rdp->qlen_lazy = 0;
-	ACCESS_ONCE(rdp->qlen) = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -3476,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 			rdp->gpnum = rnp->completed;
 			rdp->completed = rnp->completed;
 			rdp->passed_quiesce = 0;
+			rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
 			rdp->qs_pending = 0;
 			trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
 		}
@@ -3567,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self,
 static int __init rcu_spawn_gp_kthread(void)
 {
 	unsigned long flags;
+	int kthread_prio_in = kthread_prio;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
+	struct sched_param sp;
 	struct task_struct *t;
 
+	/* Force priority into range. */
+	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+		kthread_prio = 1;
+	else if (kthread_prio < 0)
+		kthread_prio = 0;
+	else if (kthread_prio > 99)
+		kthread_prio = 99;
+	if (kthread_prio != kthread_prio_in)
+		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
+			 kthread_prio, kthread_prio_in);
+
 	rcu_scheduler_fully_active = 1;
 	for_each_rcu_flavor(rsp) {
-		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
+		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
 		rnp = rcu_get_root(rsp);
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		rsp->gp_kthread = t;
+		if (kthread_prio) {
+			sp.sched_priority = kthread_prio;
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+		}
+		wake_up_process(t);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	rcu_spawn_nocb_kthreads();
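Note (not part of the patch): the hunk above clamps the kthread_prio module parameter before applying it with SCHED_FIFO. A tiny stand-alone sketch of just the clamping rule follows; clamp_kthread_prio and the boost_enabled flag are invented stand-ins for the kernel's parameter and CONFIG_RCU_BOOST.

	#include <stdio.h>

	static int clamp_kthread_prio(int prio, int boost_enabled)
	{
		if (boost_enabled && prio < 1)
			return 1;	/* boosting needs a real-time priority */
		if (prio < 0)
			return 0;
		if (prio > 99)
			return 99;	/* SCHED_FIFO priorities run 1..99 */
		return prio;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       clamp_kthread_prio(-5, 0),
		       clamp_kthread_prio(0, 1),
		       clamp_kthread_prio(150, 0));
		return 0;
	}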
@@ -27,7 +27,6 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
-#include <linux/irq_work.h>
 
 /*
  * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
 				/*  queued on this rcu_node structure that */
 				/*  are blocking the current grace period, */
 				/*  there can be no such task. */
-	struct completion boost_completion;
-				/* Used to ensure that the rt_mutex used */
-				/*  to carry out the boosting is fully */
-				/*  released with no future boostee accesses */
-				/*  before that rt_mutex is re-initialized. */
 	struct rt_mutex boost_mtx;
 				/* Used only for the priority-boosting */
 				/*  side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
 					/*  in order to detect GP end. */
 	unsigned long	gpnum;		/* Highest gp number that this CPU */
 					/*  is aware of having started. */
+	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
+					/*  for rcu_all_qs() invocations. */
 	bool		passed_quiesce;	/* User-mode/idle loop etc. */
 	bool		qs_pending;	/* Core waits for quiesc state. */
 	bool		beenonline;	/* CPU online at least once. */
+	bool		gpwrap;		/* Possible gpnum/completed wrap. */
 	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
 	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
 #ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
 #ifdef CONFIG_RCU_NOCB_CPU
 	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
 	struct rcu_head **nocb_tail;
-	atomic_long_t nocb_q_count;	/* # CBs waiting for kthread */
-	atomic_long_t nocb_q_count_lazy; /*  (approximate). */
+	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
+	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
 	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
 	struct rcu_head **nocb_follower_tail;
-	atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
-	atomic_long_t nocb_follower_count_lazy; /*  (approximate). */
-	int nocb_p_count;		/* # CBs being invoked by kthread */
-	int nocb_p_count_lazy;		/*  (approximate). */
 	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
 	struct task_struct *nocb_kthread;
 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
 	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
 					/* CBs waiting for GP. */
 	struct rcu_head **nocb_gp_tail;
-	long nocb_gp_count;
-	long nocb_gp_count_lazy;
 	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
 	struct rcu_data *nocb_next_follower;
 					/* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
 						/*  due to no GP active. */
 	unsigned long gp_start;			/* Time at which GP started, */
 						/*  but in jiffies. */
+	unsigned long gp_activity;		/* Time of last GP kthread */
+						/*  activity in jiffies. */
 	unsigned long jiffies_stall;		/* Time at which to check */
 						/*  for CPU stalls. */
 	unsigned long jiffies_resched;		/* Time at which to resched */
 						/*  a reluctant CPU. */
+	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
+						/*  GP start. */
 	unsigned long gp_max;			/* Maximum GP duration in */
 						/*  jiffies. */
 	const char *name;			/* Name of structure. */
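Note (not part of the patch): gp_activity records, in jiffies, the last time the grace-period kthread did anything, which is what lets a stall warning report how long that kthread has been starved. A minimal stand-alone sketch of that arithmetic follows; gp_state and starved_for are invented names.

	#include <stdio.h>

	struct gp_state {
		unsigned long gp_activity;	/* last GP-kthread activity, in jiffies */
	};

	static unsigned long starved_for(const struct gp_state *s, unsigned long now)
	{
		return now - s->gp_activity;	/* unsigned subtraction handles wrap */
	}

	int main(void)
	{
		struct gp_state s = { .gp_activity = 1000 };

		printf("kthread starved for %lu jiffies\n", starved_for(&s, 2500));
		return 0;
	}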
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
 #define for_each_rcu_flavor(rsp) \
 	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
 
-/* Return values for rcu_preempt_offline_tasks(). */
-
-#define RCU_OFL_TASKS_NORM_GP	0x1		/* Tasks blocking normal */
-						/*  GP were moved to root. */
-#define RCU_OFL_TASKS_EXP_GP	0x2		/* Tasks blocking expedited */
-						/*  GP were moved to root. */
-
 /*
  * RCU implementation internal declarations:
  */
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-long rcu_batches_completed(void);
 static void rcu_preempt_note_context_switch(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
-				      unsigned long flags);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
-#ifdef CONFIG_HOTPLUG_CPU
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				     struct rcu_node *rnp,
-				     struct rcu_data *rdp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-			       bool wake);
-#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
 #endif /* #ifndef RCU_TREE_NONCORE */
 
 #ifdef CONFIG_RCU_TRACE
+/* Read out queue lengths for tracing. */
+static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
+{
 #ifdef CONFIG_RCU_NOCB_CPU
-/* Sum up queue lengths for tracing. */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
-	*ql = atomic_long_read(&rdp->nocb_q_count) +
-	      rdp->nocb_p_count +
-	      atomic_long_read(&rdp->nocb_follower_count) +
-	      rdp->nocb_p_count + rdp->nocb_gp_count;
-	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
-	       rdp->nocb_p_count_lazy +
-	       atomic_long_read(&rdp->nocb_follower_count_lazy) +
-	       rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
-}
+	*ql = atomic_long_read(&rdp->nocb_q_count);
+	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
 	*ql = 0;
 	*qll = 0;
-}
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+}
 #endif /* #ifdef CONFIG_RCU_TRACE */
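Note (not part of the patch): after the counter consolidation above, the trace-side queue lengths come from a single pair of counters instead of a sum over several pipeline stages. A stand-alone sketch of that simplified read follows; cb_counts and read_q_lengths are invented stand-ins for the rcu_data fields.

	#include <stdio.h>

	struct cb_counts {
		long q_count;		/* all queued no-CBs callbacks, every stage */
		long q_count_lazy;
	};

	static void read_q_lengths(const struct cb_counts *c, long *ql, long *qll)
	{
		*ql = c->q_count;	/* one load instead of summing four stages */
		*qll = c->q_count_lazy;
	}

	int main(void)
	{
		struct cb_counts c = { 42, 7 };
		long ql, qll;

		read_q_lengths(&c, &ql, &qll);
		printf("ql=%ld qll=%ld\n", ql, qll);
		return 0;
	}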
@@ -34,10 +34,6 @@
 
 #include "../locking/rtmutex_common.h"
 
-/* rcuc/rcub kthread realtime priority */
-static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
-module_param(kthread_prio, int, 0644);
-
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
 
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake);
+
 /*
  * Tell them what RCU they are running.
@@ -113,25 +111,6 @@ static void __init rcu_bootup_announce(void)
 	rcu_bootup_announce_oddness();
 }
 
-/*
- * Return the number of RCU-preempt batches processed thus far
- * for debug and statistics.
- */
-static long rcu_batches_completed_preempt(void)
-{
-	return rcu_preempt_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
-
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
-	return rcu_batches_completed_preempt();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
 /*
  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
@@ -306,6 +285,15 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
 	return np;
 }
 
+/*
+ * Return true if the specified rcu_node structure has tasks that were
+ * preempted within an RCU read-side critical section.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
+{
+	return !list_empty(&rnp->blkd_tasks);
+}
+
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
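Note (not part of the patch): the new helper above is just a named wrapper around a circular-list emptiness test. The stand-alone sketch below shows the equivalent check on a minimal list; the list_head layout and has_blocked_tasks name are illustrative only.

	#include <stdbool.h>
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static bool has_blocked_tasks(const struct list_head *blkd_tasks)
	{
		return blkd_tasks->next != blkd_tasks;	/* i.e. !list_empty() */
	}

	int main(void)
	{
		struct list_head l = { &l, &l };	/* empty list points at itself */

		printf("%d\n", has_blocked_tasks(&l));
		return 0;
	}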
@@ -313,9 +301,10 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  */
 void rcu_read_unlock_special(struct task_struct *t)
 {
-	int empty;
-	int empty_exp;
-	int empty_exp_now;
+	bool empty;
+	bool empty_exp;
+	bool empty_norm;
+	bool empty_exp_now;
 	unsigned long flags;
 	struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
@@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 				break;
 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		}
-		empty = !rcu_preempt_blocked_readers_cgp(rnp);
+		empty = !rcu_preempt_has_tasks(rnp);
+		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		empty_exp = !rcu_preempted_readers_exp(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 		np = rcu_next_node_entry(t, rnp);
@@ -386,6 +376,14 @@ void rcu_read_unlock_special(struct task_struct *t)
 		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
+		/*
+		 * If this was the last task on the list, go see if we
+		 * need to propagate ->qsmaskinit bit clearing up the
+		 * rcu_node tree.
+		 */
+		if (!empty && !rcu_preempt_has_tasks(rnp))
+			rcu_cleanup_dead_rnp(rnp);
+
 		/*
 		 * If this was the last task on the current list, and if
 		 * we aren't waiting on any CPUs, report the quiescent state.
@@ -393,7 +391,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		 * so we must take a snapshot of the expedited state.
 		 */
 		empty_exp_now = !rcu_preempted_readers_exp(rnp);
-		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
 			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
 							 rnp->gpnum,
 							 0, rnp->qsmask,
@@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
-		if (drop_boost_mutex) {
+		if (drop_boost_mutex)
 			rt_mutex_unlock(&rnp->boost_mtx);
-			complete(&rnp->boost_completion);
-		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
@@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-	if (!list_empty(&rnp->blkd_tasks))
+	if (rcu_preempt_has_tasks(rnp))
 		rnp->gp_tasks = rnp->blkd_tasks.next;
 	WARN_ON_ONCE(rnp->qsmask);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-/*
- * Handle tasklist migration for case in which all CPUs covered by the
- * specified rcu_node have gone offline.  Move them up to the root
- * rcu_node.  The reason for not just moving them to the immediate
- * parent is to remove the need for rcu_read_unlock_special() to
- * make more than two attempts to acquire the target rcu_node's lock.
- * Returns true if there were tasks blocking the current RCU grace
- * period.
- *
- * Returns 1 if there was previously a task blocking the current grace
- * period on the specified rcu_node structure.
- *
- * The caller must hold rnp->lock with irqs disabled.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				     struct rcu_node *rnp,
-				     struct rcu_data *rdp)
-{
-	struct list_head *lp;
-	struct list_head *lp_root;
-	int retval = 0;
-	struct rcu_node *rnp_root = rcu_get_root(rsp);
-	struct task_struct *t;
-
-	if (rnp == rnp_root) {
-		WARN_ONCE(1, "Last CPU thought to be offlined?");
-		return 0;  /* Shouldn't happen: at least one CPU online. */
-	}
-
-	/* If we are on an internal node, complain bitterly. */
-	WARN_ON_ONCE(rnp != rdp->mynode);
-
-	/*
-	 * Move tasks up to root rcu_node.  Don't try to get fancy for
-	 * this corner-case operation -- just put this node's tasks
-	 * at the head of the root node's list, and update the root node's
-	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
-	 * if non-NULL.  This might result in waiting for more tasks than
-	 * absolutely necessary, but this is a good performance/complexity
-	 * tradeoff.
-	 */
-	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
-		retval |= RCU_OFL_TASKS_NORM_GP;
-	if (rcu_preempted_readers_exp(rnp))
-		retval |= RCU_OFL_TASKS_EXP_GP;
-	lp = &rnp->blkd_tasks;
-	lp_root = &rnp_root->blkd_tasks;
-	while (!list_empty(lp)) {
-		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
-		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
-		smp_mb__after_unlock_lock();
-		list_del(&t->rcu_node_entry);
-		t->rcu_blocked_node = rnp_root;
-		list_add(&t->rcu_node_entry, lp_root);
-		if (&t->rcu_node_entry == rnp->gp_tasks)
-			rnp_root->gp_tasks = rnp->gp_tasks;
-		if (&t->rcu_node_entry == rnp->exp_tasks)
-			rnp_root->exp_tasks = rnp->exp_tasks;
-#ifdef CONFIG_RCU_BOOST
-		if (&t->rcu_node_entry == rnp->boost_tasks)
-			rnp_root->boost_tasks = rnp->boost_tasks;
-#endif /* #ifdef CONFIG_RCU_BOOST */
-		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
-	}
-
-	rnp->gp_tasks = NULL;
-	rnp->exp_tasks = NULL;
-#ifdef CONFIG_RCU_BOOST
-	rnp->boost_tasks = NULL;
-	/*
-	 * In case root is being boosted and leaf was not.  Make sure
-	 * that we boost the tasks blocking the current grace period
-	 * in this case.
-	 */
-	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
-	smp_mb__after_unlock_lock();
-	if (rnp_root->boost_tasks != NULL &&
-	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
-	    rnp_root->boost_tasks != rnp_root->exp_tasks)
-		rnp_root->boost_tasks = rnp_root->gp_tasks;
-	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
-	return retval;
-}
-
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
@@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	smp_mb__after_unlock_lock();
-	if (list_empty(&rnp->blkd_tasks)) {
+	if (!rcu_preempt_has_tasks(rnp)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -932,15 +842,6 @@ static void __init rcu_bootup_announce(void)
 	rcu_bootup_announce_oddness();
 }
 
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
-	return rcu_batches_completed_sched();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
 /*
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
@@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-/* Because preemptible RCU does not exist, no quieting of tasks. */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
-	__releases(rnp->lock)
+/*
+ * Because there is no preemptible RCU, there can be no readers blocked.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
 {
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	return false;
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 	WARN_ON_ONCE(rnp->qsmask);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections, and
- * such non-existent tasks cannot possibly have been blocking the current
- * grace period.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				     struct rcu_node *rnp,
-				     struct rcu_data *rdp)
-{
-	return 0;
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
@@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, there is never any need to
- * report on tasks preempted in RCU read-side critical sections during
- * expedited RCU grace periods.
- */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-			       bool wake)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
@@ -1080,7 +951,7 @@ void exit_rcu(void)
 
 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 {
-	if (list_empty(&rnp->blkd_tasks))
+	if (!rcu_preempt_has_tasks(rnp))
 		rnp->n_balk_blkd_tasks++;
 	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
 		rnp->n_balk_exp_gp_tasks++;
@@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp)
 	struct task_struct *t;
 	struct list_head *tb;
 
-	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
+	if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
+	    ACCESS_ONCE(rnp->boost_tasks) == NULL)
 		return 0;  /* Nothing left to boost. */
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-	init_completion(&rnp->boost_completion);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	/* Lock only for side effect: boosts task t's priority. */
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-	/* Wait for boostee to be done w/boost_mtx before reinitializing. */
-	wait_for_completion(&rnp->boost_completion);
-
 	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
 	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
@@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
 		if ((mask & 0x1) && cpu != outgoingcpu)
 			cpumask_set_cpu(cpu, cm);
-	if (cpumask_weight(cm) == 0) {
+	if (cpumask_weight(cm) == 0)
 		cpumask_setall(cm);
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-			cpumask_clear_cpu(cpu, cm);
-		WARN_ON_ONCE(cpumask_weight(cm) == 0);
-	}
 	set_cpus_allowed_ptr(t, cm);
 	free_cpumask_var(cm);
 }
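Note (not part of the patch): the hunk above simplifies the fallback when the outgoing CPU was the node's last one, leaving the boost kthread bound to all CPUs instead of patching the mask up CPU by CPU. A toy bitmask version of that fallback, with the invented name boost_affinity, is sketched below.

	#include <stdio.h>

	static unsigned long boost_affinity(unsigned long node_cpus, int outgoing)
	{
		unsigned long cm = node_cpus & ~(1UL << outgoing);

		if (!cm)
			cm = ~0UL;	/* cpumask_setall() equivalent: run anywhere */
		return cm;
	}

	int main(void)
	{
		printf("%#lx\n", boost_affinity(0x1, 0));	/* last CPU of the node leaving */
		return 0;
	}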
@@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void)
 	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-	rnp = rcu_get_root(rcu_state_p);
-	(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
-	if (NUM_RCU_NODES > 1) {
-		rcu_for_each_leaf_node(rcu_state_p, rnp)
-			(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
-	}
+	rcu_for_each_leaf_node(rcu_state_p, rnp)
+		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
 		 * completed since we last checked and there are
 		 * callbacks not yet ready to invoke.
 		 */
-		if (rdp->completed != rnp->completed &&
+		if ((rdp->completed != rnp->completed ||
+		     unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
 		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
 			note_gp_changes(rsp, rdp);
 
@@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 		ticks_value = rsp->gpnum - rdp->gpnum;
 	}
 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
+	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
 	       cpu, ticks_value, ticks_title,
 	       atomic_read(&rdtp->dynticks) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+	       ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
 	       fast_no_hz);
 }
 
@@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+	unsigned long ret;
+#ifdef CONFIG_PROVE_RCU
 	struct rcu_head *rhp;
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
-	/* No-CBs CPUs might have callbacks on any of three lists. */
+	/*
+	 * Check count of all no-CBs callbacks awaiting invocation.
+	 * There needs to be a barrier before this function is called,
+	 * but associated with a prior determination that no more
+	 * callbacks would be posted.  In the worst case, the first
+	 * barrier in _rcu_barrier() suffices (but the caller cannot
+	 * necessarily rely on this, not a substitute for the caller
+	 * getting the concurrency design right!).  There must also be
+	 * a barrier between the following load an posting of a callback
+	 * (if a callback is in fact needed).  This is associated with an
+	 * atomic_inc() in the caller.
+	 */
+	ret = atomic_long_read(&rdp->nocb_q_count);
+
+#ifdef CONFIG_PROVE_RCU
 	rhp = ACCESS_ONCE(rdp->nocb_head);
 	if (!rhp)
 		rhp = ACCESS_ONCE(rdp->nocb_gp_head);
@@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 		       cpu, rhp->func);
 		WARN_ON_ONCE(1);
 	}
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
-	return !!rhp;
+	return !!ret;
 }
 
 /*
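Note (not part of the patch): with the counter consolidation above, "does this CPU still owe callbacks?" becomes a single counter test that covers every stage of the no-CBs pipeline. A stand-alone C11-atomics sketch of that test follows; needs_barrier and nocb_q_count here are invented stand-ins, and the kernel's memory-ordering obligations described in the comment above are not reproduced.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_long nocb_q_count;

	static bool needs_barrier(void)
	{
		return atomic_load(&nocb_q_count) != 0;
	}

	int main(void)
	{
		atomic_fetch_add(&nocb_q_count, 1);	/* callback enqueued */
		printf("%d\n", needs_barrier());
		atomic_fetch_sub(&nocb_q_count, 1);	/* callback invoked */
		printf("%d\n", needs_barrier());
		return 0;
	}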
@@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	struct task_struct *t;
 
 	/* Enqueue the callback on the nocb list and update counts. */
+	atomic_long_add(rhcount, &rdp->nocb_q_count);
+	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
 	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
 	ACCESS_ONCE(*old_rhpp) = rhp;
-	atomic_long_add(rhcount, &rdp->nocb_q_count);
 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
 	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
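Note (not part of the patch): the hunk above moves the counter increment ahead of the tail exchange, so that anyone who can see the newly published callback also sees a nonzero count. The stand-alone sketch below illustrates that add-before-publish ordering with C11 atomics; q_count, tail, and enqueue are invented names and this is not the kernel's implementation.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long q_count;
	static _Atomic(int *) tail;
	static int slot;

	static void enqueue(int *cb)
	{
		atomic_fetch_add(&q_count, 1);		/* count first... */
		int *old = atomic_exchange(&tail, cb);	/* ...then publish the callback */
		(void)old;
	}

	int main(void)
	{
		enqueue(&slot);
		printf("count=%ld\n", atomic_load(&q_count));
		return 0;
	}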
@@ -2288,9 +2169,6 @@ wait_again:
 		/* Move callbacks to wait-for-GP list, which is empty. */
 		ACCESS_ONCE(rdp->nocb_head) = NULL;
 		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
-		rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
-		rdp->nocb_gp_count_lazy =
-			atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
 		gotcbs = true;
 	}
 
@@ -2338,9 +2216,6 @@ wait_again:
 		/* Append callbacks to follower's "done" list. */
 		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
 		*tail = rdp->nocb_gp_head;
-		atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
-		atomic_long_add(rdp->nocb_gp_count_lazy,
-				&rdp->nocb_follower_count_lazy);
 		smp_mb__after_atomic(); /* Store *tail before wakeup. */
 		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
 			/*
@@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg)
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
 		ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
 		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
-		c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
-		cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
-		rdp->nocb_p_count += c;
-		rdp->nocb_p_count_lazy += cl;
 
 		/* Each pass through the following loop invokes a callback. */
-		trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
+		trace_rcu_batch_start(rdp->rsp->name,
+				      atomic_long_read(&rdp->nocb_q_count_lazy),
+				      atomic_long_read(&rdp->nocb_q_count), -1);
 		c = cl = 0;
 		while (list) {
 			next = list->next;
@@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg)
 			list = next;
 		}
 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
-		ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
-		ACCESS_ONCE(rdp->nocb_p_count_lazy) =
-						rdp->nocb_p_count_lazy - cl;
+		smp_mb__before_atomic();  /* _add after CB invocation. */
+		atomic_long_add(-c, &rdp->nocb_q_count);
+		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
 		rdp->n_nocbs_invoked += c;
 	}
 	return 0;

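Taken together, the hunks above collapse the no-CBs per-stage counters (->nocb_gp_count, ->nocb_follower_count, ->nocb_p_count and their _lazy variants) into the single ->nocb_q_count/->nocb_q_count_lazy pair: the count is added before the enqueue xchg (per the new comment, rcu_barrier() relies on that ordering) and subtracted only after the callbacks have actually been invoked. A minimal user-space sketch of that counting discipline, with hypothetical names and C11 atomics standing in for the kernel's atomic_long_t:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for rcu_head and the no-CBs queue (not kernel API). */
struct cb {
	struct cb *next;
	void (*func)(struct cb *);
};

static atomic_long q_count;              /* plays the role of ->nocb_q_count */
static struct cb *head;
static struct cb **tail = &head;

static void enqueue(struct cb *cbp)
{
	cbp->next = NULL;
	/* Count the callback before it becomes findable on the list. */
	atomic_fetch_add(&q_count, 1);
	*tail = cbp;
	tail = &cbp->next;
}

static void invoke_all(void)
{
	struct cb *cbp = head;
	long n = 0;

	head = NULL;
	tail = &head;
	while (cbp) {
		struct cb *next = cbp->next;

		cbp->func(cbp);
		cbp = next;
		n++;
	}
	/* Uncount only after the callbacks have actually run. */
	atomic_fetch_sub(&q_count, n);
}

static void show(struct cb *cbp)
{
	printf("invoked %p\n", (void *)cbp);
}

int main(void)
{
	struct cb c1 = { .func = show };
	struct cb c2 = { .func = show };

	enqueue(&c1);
	enqueue(&c2);
	printf("queued: %ld\n", atomic_load(&q_count));
	invoke_all();
	printf("queued after invocation: %ld\n", atomic_load(&q_count));
	return 0;
}

The point of the ordering is that anyone sampling the counter sees a callback counted no later than it becomes visible on the list, and sees it uncounted no earlier than it has actually run.
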
@@ -46,6 +46,8 @@
 #define RCU_TREE_NONCORE
 #include "tree.h"
 
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+
 static int r_open(struct inode *inode, struct file *file,
 					const struct seq_operations *op)
 {

@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d",
+	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
 		   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
-		   rdp->passed_quiesce, rdp->qs_pending);
+		   rdp->passed_quiesce,
+		   rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
+		   rdp->qs_pending);
 	seq_printf(m, " dt=%d/%llx/%d df=%lu",
 		   atomic_read(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,

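The two trace hunks above go together: the DECLARE_PER_CPU_SHARED_ALIGNED() line makes the new per-CPU rcu_qs_ctr counter visible to the debugfs-trace code (apparently print_one_rcu_data() and friends), and the widened pq=%d/%d field then prints, alongside passed_quiesce, whether that counter still matches the snapshot in rdp->rcu_qs_ctr_snap, presumably taken when the CPU noticed the current grace period. A tiny sketch of the snapshot-and-compare idiom, with hypothetical standalone names rather than the kernel's per-CPU machinery:

#include <stdio.h>

/* Hypothetical stand-ins; the kernel uses per-CPU variables for these. */
static unsigned long qs_ctr;       /* bumped when the CPU passes a quiescent state */
static unsigned long qs_ctr_snap;  /* snapshot taken when a new grace period is noticed */

int main(void)
{
	qs_ctr_snap = qs_ctr;                       /* new grace period starts */
	printf("snap still current: %d\n", qs_ctr_snap == qs_ctr);  /* prints 1 */
	qs_ctr++;                                   /* quiescent state recorded */
	printf("snap still current: %d\n", qs_ctr_snap == qs_ctr);  /* prints 0 */
	return 0;
}
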
@@ -656,9 +656,8 @@ static void run_ksoftirqd(unsigned int cpu)
 		 * in the task stack here.
 		 */
 		__do_softirq();
-		rcu_note_context_switch();
 		local_irq_enable();
-		cond_resched();
+		cond_resched_rcu_qs();
 		return;
 	}
 	local_irq_enable();

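This hunk drops run_ksoftirqd()'s explicit rcu_note_context_switch() call and replaces cond_resched() with cond_resched_rcu_qs(), which offers to reschedule and also reports a quiescent state to RCU, so a CPU that spends long stretches in ksoftirqd keeps grace periods moving without relying on an actual context switch.
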
@@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
 	select TORTURE_TEST
+	select SRCU
 	default n
 	help
 	  This option provides a kernel module that runs torture tests

@@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT
 config RCU_CPU_STALL_INFO
 	bool "Print additional diagnostics on RCU CPU stall"
 	depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
-	default n
+	default y
 	help
 	  For each stalled CPU that is aware of the current RCU grace
 	  period, print out additional per-CPU diagnostic information

@@ -325,6 +325,7 @@ config VIRT_TO_BUS
 
 config MMU_NOTIFIER
 	bool
+	select SRCU
 
 config KSM
 	bool "Enable KSM for page merging"

@@ -5,6 +5,7 @@ config SECURITY_TOMOYO
 	select SECURITYFS
 	select SECURITY_PATH
 	select SECURITY_NETWORK
+	select SRCU
 	default n
 	help
 	  This selects TOMOYO Linux, pathname-based access control.

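The three "select SRCU" additions above (RCU_TORTURE_TEST, MMU_NOTIFIER and SECURITY_TOMOYO) follow from SRCU becoming a separately configurable symbol in this series: any option whose code calls into SRCU now has to select it explicitly so that configurations with no other SRCU users still build and link.
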
@@ -24,7 +24,7 @@
 
 ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
 idlecpus=`mpstat | tail -1 | \
-	awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'`
+	awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
 awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
 BEGIN {
 	cpus2use = idlecpus;

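Switching the awk field from $12 to $NF makes this rcutorture helper's idle-CPU estimate tolerant of mpstat builds that emit a different number of columns: the %idle value it wants is the last field on the line, wherever that lands.
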
@@ -30,6 +30,7 @@ else
 	echo Unreadable results directory: $i
 	exit 1
 fi
+. tools/testing/selftests/rcutorture/bin/functions.sh
 
 configfile=`echo $i | sed -e 's/^.*\///'`
 ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`

@@ -48,4 +49,21 @@ else
 		title="$title ($ngpsps per second)"
 	fi
 	echo $title
+	nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
+	if test -z "$nclosecalls"
+	then
+		exit 0
+	fi
+	if test "$nclosecalls" -eq 0
+	then
+		exit 0
+	fi
+	# Compute number of close calls per tenth of an hour
+	nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
+	if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
+	then
+		print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+	else
+		print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+	fi
 fi

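The new block in this results-checking script runs only when the console log actually contains "torture: Reader Batch" lines; an empty or zero close-call count exits quietly. Otherwise the count is scaled by 36000/dur (dur apparently being the run duration in seconds) and compared against a threshold of 5. For example, three close calls in a 30-minute run (dur=1800) give int(3 * 36000 / 1800) = 60, which exceeds 5 with nclosecalls greater than 1, so print_bug fires; the same three close calls spread over six hours (dur=21600) give only 5 and are reported with print_warning instead.
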
@@ -8,9 +8,9 @@
 #
 # Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
 #
-# qemu-args defaults to "-nographic", along with arguments specifying the
-#			number of CPUs and other options generated from
-#			the underlying CPU architecture.
+# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
+#			arguments specifying the number of CPUs and other
+#			options generated from the underlying CPU architecture.
 # boot_args defaults to value returned by the per_version_boot_params
 #			shell function.
 #

@@ -138,7 +138,7 @@ then
 fi
 
 # Generate -smp qemu argument.
-qemu_args="-nographic $qemu_args"
+qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $config_template`
 cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
 vcpus=`identify_qemu_vcpus`

@@ -168,6 +168,7 @@ then
 	touch $resdir/buildonly
 	exit 0
 fi
+echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
 echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!

@@ -26,12 +26,15 @@
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=$1
+F=$1
 title=$2
+T=/tmp/parse-build.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
 
 . functions.sh
 
-if grep -q CC < $T
+if grep -q CC < $F
 then
 	:
 else

@@ -39,18 +42,21 @@ else
 	exit 1
 fi
 
-if grep -q "error:" < $T
+if grep -q "error:" < $F
 then
 	print_bug $title build errors:
-	grep "error:" < $T
+	grep "error:" < $F
 	exit 2
 fi
-exit 0
 
-if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+grep warning: < $F > $T/warnings
+grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
+grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
+cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
+if test -s $T/rcuwarnings
 then
 	print_warning $title build errors:
-	egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+	cat $T/rcuwarnings
 	exit 2
 fi
 exit 0

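The two hunks above rework parse-build.sh (the script names its own scratch area /tmp/parse-build.sh.$$): the build-output file is now passed in as $F while $T becomes a private temporary directory created with mkdir and removed by the trap on exit. They also drop the stray "exit 0" that previously sat between the error check and the warning check, which had left the warning check unreachable, and replace the single egrep over the whole log with a pipeline that collects all "warning:" lines and keeps only those pointing at RCU headers under include/linux/ or at files in kernel/rcu/ before reporting them.
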
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
 then
 	print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
 if test -s $T
 then
 	print_warning Assertion failure in $file $title