MIPS: MT: Remove SMTC support
Nobody is maintaining SMTC anymore and there also seems to be no userbase. Which is a pity - the SMTC technology primarily developed by Kevin D. Kissell <kevink@paralogos.com> is an ingenious demonstration of the MT ASE's power and elegance.

Based on Markos Chandras <Markos.Chandras@imgtec.com> patch https://patchwork.linux-mips.org/patch/6719/ which, while very similar, no longer applied cleanly when I tried to merge it, plus some additional post-SMTC cleanup - SMTC was a feature as tricky to remove as it was to merge once upon a time.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 8b2e62cc34
commit b633648c5a

64 changed files with 72 additions and 4097 deletions
		|  | @ -1852,7 +1852,7 @@ config FORCE_MAX_ZONEORDER | |||
| 
 | ||||
| config CEVT_GIC | ||||
| 	bool "Use GIC global counter for clock events" | ||||
| 	depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC) | ||||
| 	depends on IRQ_GIC && !MIPS_SEAD3 | ||||
| 	help | ||||
| 	  Use the GIC global counter for the clock events. The R4K clock | ||||
| 	  event driver is always present, so if the platform ends up not | ||||
|  | @ -1936,24 +1936,6 @@ config MIPS_MT_SMP | |||
| 	  Intel Hyperthreading feature. For further information go to | ||||
| 	  <http://www.imgtec.com/mips/mips-multithreading.asp>. | ||||
| 
 | ||||
| config MIPS_MT_SMTC | ||||
| 	bool "Use all TCs on all VPEs for SMP (DEPRECATED)" | ||||
| 	depends on CPU_MIPS32_R2 | ||||
| 	depends on SYS_SUPPORTS_MULTITHREADING | ||||
| 	depends on !MIPS_CPS | ||||
| 	select CPU_MIPSR2_IRQ_VI | ||||
| 	select CPU_MIPSR2_IRQ_EI | ||||
| 	select MIPS_MT | ||||
| 	select SMP | ||||
| 	select SMP_UP | ||||
| 	select SYS_SUPPORTS_SMP | ||||
| 	select NR_CPUS_DEFAULT_8 | ||||
| 	help | ||||
| 	  This is a kernel model which is known as SMTC. This is | ||||
| 	  supported on cores with the MT ASE and presents all TCs | ||||
| 	  available on all VPEs to support SMP. For further | ||||
| 	  information see <http://www.linux-mips.org/wiki/34K#SMTC>. | ||||
| 
 | ||||
| endchoice | ||||
| 
 | ||||
| config MIPS_MT | ||||
|  | @ -1977,7 +1959,7 @@ config SYS_SUPPORTS_MULTITHREADING | |||
| config MIPS_MT_FPAFF | ||||
| 	bool "Dynamic FPU affinity for FP-intensive threads" | ||||
| 	default y | ||||
| 	depends on MIPS_MT_SMP || MIPS_MT_SMTC | ||||
| 	depends on MIPS_MT_SMP | ||||
| 
 | ||||
| config MIPS_VPE_LOADER | ||||
| 	bool "VPE loader support." | ||||
|  | @ -1999,29 +1981,6 @@ config MIPS_VPE_LOADER_MT | |||
| 	default "y" | ||||
| 	depends on MIPS_VPE_LOADER && !MIPS_CMP | ||||
| 
 | ||||
| config MIPS_MT_SMTC_IM_BACKSTOP | ||||
| 	bool "Use per-TC register bits as backstop for inhibited IM bits" | ||||
| 	depends on MIPS_MT_SMTC | ||||
| 	default n | ||||
| 	help | ||||
| 	  To support multiple TC microthreads acting as "CPUs" within | ||||
| 	  a VPE, VPE-wide interrupt mask bits must be specially manipulated | ||||
| 	  during interrupt handling. To support legacy drivers and interrupt | ||||
| 	  controller management code, SMTC has a "backstop" to track and | ||||
| 	  if necessary restore the interrupt mask. This has some performance | ||||
| 	  impact on interrupt service overhead. | ||||
| 
 | ||||
| config MIPS_MT_SMTC_IRQAFF | ||||
| 	bool "Support IRQ affinity API" | ||||
| 	depends on MIPS_MT_SMTC | ||||
| 	default n | ||||
| 	help | ||||
| 	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) | ||||
| 	  for SMTC Linux kernel. Requires platform support, of which | ||||
| 	  an example can be found in the MIPS kernel i8259 and Malta | ||||
| 	  platform code.  Adds some overhead to interrupt dispatch, and | ||||
| 	  should be used only if you know what you are doing. | ||||
| 
 | ||||
| config MIPS_VPE_LOADER_TOM | ||||
| 	bool "Load VPE program into memory hidden from linux" | ||||
| 	depends on MIPS_VPE_LOADER | ||||
|  | @ -2049,7 +2008,7 @@ config MIPS_VPE_APSP_API_MT | |||
| 
 | ||||
| config MIPS_CMP | ||||
| 	bool "MIPS CMP framework support (DEPRECATED)" | ||||
| 	depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC | ||||
| 	depends on SYS_SUPPORTS_MIPS_CMP | ||||
| 	select MIPS_GIC_IPI | ||||
| 	select SYNC_R4K | ||||
| 	select WEAK_ORDERING | ||||
|  | @ -2256,7 +2215,7 @@ config NODES_SHIFT | |||
| 
 | ||||
| config HW_PERF_EVENTS | ||||
| 	bool "Enable hardware performance counter support for perf events" | ||||
| 	depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) | ||||
| 	depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP) | ||||
| 	default y | ||||
| 	help | ||||
| 	  Enable hardware performance counter support for perf events. If | ||||
|  |  | |||
|  | @ -79,15 +79,6 @@ config CMDLINE_OVERRIDE | |||
| 
 | ||||
| 	  Normally, you will choose 'N' here. | ||||
| 
 | ||||
| config SMTC_IDLE_HOOK_DEBUG | ||||
| 	bool "Enable additional debug checks before going into CPU idle loop" | ||||
| 	depends on DEBUG_KERNEL && MIPS_MT_SMTC | ||||
| 	help | ||||
| 	  This option enables additional debug checks before going into the | ||||
| 	  CPU idle loop.  For details on these checks, see | ||||
| 	  arch/mips/kernel/smtc.c.  This debugging option results in significant | ||||
| 	  overhead and should be disabled in production kernels. | ||||
| 
 | ||||
| config SB1XXX_CORELIS | ||||
| 	bool "Corelis Debugger" | ||||
| 	depends on SIBYTE_SB1xxx_SOC | ||||
|  |  | |||
|  | @ -1,196 +0,0 @@ | |||
| CONFIG_MIPS_MALTA=y | ||||
| CONFIG_CPU_LITTLE_ENDIAN=y | ||||
| CONFIG_CPU_MIPS32_R2=y | ||||
| CONFIG_PAGE_SIZE_16KB=y | ||||
| CONFIG_MIPS_MT_SMTC=y | ||||
| # CONFIG_MIPS_MT_FPAFF is not set | ||||
| CONFIG_NR_CPUS=9 | ||||
| CONFIG_HZ_48=y | ||||
| CONFIG_LOCALVERSION="smtc" | ||||
| CONFIG_SYSVIPC=y | ||||
| CONFIG_POSIX_MQUEUE=y | ||||
| CONFIG_AUDIT=y | ||||
| CONFIG_IKCONFIG=y | ||||
| CONFIG_IKCONFIG_PROC=y | ||||
| CONFIG_LOG_BUF_SHIFT=15 | ||||
| CONFIG_SYSCTL_SYSCALL=y | ||||
| CONFIG_EMBEDDED=y | ||||
| CONFIG_SLAB=y | ||||
| CONFIG_MODULES=y | ||||
| CONFIG_MODULE_UNLOAD=y | ||||
| CONFIG_MODVERSIONS=y | ||||
| CONFIG_MODULE_SRCVERSION_ALL=y | ||||
| # CONFIG_BLK_DEV_BSG is not set | ||||
| CONFIG_PCI=y | ||||
| # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||||
| CONFIG_NET=y | ||||
| CONFIG_PACKET=y | ||||
| CONFIG_UNIX=y | ||||
| CONFIG_XFRM_USER=m | ||||
| CONFIG_NET_KEY=y | ||||
| CONFIG_INET=y | ||||
| CONFIG_IP_MULTICAST=y | ||||
| CONFIG_IP_ADVANCED_ROUTER=y | ||||
| CONFIG_IP_MULTIPLE_TABLES=y | ||||
| CONFIG_IP_ROUTE_MULTIPATH=y | ||||
| CONFIG_IP_ROUTE_VERBOSE=y | ||||
| CONFIG_IP_PNP=y | ||||
| CONFIG_IP_PNP_DHCP=y | ||||
| CONFIG_IP_PNP_BOOTP=y | ||||
| CONFIG_NET_IPIP=m | ||||
| CONFIG_IP_MROUTE=y | ||||
| CONFIG_IP_PIMSM_V1=y | ||||
| CONFIG_IP_PIMSM_V2=y | ||||
| CONFIG_SYN_COOKIES=y | ||||
| CONFIG_INET_AH=m | ||||
| CONFIG_INET_ESP=m | ||||
| CONFIG_INET_IPCOMP=m | ||||
| # CONFIG_INET_LRO is not set | ||||
| CONFIG_INET6_AH=m | ||||
| CONFIG_INET6_ESP=m | ||||
| CONFIG_INET6_IPCOMP=m | ||||
| CONFIG_IPV6_TUNNEL=m | ||||
| CONFIG_BRIDGE=m | ||||
| CONFIG_VLAN_8021Q=m | ||||
| CONFIG_ATALK=m | ||||
| CONFIG_DEV_APPLETALK=m | ||||
| CONFIG_IPDDP=m | ||||
| CONFIG_IPDDP_ENCAP=y | ||||
| CONFIG_NET_SCHED=y | ||||
| CONFIG_NET_SCH_CBQ=m | ||||
| CONFIG_NET_SCH_HTB=m | ||||
| CONFIG_NET_SCH_HFSC=m | ||||
| CONFIG_NET_SCH_PRIO=m | ||||
| CONFIG_NET_SCH_RED=m | ||||
| CONFIG_NET_SCH_SFQ=m | ||||
| CONFIG_NET_SCH_TEQL=m | ||||
| CONFIG_NET_SCH_TBF=m | ||||
| CONFIG_NET_SCH_GRED=m | ||||
| CONFIG_NET_SCH_DSMARK=m | ||||
| CONFIG_NET_SCH_NETEM=m | ||||
| CONFIG_NET_SCH_INGRESS=m | ||||
| CONFIG_NET_CLS_BASIC=m | ||||
| CONFIG_NET_CLS_TCINDEX=m | ||||
| CONFIG_NET_CLS_ROUTE4=m | ||||
| CONFIG_NET_CLS_FW=m | ||||
| CONFIG_NET_CLS_U32=m | ||||
| CONFIG_NET_CLS_RSVP=m | ||||
| CONFIG_NET_CLS_RSVP6=m | ||||
| CONFIG_NET_CLS_ACT=y | ||||
| CONFIG_NET_ACT_POLICE=y | ||||
| CONFIG_NET_CLS_IND=y | ||||
| # CONFIG_WIRELESS is not set | ||||
| CONFIG_DEVTMPFS=y | ||||
| CONFIG_BLK_DEV_LOOP=y | ||||
| CONFIG_BLK_DEV_CRYPTOLOOP=m | ||||
| CONFIG_IDE=y | ||||
| # CONFIG_IDE_PROC_FS is not set | ||||
| # CONFIG_IDEPCI_PCIBUS_ORDER is not set | ||||
| CONFIG_BLK_DEV_GENERIC=y | ||||
| CONFIG_BLK_DEV_PIIX=y | ||||
| CONFIG_SCSI=y | ||||
| CONFIG_BLK_DEV_SD=y | ||||
| CONFIG_CHR_DEV_SG=y | ||||
| # CONFIG_SCSI_LOWLEVEL is not set | ||||
| CONFIG_NETDEVICES=y | ||||
| # CONFIG_NET_VENDOR_3COM is not set | ||||
| # CONFIG_NET_VENDOR_ADAPTEC is not set | ||||
| # CONFIG_NET_VENDOR_ALTEON is not set | ||||
| CONFIG_PCNET32=y | ||||
| # CONFIG_NET_VENDOR_ATHEROS is not set | ||||
| # CONFIG_NET_VENDOR_BROADCOM is not set | ||||
| # CONFIG_NET_VENDOR_BROCADE is not set | ||||
| # CONFIG_NET_VENDOR_CHELSIO is not set | ||||
| # CONFIG_NET_VENDOR_CISCO is not set | ||||
| # CONFIG_NET_VENDOR_DEC is not set | ||||
| # CONFIG_NET_VENDOR_DLINK is not set | ||||
| # CONFIG_NET_VENDOR_EMULEX is not set | ||||
| # CONFIG_NET_VENDOR_EXAR is not set | ||||
| # CONFIG_NET_VENDOR_HP is not set | ||||
| # CONFIG_NET_VENDOR_INTEL is not set | ||||
| # CONFIG_NET_VENDOR_MARVELL is not set | ||||
| # CONFIG_NET_VENDOR_MELLANOX is not set | ||||
| # CONFIG_NET_VENDOR_MICREL is not set | ||||
| # CONFIG_NET_VENDOR_MYRI is not set | ||||
| # CONFIG_NET_VENDOR_NATSEMI is not set | ||||
| # CONFIG_NET_VENDOR_NVIDIA is not set | ||||
| # CONFIG_NET_VENDOR_OKI is not set | ||||
| # CONFIG_NET_PACKET_ENGINE is not set | ||||
| # CONFIG_NET_VENDOR_QLOGIC is not set | ||||
| # CONFIG_NET_VENDOR_REALTEK is not set | ||||
| # CONFIG_NET_VENDOR_RDC is not set | ||||
| # CONFIG_NET_VENDOR_SEEQ is not set | ||||
| # CONFIG_NET_VENDOR_SILAN is not set | ||||
| # CONFIG_NET_VENDOR_SIS is not set | ||||
| # CONFIG_NET_VENDOR_SMSC is not set | ||||
| # CONFIG_NET_VENDOR_STMICRO is not set | ||||
| # CONFIG_NET_VENDOR_SUN is not set | ||||
| # CONFIG_NET_VENDOR_TEHUTI is not set | ||||
| # CONFIG_NET_VENDOR_TI is not set | ||||
| # CONFIG_NET_VENDOR_TOSHIBA is not set | ||||
| # CONFIG_NET_VENDOR_VIA is not set | ||||
| # CONFIG_WLAN is not set | ||||
| # CONFIG_VT is not set | ||||
| CONFIG_LEGACY_PTY_COUNT=16 | ||||
| CONFIG_SERIAL_8250=y | ||||
| CONFIG_SERIAL_8250_CONSOLE=y | ||||
| CONFIG_HW_RANDOM=y | ||||
| # CONFIG_HWMON is not set | ||||
| CONFIG_VIDEO_OUTPUT_CONTROL=m | ||||
| CONFIG_FB=y | ||||
| CONFIG_FIRMWARE_EDID=y | ||||
| CONFIG_FB_MATROX=y | ||||
| CONFIG_FB_MATROX_G=y | ||||
| CONFIG_USB=y | ||||
| CONFIG_USB_EHCI_HCD=y | ||||
| # CONFIG_USB_EHCI_TT_NEWSCHED is not set | ||||
| CONFIG_USB_UHCI_HCD=y | ||||
| CONFIG_USB_STORAGE=y | ||||
| CONFIG_NEW_LEDS=y | ||||
| CONFIG_LEDS_CLASS=y | ||||
| CONFIG_LEDS_TRIGGERS=y | ||||
| CONFIG_LEDS_TRIGGER_TIMER=y | ||||
| CONFIG_LEDS_TRIGGER_IDE_DISK=y | ||||
| CONFIG_LEDS_TRIGGER_HEARTBEAT=y | ||||
| CONFIG_LEDS_TRIGGER_BACKLIGHT=y | ||||
| CONFIG_LEDS_TRIGGER_DEFAULT_ON=y | ||||
| CONFIG_RTC_CLASS=y | ||||
| CONFIG_RTC_DRV_CMOS=y | ||||
| CONFIG_EXT2_FS=y | ||||
| CONFIG_EXT3_FS=y | ||||
| # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||||
| CONFIG_XFS_FS=y | ||||
| CONFIG_XFS_QUOTA=y | ||||
| CONFIG_XFS_POSIX_ACL=y | ||||
| CONFIG_QUOTA=y | ||||
| CONFIG_QFMT_V2=y | ||||
| CONFIG_MSDOS_FS=m | ||||
| CONFIG_VFAT_FS=m | ||||
| CONFIG_PROC_KCORE=y | ||||
| CONFIG_TMPFS=y | ||||
| CONFIG_NFS_FS=y | ||||
| CONFIG_ROOT_NFS=y | ||||
| CONFIG_CIFS=m | ||||
| CONFIG_CIFS_WEAK_PW_HASH=y | ||||
| CONFIG_CIFS_XATTR=y | ||||
| CONFIG_CIFS_POSIX=y | ||||
| CONFIG_NLS_CODEPAGE_437=m | ||||
| CONFIG_NLS_ISO8859_1=m | ||||
| # CONFIG_FTRACE is not set | ||||
| CONFIG_CRYPTO_NULL=m | ||||
| CONFIG_CRYPTO_PCBC=m | ||||
| CONFIG_CRYPTO_HMAC=y | ||||
| CONFIG_CRYPTO_MICHAEL_MIC=m | ||||
| CONFIG_CRYPTO_SHA512=m | ||||
| CONFIG_CRYPTO_TGR192=m | ||||
| CONFIG_CRYPTO_WP512=m | ||||
| CONFIG_CRYPTO_ANUBIS=m | ||||
| CONFIG_CRYPTO_BLOWFISH=m | ||||
| CONFIG_CRYPTO_CAST5=m | ||||
| CONFIG_CRYPTO_CAST6=m | ||||
| CONFIG_CRYPTO_KHAZAD=m | ||||
| CONFIG_CRYPTO_SERPENT=m | ||||
| CONFIG_CRYPTO_TEA=m | ||||
| CONFIG_CRYPTO_TWOFISH=m | ||||
| # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||||
| # CONFIG_CRYPTO_HW is not set | ||||
|  | @ -17,26 +17,8 @@ | |||
| #ifdef CONFIG_64BIT | ||||
| #include <asm/asmmacro-64.h> | ||||
| #endif | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #include <asm/mipsmtregs.h> | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	.macro	local_irq_enable reg=t0 | ||||
| 	mfc0	\reg, CP0_TCSTATUS | ||||
| 	ori	\reg, \reg, TCSTATUS_IXMT | ||||
| 	xori	\reg, \reg, TCSTATUS_IXMT | ||||
| 	mtc0	\reg, CP0_TCSTATUS | ||||
| 	_ehb | ||||
| 	.endm | ||||
| 
 | ||||
| 	.macro	local_irq_disable reg=t0 | ||||
| 	mfc0	\reg, CP0_TCSTATUS | ||||
| 	ori	\reg, \reg, TCSTATUS_IXMT | ||||
| 	mtc0	\reg, CP0_TCSTATUS | ||||
| 	_ehb | ||||
| 	.endm | ||||
| #elif defined(CONFIG_CPU_MIPSR2) | ||||
| #ifdef CONFIG_CPU_MIPSR2 | ||||
| 	.macro	local_irq_enable reg=t0 | ||||
| 	ei | ||||
| 	irq_enable_hazard | ||||
|  | @ -71,7 +53,7 @@ | |||
| 	sw      \reg, TI_PRE_COUNT($28) | ||||
| #endif | ||||
| 	.endm | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| #endif /* CONFIG_CPU_MIPSR2 */ | ||||
| 
 | ||||
| 	.macro	fpu_save_16even thread tmp=t0 | ||||
| 	cfc1	\tmp, fcr31 | ||||
|  |  | |||
|  | @ -65,17 +65,12 @@ struct cpuinfo_mips { | |||
| #ifdef CONFIG_64BIT | ||||
| 	int			vmbits; /* Virtual memory size in bits */ | ||||
| #endif | ||||
| #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifdef CONFIG_MIPS_MT_SMP | ||||
| 	/*
 | ||||
| 	 * In the MIPS MT "SMTC" model, each TC is considered | ||||
| 	 * to be a "CPU" for the purposes of scheduling, but | ||||
| 	 * exception resources, ASID spaces, etc, are common | ||||
| 	 * to all TCs within the same VPE. | ||||
| 	 * There is not necessarily a 1:1 mapping of VPE num to CPU number | ||||
| 	 * in particular on multi-core systems. | ||||
| 	 */ | ||||
| 	int			vpe_id;	 /* Virtual Processor number */ | ||||
| #endif | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	int			tc_id;	 /* Thread Context number */ | ||||
| #endif | ||||
| 	void			*data;	/* Additional data */ | ||||
| 	unsigned int		watch_reg_count;   /* Number that exist */ | ||||
|  | @ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args { | |||
| 	unsigned long n; | ||||
| }; | ||||
| 
 | ||||
| #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifdef CONFIG_MIPS_MT_SMP | ||||
| # define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id) | ||||
| #else | ||||
| # define cpu_vpe_id(cpuinfo)	0 | ||||
|  |  | |||
|  | @ -48,11 +48,7 @@ | |||
| enum fixed_addresses { | ||||
| #define FIX_N_COLOURS 8 | ||||
| 	FIX_CMAP_BEGIN, | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2), | ||||
| #else | ||||
| 	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2), | ||||
| #endif | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	/* reserved pte's for temporary kernel mappings */ | ||||
| 	FIX_KMAP_BEGIN = FIX_CMAP_END + 1, | ||||
|  |  | |||
|  | @ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq) | |||
| #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */ | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 
 | ||||
| struct irqaction; | ||||
| 
 | ||||
| extern unsigned long irq_hwmask[]; | ||||
| extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||||
| 			  unsigned long hwmask); | ||||
| 
 | ||||
| static inline void smtc_im_ack_irq(unsigned int irq) | ||||
| { | ||||
| 	if (irq_hwmask[irq] & ST0_IM) | ||||
| 		set_c0_status(irq_hwmask[irq] & ST0_IM); | ||||
| } | ||||
| 
 | ||||
| #else | ||||
| 
 | ||||
| static inline void smtc_im_ack_irq(unsigned int irq) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||||
| #include <linux/cpumask.h> | ||||
| 
 | ||||
| extern int plat_set_irq_affinity(struct irq_data *d, | ||||
| 				 const struct cpumask *affinity, bool force); | ||||
| extern void smtc_forward_irq(struct irq_data *d); | ||||
| 
 | ||||
| /*
 | ||||
|  * IRQ affinity hook invoked at the beginning of interrupt dispatch | ||||
|  * if option is enabled. | ||||
|  * | ||||
|  * Up through Linux 2.6.22 (at least) cpumask operations are very | ||||
|  * inefficient on MIPS.	 Initial prototypes of SMTC IRQ affinity | ||||
|  * used a "fast path" per-IRQ-descriptor cache of affinity information | ||||
|  * to reduce latency.  As there is a project afoot to optimize the | ||||
|  * cpumask implementations, this version is optimistically assuming | ||||
|  * that cpumask.h macro overhead is reasonable during interrupt dispatch. | ||||
|  */ | ||||
| static inline int handle_on_other_cpu(unsigned int irq) | ||||
| { | ||||
| 	struct irq_data *d = irq_get_irq_data(irq); | ||||
| 
 | ||||
| 	if (cpumask_test_cpu(smp_processor_id(), d->affinity)) | ||||
| 		return 0; | ||||
| 	smtc_forward_irq(d); | ||||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| #else /* Not doing SMTC affinity */ | ||||
| 
 | ||||
| static inline int handle_on_other_cpu(unsigned int irq) { return 0; } | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | ||||
| 
 | ||||
| static inline void smtc_im_backstop(unsigned int irq) | ||||
| { | ||||
| 	if (irq_hwmask[irq] & 0x0000ff00) | ||||
| 		write_c0_tccontext(read_c0_tccontext() & | ||||
| 				   ~(irq_hwmask[irq] & 0x0000ff00)); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Clear interrupt mask handling "backstop" if irq_hwmask | ||||
|  * entry so indicates. This implies that the ack() or end() | ||||
|  * functions will take over re-enabling the low-level mask. | ||||
|  * Otherwise it will be done on return from exception. | ||||
|  */ | ||||
| static inline int smtc_handle_on_other_cpu(unsigned int irq) | ||||
| { | ||||
| 	int ret = handle_on_other_cpu(irq); | ||||
| 
 | ||||
| 	if (!ret) | ||||
| 		smtc_im_backstop(irq); | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| #else | ||||
| 
 | ||||
| static inline void smtc_im_backstop(unsigned int irq) { } | ||||
| static inline int smtc_handle_on_other_cpu(unsigned int irq) | ||||
| { | ||||
| 	return handle_on_other_cpu(irq); | ||||
| } | ||||
| 
 | ||||
| #endif | ||||
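
Putting the pieces above together: a platform's dispatch path was expected to run the affinity check before servicing an interrupt, and the backstop variant folds in the IM-bit restore. A minimal sketch of such a caller, assuming a hypothetical dispatcher name (plat_irq_dispatch_one() is illustrative, not part of this patch):

/* Hypothetical caller of the hooks above; do_IRQ() is declared just below. */
static void plat_irq_dispatch_one(unsigned int irq)
{
	/*
	 * smtc_handle_on_other_cpu() forwards the IRQ and returns nonzero
	 * if it is affined to another TC; otherwise it clears any
	 * backstopped IM bits so the IRQ can be serviced locally.
	 */
	if (smtc_handle_on_other_cpu(irq))
		return;
	do_IRQ(irq);
}
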
| 
 | ||||
| extern void do_IRQ(unsigned int irq); | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||||
| 
 | ||||
| extern void do_IRQ_no_affinity(unsigned int irq); | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||||
| 
 | ||||
| extern void arch_init_irq(void); | ||||
| extern void spurious_interrupt(void); | ||||
| 
 | ||||
|  |  | |||
|  | @ -17,7 +17,7 @@ | |||
| #include <linux/stringify.h> | ||||
| #include <asm/hazards.h> | ||||
| 
 | ||||
| #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifdef CONFIG_CPU_MIPSR2 | ||||
| 
 | ||||
| static inline void arch_local_irq_disable(void) | ||||
| { | ||||
|  | @ -118,30 +118,15 @@ void arch_local_irq_disable(void); | |||
| unsigned long arch_local_irq_save(void); | ||||
| void arch_local_irq_restore(unsigned long flags); | ||||
| void __arch_local_irq_restore(unsigned long flags); | ||||
| #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ | ||||
| 
 | ||||
| 
 | ||||
| extern void smtc_ipi_replay(void); | ||||
| #endif /* CONFIG_CPU_MIPSR2 */ | ||||
| 
 | ||||
| static inline void arch_local_irq_enable(void) | ||||
| { | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/*
 | ||||
| 	 * SMTC kernel needs to do a software replay of queued | ||||
| 	 * IPIs, at the cost of call overhead on each local_irq_enable() | ||||
| 	 */ | ||||
| 	smtc_ipi_replay(); | ||||
| #endif | ||||
| 	__asm__ __volatile__( | ||||
| 	"	.set	push						\n" | ||||
| 	"	.set	reorder						\n" | ||||
| 	"	.set	noat						\n" | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n" | ||||
| 	"	ori	$1, 0x400					\n" | ||||
| 	"	xori	$1, 0x400					\n" | ||||
| 	"	mtc0	$1, $2, 1					\n" | ||||
| #elif defined(CONFIG_CPU_MIPSR2) | ||||
| #if   defined(CONFIG_CPU_MIPSR2) | ||||
| 	"	ei							\n" | ||||
| #else | ||||
| 	"	mfc0	$1,$12						\n" | ||||
|  | @ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void) | |||
| 	asm __volatile__( | ||||
| 	"	.set	push						\n" | ||||
| 	"	.set	reorder						\n" | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	"	mfc0	%[flags], $2, 1					\n" | ||||
| #else | ||||
| 	"	mfc0	%[flags], $12					\n" | ||||
| #endif | ||||
| 	"	.set	pop						\n" | ||||
| 	: [flags] "=r" (flags)); | ||||
| 
 | ||||
|  | @ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void) | |||
| 
 | ||||
| static inline int arch_irqs_disabled_flags(unsigned long flags) | ||||
| { | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/*
 | ||||
| 	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU | ||||
| 	 */ | ||||
| 	return flags & 0x400; | ||||
| #else | ||||
| 	return !(flags & 1); | ||||
| #endif | ||||
| } | ||||
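
The two branches of arch_irqs_disabled_flags() above encode "interrupts disabled" in opposite senses: SMTC tested the per-TC TCStatus.IXMT bit (0x400; set means masked), while the surviving code tests Status.IE (bit 0; clear means masked). Restated as a hedged C sketch:

#define TCSTATUS_IXMT	0x400	/* per-TC interrupt-exempt bit (SMTC) */
#define ST0_IE		0x1	/* global interrupt-enable bit */

static inline int smtc_irqs_disabled(unsigned long flags)
{
	return flags & TCSTATUS_IXMT;	/* bit set => masked */
}

static inline int classic_irqs_disabled(unsigned long flags)
{
	return !(flags & ST0_IE);	/* bit clear => masked */
}
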
| 
 | ||||
| #endif /* #ifndef __ASSEMBLY__ */ | ||||
|  |  | |||
|  | @ -80,36 +80,6 @@ | |||
| 	.endm | ||||
| 
 | ||||
| 	.macro	kernel_entry_setup | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	mfc0	t0, CP0_CONFIG | ||||
| 	bgez	t0, 9f | ||||
| 	mfc0	t0, CP0_CONFIG, 1 | ||||
| 	bgez	t0, 9f | ||||
| 	mfc0	t0, CP0_CONFIG, 2 | ||||
| 	bgez	t0, 9f | ||||
| 	mfc0	t0, CP0_CONFIG, 3 | ||||
| 	and	t0, 1<<2 | ||||
| 	bnez	t0, 0f | ||||
| 9: | ||||
| 	/* Assume we came from YAMON... */ | ||||
| 	PTR_LA	v0, 0x9fc00534	/* YAMON print */ | ||||
| 	lw	v0, (v0) | ||||
| 	move	a0, zero | ||||
| 	PTR_LA	a1, nonmt_processor | ||||
| 	jal	v0 | ||||
| 
 | ||||
| 	PTR_LA	v0, 0x9fc00520	/* YAMON exit */ | ||||
| 	lw	v0, (v0) | ||||
| 	li	a0, 1 | ||||
| 	jal	v0 | ||||
| 
 | ||||
| 1:	b	1b | ||||
| 
 | ||||
| 	__INITDATA | ||||
| nonmt_processor: | ||||
| 	.asciz	"SMTC kernel requires the MT ASE to run\n" | ||||
| 	__FINIT | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_EVA | ||||
| 	sync | ||||
|  |  | |||
|  | @ -10,37 +10,6 @@ | |||
| #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H | ||||
| 
 | ||||
| 	.macro	kernel_entry_setup | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	mfc0	t0, CP0_CONFIG | ||||
| 	bgez	t0, 9f | ||||
| 	mfc0	t0, CP0_CONFIG, 1 | ||||
| 	bgez	t0, 9f | ||||
| 	mfc0	t0, CP0_CONFIG, 2 | ||||
| 	bgez	t0, 9f | ||||
| 	mfc0	t0, CP0_CONFIG, 3 | ||||
| 	and	t0, 1<<2 | ||||
| 	bnez	t0, 0f | ||||
| 9: | ||||
| 	/* Assume we came from YAMON... */ | ||||
| 	PTR_LA	v0, 0x9fc00534	/* YAMON print */ | ||||
| 	lw	v0, (v0) | ||||
| 	move	a0, zero | ||||
| 	PTR_LA	a1, nonmt_processor | ||||
| 	jal	v0 | ||||
| 
 | ||||
| 	PTR_LA	v0, 0x9fc00520	/* YAMON exit */ | ||||
| 	lw	v0, (v0) | ||||
| 	li	a0, 1 | ||||
| 	jal	v0 | ||||
| 
 | ||||
| 1:	b	1b | ||||
| 
 | ||||
| 	__INITDATA | ||||
| nonmt_processor: | ||||
| 	.asciz	"SMTC kernel requires the MT ASE to run\n" | ||||
| 	__FINIT | ||||
| 0: | ||||
| #endif | ||||
| 	.endm | ||||
| 
 | ||||
| /*
 | ||||
|  |  | |||
|  | @ -1,7 +1,6 @@ | |||
| /*
 | ||||
|  * Definitions and declarations for MIPS MT support | ||||
|  * that are common between SMTC, VSMP, and/or AP/SP | ||||
|  * kernel models. | ||||
|  * Definitions and declarations for MIPS MT support that are common between | ||||
|  * the VSMP and AP/SP kernel models. | ||||
|  */ | ||||
| #ifndef __ASM_MIPS_MT_H | ||||
| #define __ASM_MIPS_MT_H | ||||
|  |  | |||
|  | @ -1014,19 +1014,8 @@ do {									\ | |||
| #define write_c0_compare3(val)	__write_32bit_c0_register($11, 7, val) | ||||
| 
 | ||||
| #define read_c0_status()	__read_32bit_c0_register($12, 0) | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #define write_c0_status(val)						\ | ||||
| do {									\ | ||||
| 	__write_32bit_c0_register($12, 0, val);				\ | ||||
| 	__ehb();							\ | ||||
| } while (0) | ||||
| #else | ||||
| /*
 | ||||
|  * Legacy non-SMTC code, which may be hazardous | ||||
|  * but which might not support EHB | ||||
|  */ | ||||
| 
 | ||||
| #define write_c0_status(val)	__write_32bit_c0_register($12, 0, val) | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| #define read_c0_cause()		__read_32bit_c0_register($13, 0) | ||||
| #define write_c0_cause(val)	__write_32bit_c0_register($13, 0, val) | ||||
|  | @ -1750,11 +1739,6 @@ static inline void tlb_write_random(void) | |||
| /*
 | ||||
|  * Manipulate bits in a c0 register. | ||||
|  */ | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| /*
 | ||||
|  * SMTC Linux requires shutting-down microthread scheduling | ||||
|  * during CP0 register read-modify-write sequences. | ||||
|  */ | ||||
| #define __BUILD_SET_C0(name)					\ | ||||
| static inline unsigned int					\ | ||||
| set_c0_##name(unsigned int set)					\ | ||||
|  | @ -1793,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val)		\ | |||
| 	return res;						\ | ||||
| } | ||||
| 
 | ||||
| #else /* SMTC versions that manage MT scheduling */ | ||||
| 
 | ||||
| #include <linux/irqflags.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with | ||||
|  * header file recursion. | ||||
|  */ | ||||
| static inline unsigned int __dmt(void) | ||||
| { | ||||
| 	int res; | ||||
| 
 | ||||
| 	__asm__ __volatile__( | ||||
| 	"	.set	push						\n" | ||||
| 	"	.set	mips32r2					\n" | ||||
| 	"	.set	noat						\n" | ||||
| 	"	.word	0x41610BC1			# dmt $1	\n" | ||||
| 	"	ehb							\n" | ||||
| 	"	move	%0, $1						\n" | ||||
| 	"	.set	pop						\n" | ||||
| 	: "=r" (res)); | ||||
| 
 | ||||
| 	instruction_hazard(); | ||||
| 
 | ||||
| 	return res; | ||||
| } | ||||
| 
 | ||||
| #define __VPECONTROL_TE_SHIFT	15 | ||||
| #define __VPECONTROL_TE		(1UL << __VPECONTROL_TE_SHIFT) | ||||
| 
 | ||||
| #define __EMT_ENABLE		__VPECONTROL_TE | ||||
| 
 | ||||
| static inline void __emt(unsigned int previous) | ||||
| { | ||||
| 	if ((previous & __EMT_ENABLE)) | ||||
| 		__asm__ __volatile__( | ||||
| 		"	.set	mips32r2				\n" | ||||
| 		"	.word	0x41600be1		# emt		\n" | ||||
| 		"	ehb						\n" | ||||
| 		"	.set	mips0					\n"); | ||||
| } | ||||
| 
 | ||||
| static inline void __ehb(void) | ||||
| { | ||||
| 	__asm__ __volatile__( | ||||
| 	"	.set	mips32r2					\n" | ||||
| 	"	ehb							\n"		"	.set	mips0						\n"); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Note that local_irq_save/restore affect TC-specific IXMT state, | ||||
|  * not Status.IE as in non-SMTC kernel. | ||||
|  */ | ||||
| 
 | ||||
| #define __BUILD_SET_C0(name)					\ | ||||
| static inline unsigned int					\ | ||||
| set_c0_##name(unsigned int set)					\ | ||||
| {								\ | ||||
| 	unsigned int res;					\ | ||||
| 	unsigned int new;					\ | ||||
| 	unsigned int omt;					\ | ||||
| 	unsigned long flags;					\ | ||||
| 								\ | ||||
| 	local_irq_save(flags);					\ | ||||
| 	omt = __dmt();						\ | ||||
| 	res = read_c0_##name();					\ | ||||
| 	new = res | set;					\ | ||||
| 	write_c0_##name(new);					\ | ||||
| 	__emt(omt);						\ | ||||
| 	local_irq_restore(flags);				\ | ||||
| 								\ | ||||
| 	return res;						\ | ||||
| }								\ | ||||
| 								\ | ||||
| static inline unsigned int					\ | ||||
| clear_c0_##name(unsigned int clear)				\ | ||||
| {								\ | ||||
| 	unsigned int res;					\ | ||||
| 	unsigned int new;					\ | ||||
| 	unsigned int omt;					\ | ||||
| 	unsigned long flags;					\ | ||||
| 								\ | ||||
| 	local_irq_save(flags);					\ | ||||
| 	omt = __dmt();						\ | ||||
| 	res = read_c0_##name();					\ | ||||
| 	new = res & ~clear;					\ | ||||
| 	write_c0_##name(new);					\ | ||||
| 	__emt(omt);						\ | ||||
| 	local_irq_restore(flags);				\ | ||||
| 								\ | ||||
| 	return res;						\ | ||||
| }								\ | ||||
| 								\ | ||||
| static inline unsigned int					\ | ||||
| change_c0_##name(unsigned int change, unsigned int newbits)	\ | ||||
| {								\ | ||||
| 	unsigned int res;					\ | ||||
| 	unsigned int new;					\ | ||||
| 	unsigned int omt;					\ | ||||
| 	unsigned long flags;					\ | ||||
| 								\ | ||||
| 	local_irq_save(flags);					\ | ||||
| 								\ | ||||
| 	omt = __dmt();						\ | ||||
| 	res = read_c0_##name();					\ | ||||
| 	new = res & ~change;					\ | ||||
| 	new |= (newbits & change);				\ | ||||
| 	write_c0_##name(new);					\ | ||||
| 	__emt(omt);						\ | ||||
| 	local_irq_restore(flags);				\ | ||||
| 								\ | ||||
| 	return res;						\ | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| __BUILD_SET_C0(status) | ||||
| __BUILD_SET_C0(cause) | ||||
| __BUILD_SET_C0(config) | ||||
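
For context, the instantiations above generate the usual accessor families (set_c0_status(), clear_c0_status(), change_c0_status(), and so on); under SMTC each expansion additionally bracketed the read-modify-write with __dmt()/__emt() as shown. A typical, illustrative use (0x8000 is Status.IM7, the line the CP0 timer usually raises):

/* Illustrative only: mask the CP0 timer interrupt line. */
static inline void mask_cp0_timer(void)
{
	clear_c0_status(0x8000);	/* also returns the old Status value */
}
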
|  |  | |||
|  | @ -18,10 +18,6 @@ | |||
| #include <asm/cacheflush.h> | ||||
| #include <asm/hazards.h> | ||||
| #include <asm/tlbflush.h> | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #include <asm/mipsmtregs.h> | ||||
| #include <asm/smtc.h> | ||||
| #endif /* SMTC */ | ||||
| #include <asm-generic/mm_hooks.h> | ||||
| 
 | ||||
| #define TLBMISS_HANDLER_SETUP_PGD(pgd)					\ | ||||
|  | @ -63,13 +59,6 @@ extern unsigned long pgd_current[]; | |||
| #define ASID_INC	0x10 | ||||
| #define ASID_MASK	0xff0 | ||||
| 
 | ||||
| #elif defined(CONFIG_MIPS_MT_SMTC) | ||||
| 
 | ||||
| #define ASID_INC	0x1 | ||||
| extern unsigned long smtc_asid_mask; | ||||
| #define ASID_MASK	(smtc_asid_mask) | ||||
| #define HW_ASID_MASK	0xff | ||||
| /* End SMTC/34K debug hack */ | ||||
| #else /* FIXME: not correct for R6000 */ | ||||
| 
 | ||||
| #define ASID_INC	0x1 | ||||
|  | @ -92,7 +81,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
| #define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) | ||||
| #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) | ||||
| 
 | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| /* Normal, classic MIPS get_new_mmu_context */ | ||||
| static inline void | ||||
| get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | ||||
|  | @ -115,12 +103,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
| 	cpu_context(cpu, mm) = asid_cache(cpu) = asid; | ||||
| } | ||||
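
The hunk elides most of the classic allocator's body. A minimal sketch of what it does, assuming the ASID_* constants defined above (the name carries a _sketch suffix to mark it as a reconstruction; the real body also flushes virtually tagged icaches on rollover):

static inline void
get_new_mmu_context_sketch(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu) + ASID_INC;

	if (!(asid & ASID_MASK)) {	/* this generation is exhausted */
		local_flush_tlb_all();	/* start a new ASID cycle */
		if (!asid)		/* counter wrapped to zero */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
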
| 
 | ||||
| #else /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| #define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu)) | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Initialize the context related info for a new mm_struct | ||||
|  * instance. | ||||
|  | @ -141,46 +123,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
| { | ||||
| 	unsigned int cpu = smp_processor_id(); | ||||
| 	unsigned long flags; | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	unsigned long oldasid; | ||||
| 	unsigned long mtflags; | ||||
| 	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||||
| 	local_irq_save(flags); | ||||
| 	mtflags = dvpe(); | ||||
| #else /* Not SMTC */ | ||||
| 	local_irq_save(flags); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	/* Check if our ASID is of an older version and thus invalid */ | ||||
| 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) | ||||
| 		get_new_mmu_context(next, cpu); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/*
 | ||||
| 	 * If the EntryHi ASID being replaced happens to be | ||||
| 	 * the value flagged at ASID recycling time as having | ||||
| 	 * an extended life, clear the bit showing it being | ||||
| 	 * in use by this "CPU", and if that's the last bit, | ||||
| 	 * free up the ASID value for use and flush any old | ||||
| 	 * instances of it from the TLB. | ||||
| 	 */ | ||||
| 	oldasid = (read_c0_entryhi() & ASID_MASK); | ||||
| 	if(smtc_live_asid[mytlb][oldasid]) { | ||||
| 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||||
| 		if(smtc_live_asid[mytlb][oldasid] == 0) | ||||
| 			smtc_flush_tlb_asid(oldasid); | ||||
| 	} | ||||
| 	/*
 | ||||
| 	 * Tread softly on EntryHi, and so long as we support | ||||
| 	 * having ASID_MASK smaller than the hardware maximum, | ||||
| 	 * make sure no "soft" bits become "hard"... | ||||
| 	 */ | ||||
| 	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | | ||||
| 			 cpu_asid(cpu, next)); | ||||
| 	ehb(); /* Make sure it propagates to TCStatus */ | ||||
| 	evpe(mtflags); | ||||
| #else | ||||
| 	write_c0_entryhi(cpu_asid(cpu, next)); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	TLBMISS_HANDLER_SETUP_PGD(next->pgd); | ||||
| 
 | ||||
| 	/*
 | ||||
|  | @ -213,34 +161,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) | |||
| 	unsigned long flags; | ||||
| 	unsigned int cpu = smp_processor_id(); | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	unsigned long oldasid; | ||||
| 	unsigned long mtflags; | ||||
| 	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	local_irq_save(flags); | ||||
| 
 | ||||
| 	/* Unconditionally get a new ASID.  */ | ||||
| 	get_new_mmu_context(next, cpu); | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* See comments for similar code above */ | ||||
| 	mtflags = dvpe(); | ||||
| 	oldasid = read_c0_entryhi() & ASID_MASK; | ||||
| 	if(smtc_live_asid[mytlb][oldasid]) { | ||||
| 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||||
| 		if(smtc_live_asid[mytlb][oldasid] == 0) | ||||
| 			 smtc_flush_tlb_asid(oldasid); | ||||
| 	} | ||||
| 	/* See comments for similar code above */ | ||||
| 	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | | ||||
| 			 cpu_asid(cpu, next)); | ||||
| 	ehb(); /* Make sure it propagates to TCStatus */ | ||||
| 	evpe(mtflags); | ||||
| #else | ||||
| 	write_c0_entryhi(cpu_asid(cpu, next)); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	TLBMISS_HANDLER_SETUP_PGD(next->pgd); | ||||
| 
 | ||||
| 	/* mark mmu ownership change */ | ||||
|  | @ -258,48 +184,15 @@ static inline void | |||
| drop_mmu_context(struct mm_struct *mm, unsigned cpu) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	unsigned long oldasid; | ||||
| 	/* Can't use spinlock because called from TLB flush within DVPE */ | ||||
| 	unsigned int prevvpe; | ||||
| 	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	local_irq_save(flags); | ||||
| 
 | ||||
| 	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  { | ||||
| 		get_new_mmu_context(mm, cpu); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		/* See comments for similar code above */ | ||||
| 		prevvpe = dvpe(); | ||||
| 		oldasid = (read_c0_entryhi() & ASID_MASK); | ||||
| 		if (smtc_live_asid[mytlb][oldasid]) { | ||||
| 			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||||
| 			if(smtc_live_asid[mytlb][oldasid] == 0) | ||||
| 				smtc_flush_tlb_asid(oldasid); | ||||
| 		} | ||||
| 		/* See comments for similar code above */ | ||||
| 		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | ||||
| 				| cpu_asid(cpu, mm)); | ||||
| 		ehb(); /* Make sure it propagates to TCStatus */ | ||||
| 		evpe(prevvpe); | ||||
| #else /* not CONFIG_MIPS_MT_SMTC */ | ||||
| 		write_c0_entryhi(cpu_asid(cpu, mm)); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	} else { | ||||
| 		/* will get a new context next time */ | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| 		cpu_context(cpu, mm) = 0; | ||||
| #else /* SMTC */ | ||||
| 		int i; | ||||
| 
 | ||||
| 		/* SMTC shares the TLB (and ASIDs) across VPEs */ | ||||
| 		for_each_online_cpu(i) { | ||||
| 		    if((smtc_status & SMTC_TLB_SHARED) | ||||
| 		    || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | ||||
| 			cpu_context(i, mm) = 0; | ||||
| 		} | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	} | ||||
| 	local_irq_restore(flags); | ||||
| } | ||||
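
The three removed SMTC branches above repeat one bookkeeping pattern. As a sketch, here is that pattern factored into a helper (smtc_retire_asid() is a hypothetical name; smtc_live_asid[] and smtc_flush_tlb_asid() are the real SMTC symbols deleted elsewhere in this patch):

static inline void smtc_retire_asid(int mytlb, unsigned int cpu)
{
	unsigned long oldasid = read_c0_entryhi() & ASID_MASK;

	/* Drop this "CPU"'s claim on the outgoing ASID; when the last
	 * claim goes away, flush its stale entries from the shared TLB. */
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
}
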
|  |  | |||
|  | @ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr) | |||
| #define MODULE_KERNEL_TYPE "64BIT " | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #define MODULE_KERNEL_SMTC "MT_SMTC " | ||||
| #else | ||||
| #define MODULE_KERNEL_SMTC "" | ||||
| #endif | ||||
| 
 | ||||
| #define MODULE_ARCH_VERMAGIC \ | ||||
| 	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC | ||||
| 	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE | ||||
| 
 | ||||
| #endif /* _ASM_MODULE_H */ | ||||
|  |  | |||
|  | @ -39,9 +39,6 @@ struct pt_regs { | |||
| 	unsigned long cp0_badvaddr; | ||||
| 	unsigned long cp0_cause; | ||||
| 	unsigned long cp0_epc; | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	unsigned long cp0_tcstatus; | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||||
| 	unsigned long long mpl[3];	  /* MTM{0,1,2} */ | ||||
| 	unsigned long long mtp[3];	  /* MTP{0,1,2} */ | ||||
|  |  | |||
|  | @ -43,11 +43,10 @@ | |||
| 	: "i" (op), "R" (*(unsigned char *)(addr))) | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT | ||||
| /*
 | ||||
|  * Temporary hacks for SMTC debug. Optionally force single-threaded | ||||
|  * execution during I-cache flushes. | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Optionally force single-threaded execution during I-cache flushes. | ||||
|  */ | ||||
| #define PROTECT_CACHE_FLUSHES 1 | ||||
| 
 | ||||
| #ifdef PROTECT_CACHE_FLUSHES | ||||
|  |  | |||
|  | @ -1,78 +0,0 @@ | |||
| #ifndef _ASM_SMTC_MT_H | ||||
| #define _ASM_SMTC_MT_H | ||||
| 
 | ||||
| /*
 | ||||
|  * Definitions for SMTC multitasking on MIPS MT cores | ||||
|  */ | ||||
| 
 | ||||
| #include <asm/mips_mt.h> | ||||
| #include <asm/smtc_ipi.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * System-wide SMTC status information | ||||
|  */ | ||||
| 
 | ||||
| extern unsigned int smtc_status; | ||||
| 
 | ||||
| #define SMTC_TLB_SHARED 0x00000001 | ||||
| #define SMTC_MTC_ACTIVE 0x00000002 | ||||
| 
 | ||||
| /*
 | ||||
|  * TLB/ASID Management information | ||||
|  */ | ||||
| 
 | ||||
| #define MAX_SMTC_TLBS 2 | ||||
| #define MAX_SMTC_ASIDS 256 | ||||
| #if NR_CPUS <= 8 | ||||
| typedef char asiduse; | ||||
| #else | ||||
| #if NR_CPUS <= 16 | ||||
| typedef short asiduse; | ||||
| #else | ||||
| typedef long asiduse; | ||||
| #endif | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * VPE Management information | ||||
|  */ | ||||
| 
 | ||||
| #define MAX_SMTC_VPES	MAX_SMTC_TLBS	/* FIXME: May not always be true. */ | ||||
| 
 | ||||
| extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | ||||
| 
 | ||||
| struct mm_struct; | ||||
| struct task_struct; | ||||
| 
 | ||||
| void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | ||||
| void self_ipi(struct smtc_ipi *); | ||||
| void smtc_flush_tlb_asid(unsigned long asid); | ||||
| extern int smtc_build_cpu_map(int startslot); | ||||
| extern void smtc_prepare_cpus(int cpus); | ||||
| extern void smtc_smp_finish(void); | ||||
| extern void smtc_boot_secondary(int cpu, struct task_struct *t); | ||||
| extern void smtc_cpus_done(void); | ||||
| extern void smtc_init_secondary(void); | ||||
| 
 | ||||
| 
 | ||||
| /*
 | ||||
|  * Sharing the TLB between multiple VPEs means that the | ||||
|  * "random" index selection function is not allowed to | ||||
|  * select the current value of the Index register. To | ||||
|  * avoid additional TLB pressure, the Index registers | ||||
|  * are "parked" with an non-Valid value. | ||||
|  */ | ||||
| 
 | ||||
| #define PARKED_INDEX	((unsigned int)0x80000000) | ||||
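
A hedged illustration of the parking the comment describes; whether the deleted TLB code used exactly this call sequence is an assumption, though write_c0_index() itself is a standard mipsregs.h accessor:

/* After probing/writing a specific entry, leave Index pointing nowhere
 * so the shared-TLB random replacement is not constrained by this TC. */
write_c0_index(PARKED_INDEX);
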
| 
 | ||||
| /*
 | ||||
|  * Define low-level interrupt mask for IPIs, if necessary. | ||||
|  * By default, use SW interrupt 1, which requires no external | ||||
|  * hardware support, but which works only for single-core | ||||
|  * MIPS MT systems. | ||||
|  */ | ||||
| #ifndef MIPS_CPU_IPI_IRQ | ||||
| #define MIPS_CPU_IPI_IRQ 1 | ||||
| #endif | ||||
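
Since the default IPI rides on core software interrupt 1, queuing work for a TC on the same VPE ends with a poke of Cause.IP1, exactly what the restore-path assembly earlier in this patch does with C_SW1. A hedged one-liner using the accessors generated in mipsregs.h:

/* Sketch: provoke IPI dispatch within the VPE via software interrupt 1. */
static inline void provoke_ipi_dispatch(void)
{
	set_c0_cause(C_SW1);	/* sets Cause.IP[1] */
}
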
| 
 | ||||
| #endif /*  _ASM_SMTC_MT_H */ | ||||
|  | @ -1,129 +0,0 @@ | |||
| /*
 | ||||
|  * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code. | ||||
|  */ | ||||
| #ifndef __ASM_SMTC_IPI_H | ||||
| #define __ASM_SMTC_IPI_H | ||||
| 
 | ||||
| #include <linux/spinlock.h> | ||||
| 
 | ||||
| //#define SMTC_IPI_DEBUG
 | ||||
| 
 | ||||
| #ifdef SMTC_IPI_DEBUG | ||||
| #include <asm/mipsregs.h> | ||||
| #include <asm/mipsmtregs.h> | ||||
| #endif /* SMTC_IPI_DEBUG */ | ||||
| 
 | ||||
| /*
 | ||||
|  * An IPI "message" | ||||
|  */ | ||||
| 
 | ||||
| struct smtc_ipi { | ||||
| 	struct smtc_ipi *flink; | ||||
| 	int type; | ||||
| 	void *arg; | ||||
| 	int dest; | ||||
| #ifdef	SMTC_IPI_DEBUG | ||||
| 	int sender; | ||||
| 	long stamp; | ||||
| #endif /* SMTC_IPI_DEBUG */ | ||||
| }; | ||||
| 
 | ||||
| /*
 | ||||
|  * Defined IPI Types | ||||
|  */ | ||||
| 
 | ||||
| #define LINUX_SMP_IPI 1 | ||||
| #define SMTC_CLOCK_TICK 2 | ||||
| #define IRQ_AFFINITY_IPI 3 | ||||
| 
 | ||||
| /*
 | ||||
|  * A queue of IPI messages | ||||
|  */ | ||||
| 
 | ||||
| struct smtc_ipi_q { | ||||
| 	struct smtc_ipi *head; | ||||
| 	spinlock_t lock; | ||||
| 	struct smtc_ipi *tail; | ||||
| 	int depth; | ||||
| 	int resched_flag;	/* reschedule already queued */ | ||||
| }; | ||||
| 
 | ||||
| static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&q->lock, flags); | ||||
| 	if (q->head == NULL) | ||||
| 		q->head = q->tail = p; | ||||
| 	else | ||||
| 		q->tail->flink = p; | ||||
| 	p->flink = NULL; | ||||
| 	q->tail = p; | ||||
| 	q->depth++; | ||||
| #ifdef	SMTC_IPI_DEBUG | ||||
| 	p->sender = read_c0_tcbind(); | ||||
| 	p->stamp = read_c0_count(); | ||||
| #endif /* SMTC_IPI_DEBUG */ | ||||
| 	spin_unlock_irqrestore(&q->lock, flags); | ||||
| } | ||||
| 
 | ||||
| static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q) | ||||
| { | ||||
| 	struct smtc_ipi *p; | ||||
| 
 | ||||
| 	if (q->head == NULL) | ||||
| 		p = NULL; | ||||
| 	else { | ||||
| 		p = q->head; | ||||
| 		q->head = q->head->flink; | ||||
| 		q->depth--; | ||||
| 		/* Arguably unnecessary, but leaves queue cleaner */ | ||||
| 		if (q->head == NULL) | ||||
| 			q->tail = NULL; | ||||
| 	} | ||||
| 
 | ||||
| 	return p; | ||||
| } | ||||
| 
 | ||||
| static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	struct smtc_ipi *p; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&q->lock, flags); | ||||
| 	p = __smtc_ipi_dq(q); | ||||
| 	spin_unlock_irqrestore(&q->lock, flags); | ||||
| 
 | ||||
| 	return p; | ||||
| } | ||||
| 
 | ||||
| static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&q->lock, flags); | ||||
| 	if (q->head == NULL) { | ||||
| 		q->head = q->tail = p; | ||||
| 		p->flink = NULL; | ||||
| 	} else { | ||||
| 		p->flink = q->head; | ||||
| 		q->head = p; | ||||
| 	} | ||||
| 	q->depth++; | ||||
| 	spin_unlock_irqrestore(&q->lock, flags); | ||||
| } | ||||
| 
 | ||||
| static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	int retval; | ||||
| 
 | ||||
| 	spin_lock_irqsave(&q->lock, flags); | ||||
| 	retval = q->depth; | ||||
| 	spin_unlock_irqrestore(&q->lock, flags); | ||||
| 	return retval; | ||||
| } | ||||
| 
 | ||||
| extern void smtc_send_ipi(int cpu, int type, unsigned int action); | ||||
| 
 | ||||
| #endif /* __ASM_SMTC_IPI_H */ | ||||
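
For reference, a hedged sketch of how a sender used this queue API; the real callers lived in the deleted smtc.c (IPIQ[] is the per-"CPU" inbound queue array the entry code above indexes, and message allocation is simplified away here):

extern struct smtc_ipi_q IPIQ[NR_CPUS];

static void example_send_ipi(int cpu, struct smtc_ipi *msg)
{
	msg->type = LINUX_SMP_IPI;
	msg->arg  = NULL;
	msg->dest = cpu;
	smtc_ipi_nq(&IPIQ[cpu], msg);	/* IRQ-safe FIFO enqueue */
	/* ...then interrupt the target TC so it drains its queue. */
}
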
|  | @ -1,23 +0,0 @@ | |||
| /*
 | ||||
|  * Definitions for SMTC /proc entries | ||||
|  * Copyright(C) 2005 MIPS Technologies Inc. | ||||
|  */ | ||||
| #ifndef __ASM_SMTC_PROC_H | ||||
| #define __ASM_SMTC_PROC_H | ||||
| 
 | ||||
| /*
 | ||||
|  * per-"CPU" statistics | ||||
|  */ | ||||
| 
 | ||||
| struct smtc_cpu_proc { | ||||
| 	unsigned long timerints; | ||||
| 	unsigned long selfipis; | ||||
| }; | ||||
| 
 | ||||
| extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||||
| 
 | ||||
| /* Count of number of recoveries of "stolen" FPU access rights on 34K */ | ||||
| 
 | ||||
| extern atomic_t smtc_fpu_recoveries; | ||||
| 
 | ||||
| #endif /* __ASM_SMTC_PROC_H */ | ||||
|  | @ -19,22 +19,12 @@ | |||
| #include <asm/asm-offsets.h> | ||||
| #include <asm/thread_info.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * For SMTC kernel, global IE should be left set, and interrupts | ||||
|  * controlled exclusively via IXMT. | ||||
|  */ | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #define STATMASK 0x1e | ||||
| #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||||
| #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||||
| #define STATMASK 0x3f | ||||
| #else | ||||
| #define STATMASK 0x1f | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #include <asm/mipsmtregs.h> | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 		.macro	SAVE_AT | ||||
| 		.set	push | ||||
| 		.set	noat | ||||
|  | @ -186,16 +176,6 @@ | |||
| 		mfc0	v1, CP0_STATUS | ||||
| 		LONG_S	$2, PT_R2(sp) | ||||
| 		LONG_S	v1, PT_STATUS(sp) | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		/*
 | ||||
| 		 * Ideally, these instructions would be shuffled in | ||||
| 		 * to cover the pipeline delay. | ||||
| 		 */ | ||||
| 		.set	mips32 | ||||
| 		mfc0	k0, CP0_TCSTATUS | ||||
| 		.set	mips0 | ||||
| 		LONG_S	k0, PT_TCSTATUS(sp) | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		LONG_S	$4, PT_R4(sp) | ||||
| 		mfc0	v1, CP0_CAUSE | ||||
| 		LONG_S	$5, PT_R5(sp) | ||||
|  | @ -321,36 +301,6 @@ | |||
| 		.set	push | ||||
| 		.set	reorder | ||||
| 		.set	noat | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		.set	mips32r2 | ||||
| 		/*
 | ||||
| 		 * We need to make sure the read-modify-write | ||||
| 		 * of Status below isn't perturbed by an interrupt | ||||
| 		 * or cross-TC access, so we need to do at least a DMT, | ||||
| 		 * protected by an interrupt-inhibit. But setting IXMT | ||||
| 		 * also creates a few-cycle window where an IPI could | ||||
| 		 * be queued and not be detected before potentially | ||||
| 		 * returning to a WAIT or user-mode loop. It must be | ||||
| 		 * replayed. | ||||
| 		 * | ||||
| 		 * We're in the middle of a context switch, and | ||||
| 		 * we can't dispatch it directly without trashing | ||||
| 		 * some registers, so we'll try to detect this unlikely | ||||
| 		 * case and program a software interrupt in the VPE, | ||||
| 		 * as would be done for a cross-VPE IPI.  To accommodate | ||||
| 		 * the handling of that case, we're doing a DVPE instead | ||||
| 		 * of just a DMT here to protect against other threads. | ||||
| 		 * This is a lot of cruft to cover a tiny window. | ||||
| 		 * If you can find a better design, implement it! | ||||
| 		 * | ||||
| 		 */ | ||||
| 		mfc0	v0, CP0_TCSTATUS | ||||
| 		ori	v0, TCSTATUS_IXMT | ||||
| 		mtc0	v0, CP0_TCSTATUS | ||||
| 		_ehb | ||||
| 		DVPE	5				# dvpe a1 | ||||
| 		jal	mips_ihb | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		mfc0	a0, CP0_STATUS | ||||
| 		ori	a0, STATMASK | ||||
| 		xori	a0, STATMASK | ||||
|  | @ -362,59 +312,6 @@ | |||
| 		and	v0, v1 | ||||
| 		or	v0, a0 | ||||
| 		mtc0	v0, CP0_STATUS | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| /*
 | ||||
|  * Only after EXL/ERL have been restored to status can we | ||||
|  * restore TCStatus.IXMT. | ||||
|  */ | ||||
| 		LONG_L	v1, PT_TCSTATUS(sp) | ||||
| 		_ehb | ||||
| 		mfc0	a0, CP0_TCSTATUS | ||||
| 		andi	v1, TCSTATUS_IXMT | ||||
| 		bnez	v1, 0f | ||||
| 
 | ||||
| /*
 | ||||
|  * We'd like to detect any IPIs queued in the tiny window | ||||
|  * above and request an software interrupt to service them | ||||
|  * when we ERET. | ||||
|  * | ||||
|  * Computing the offset into the IPIQ array of the executing | ||||
|  * TC's IPI queue in-line would be tedious.  We use part of | ||||
|  * the TCContext register to hold 16 bits of offset that we | ||||
|  * can add in-line to find the queue head. | ||||
|  */ | ||||
| 		mfc0	v0, CP0_TCCONTEXT | ||||
| 		la	a2, IPIQ | ||||
| 		srl	v0, v0, 16 | ||||
| 		addu	a2, a2, v0 | ||||
| 		LONG_L	v0, 0(a2) | ||||
| 		beqz	v0, 0f | ||||
| /*
 | ||||
|  * If we have a queue, provoke dispatch within the VPE by setting C_SW1 | ||||
|  */ | ||||
| 		mfc0	v0, CP0_CAUSE | ||||
| 		ori	v0, v0, C_SW1 | ||||
| 		mtc0	v0, CP0_CAUSE | ||||
| 0: | ||||
| 		/*
 | ||||
| 		 * This test should really never branch but | ||||
| 		 * let's be prudent here.  Having atomized | ||||
| 		 * the shared register modifications, we can | ||||
| 		 * now EVPE, and must do so before interrupts | ||||
| 		 * are potentially re-enabled. | ||||
| 		 */ | ||||
| 		andi	a1, a1, MVPCONTROL_EVP | ||||
| 		beqz	a1, 1f | ||||
| 		evpe | ||||
| 1: | ||||
| 		/* We know that TCStatus.IXMT should be set from above */ | ||||
| 		xori	a0, a0, TCSTATUS_IXMT | ||||
| 		or	a0, a0, v1 | ||||
| 		mtc0	a0, CP0_TCSTATUS | ||||
| 		_ehb | ||||
| 
 | ||||
| 		.set	mips0 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		LONG_L	v1, PT_EPC(sp) | ||||
| 		MTC0	v1, CP0_EPC | ||||
| 		LONG_L	$31, PT_R31(sp) | ||||
|  | @ -467,33 +364,11 @@ | |||
|  * Set cp0 enable bit as sign that we're running on the kernel stack | ||||
|  */ | ||||
| 		.macro	CLI | ||||
| #if !defined(CONFIG_MIPS_MT_SMTC) | ||||
| 		mfc0	t0, CP0_STATUS | ||||
| 		li	t1, ST0_CU0 | STATMASK | ||||
| 		or	t0, t1 | ||||
| 		xori	t0, STATMASK | ||||
| 		mtc0	t0, CP0_STATUS | ||||
| #else /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		/*
 | ||||
| 		 * For SMTC, we need to set privilege | ||||
| 		 * and disable interrupts only for the | ||||
| 		 * current TC, using the TCStatus register. | ||||
| 		 */ | ||||
| 		mfc0	t0, CP0_TCSTATUS | ||||
| 		/* Fortunately CU 0 is in the same place in both registers */ | ||||
| 		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||||
| 		li	t1, ST0_CU0 | 0x08001c00 | ||||
| 		or	t0, t1 | ||||
| 		/* Clear TKSU, leave IXMT */ | ||||
| 		xori	t0, 0x00001800 | ||||
| 		mtc0	t0, CP0_TCSTATUS | ||||
| 		_ehb | ||||
| 		/* We need to leave the global IE bit set, but clear EXL...*/ | ||||
| 		mfc0	t0, CP0_STATUS | ||||
| 		ori	t0, ST0_EXL | ST0_ERL | ||||
| 		xori	t0, ST0_EXL | ST0_ERL | ||||
| 		mtc0	t0, CP0_STATUS | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		irq_disable_hazard | ||||
| 		.endm | ||||
| 
 | ||||
|  | @ -502,35 +377,11 @@ | |||
|  * Set cp0 enable bit as sign that we're running on the kernel stack | ||||
|  */ | ||||
| 		.macro	STI | ||||
| #if !defined(CONFIG_MIPS_MT_SMTC) | ||||
| 		mfc0	t0, CP0_STATUS | ||||
| 		li	t1, ST0_CU0 | STATMASK | ||||
| 		or	t0, t1 | ||||
| 		xori	t0, STATMASK & ~1 | ||||
| 		mtc0	t0, CP0_STATUS | ||||
| #else /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		/*
 | ||||
| 		 * For SMTC, we need to set privilege | ||||
| 		 * and enable interrupts only for the | ||||
| 		 * current TC, using the TCStatus register. | ||||
| 		 */ | ||||
| 		_ehb | ||||
| 		mfc0	t0, CP0_TCSTATUS | ||||
| 		/* Fortunately CU 0 is in the same place in both registers */ | ||||
| 		/* Set TCU0, TKSU (for later inversion) and IXMT */ | ||||
| 		li	t1, ST0_CU0 | 0x08001c00 | ||||
| 		or	t0, t1 | ||||
| 		/* Clear TKSU *and* IXMT */ | ||||
| 		xori	t0, 0x00001c00 | ||||
| 		mtc0	t0, CP0_TCSTATUS | ||||
| 		_ehb | ||||
| 		/* We need to leave the global IE bit set, but clear EXL...*/ | ||||
| 		mfc0	t0, CP0_STATUS | ||||
| 		ori	t0, ST0_EXL | ||||
| 		xori	t0, ST0_EXL | ||||
| 		mtc0	t0, CP0_STATUS | ||||
| 		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		irq_enable_hazard | ||||
| 		.endm | ||||
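
One idiom worth noting in the CLI/STI/KMODE macros: an ori/xori pair with the same mask (STATMASK, ST0_EXL | ST0_ERL, TCSTATUS_IXMT, ...) clears those bits without needing a pre-inverted constant in a register. The same trick in C:

/* clear_bits(x, m) == x & ~m, composed exactly as the assembly does */
static inline unsigned int clear_bits(unsigned int x, unsigned int m)
{
	x |= m;		/* ori: force the masked bits to 1 */
	x ^= m;		/* xori: flip them back to 0 */
	return x;
}
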
| 
 | ||||
|  | @ -540,32 +391,6 @@ | |||
|  * Set cp0 enable bit as sign that we're running on the kernel stack | ||||
|  */ | ||||
| 		.macro	KMODE | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		/*
 | ||||
| 		 * This gets baroque in SMTC.  We want to | ||||
| 		 * protect the non-atomic clearing of EXL | ||||
| 		 * with DMT/EMT, but we don't want to take | ||||
| 		 * an interrupt while DMT is still in effect. | ||||
| 		 */ | ||||
| 
 | ||||
| 		/* KMODE gets invoked from both reorder and noreorder code */ | ||||
| 		.set	push | ||||
| 		.set	mips32r2 | ||||
| 		.set	noreorder | ||||
| 		mfc0	v0, CP0_TCSTATUS | ||||
| 		andi	v1, v0, TCSTATUS_IXMT | ||||
| 		ori	v0, TCSTATUS_IXMT | ||||
| 		mtc0	v0, CP0_TCSTATUS | ||||
| 		_ehb | ||||
| 		DMT	2				# dmt	v0 | ||||
| 		/*
 | ||||
| 		 * We don't know a priori if ra is "live" | ||||
| 		 */ | ||||
| 		move	t0, ra | ||||
| 		jal	mips_ihb | ||||
| 		nop	/* delay slot */ | ||||
| 		move	ra, t0 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		mfc0	t0, CP0_STATUS | ||||
| 		li	t1, ST0_CU0 | (STATMASK & ~1) | ||||
| #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||||
|  | @ -576,25 +401,6 @@ | |||
| 		or	t0, t1 | ||||
| 		xori	t0, STATMASK & ~1 | ||||
| 		mtc0	t0, CP0_STATUS | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		_ehb | ||||
| 		andi	v0, v0, VPECONTROL_TE | ||||
| 		beqz	v0, 2f | ||||
| 		nop	/* delay slot */ | ||||
| 		emt | ||||
| 2: | ||||
| 		mfc0	v0, CP0_TCSTATUS | ||||
| 		/* Clear IXMT, then OR in previous value */ | ||||
| 		ori	v0, TCSTATUS_IXMT | ||||
| 		xori	v0, TCSTATUS_IXMT | ||||
| 		or	v0, v1, v0 | ||||
| 		mtc0	v0, CP0_TCSTATUS | ||||
| 		/*
 | ||||
| 		 * irq_disable_hazard below should expand to EHB | ||||
| 		 * on 24K/34K CPUS | ||||
| 		 */ | ||||
| 		.set pop | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		irq_disable_hazard | ||||
| 		.endm | ||||
| 
 | ||||
|  |  | |||
|  | @ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void) | |||
|  * We stash processor id into a COP0 register to retrieve it fast | ||||
|  * at kernel exception entry. | ||||
|  */ | ||||
| #if defined(CONFIG_MIPS_MT_SMTC) | ||||
| #define SMP_CPUID_REG		2, 2	/* TCBIND */ | ||||
| #define ASM_SMP_CPUID_REG	$2, 2 | ||||
| #define SMP_CPUID_PTRSHIFT	19 | ||||
| #elif defined(CONFIG_MIPS_PGD_C0_CONTEXT) | ||||
| #if   defined(CONFIG_MIPS_PGD_C0_CONTEXT) | ||||
| #define SMP_CPUID_REG		20, 0	/* XCONTEXT */ | ||||
| #define ASM_SMP_CPUID_REG	$20 | ||||
| #define SMP_CPUID_PTRSHIFT	48 | ||||
|  | @ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void) | |||
| #define SMP_CPUID_REGSHIFT	(SMP_CPUID_PTRSHIFT + 2) | ||||
| #endif | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #define ASM_CPUID_MFC0		mfc0 | ||||
| #define UASM_i_CPUID_MFC0	uasm_i_mfc0 | ||||
| #else | ||||
| #define ASM_CPUID_MFC0		MFC0 | ||||
| #define UASM_i_CPUID_MFC0	UASM_i_MFC0 | ||||
| #endif | ||||
| 
 | ||||
| #endif /* __KERNEL__ */ | ||||
| #endif /* _ASM_THREAD_INFO_H */ | ||||
|  |  | |||
|  | @ -52,14 +52,11 @@ extern int (*perf_irq)(void); | |||
|  */ | ||||
| extern unsigned int __weak get_c0_compare_int(void); | ||||
| extern int r4k_clockevent_init(void); | ||||
| extern int smtc_clockevent_init(void); | ||||
| extern int gic_clockevent_init(void); | ||||
| 
 | ||||
| static inline int mips_clockevent_init(void) | ||||
| { | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	return smtc_clockevent_init(); | ||||
| #elif defined(CONFIG_CEVT_GIC) | ||||
| #if   defined(CONFIG_CEVT_GIC) | ||||
| 	return (gic_clockevent_init() | r4k_clockevent_init()); | ||||
| #elif defined(CONFIG_CEVT_R4K) | ||||
| 	return r4k_clockevent_init(); | ||||
|  |  | |||
|  | @ -17,7 +17,6 @@ endif | |||
| 
 | ||||
| obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o | ||||
| obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o | ||||
| obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o | ||||
| obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o | ||||
| obj-$(CONFIG_CEVT_GIC)		+= cevt-gic.o | ||||
| obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o | ||||
|  | @ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS)		+= smp-bmips.o bmips_vec.o | |||
| 
 | ||||
| obj-$(CONFIG_MIPS_MT)		+= mips-mt.o | ||||
| obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o | ||||
| obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o | ||||
| obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o | ||||
| obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o | ||||
| obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o | ||||
|  |  | |||
|  | @ -64,9 +64,6 @@ void output_ptreg_defines(void) | |||
| 	OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); | ||||
| 	OFFSET(PT_STATUS, pt_regs, cp0_status); | ||||
| 	OFFSET(PT_CAUSE, pt_regs, cp0_cause); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||||
| 	OFFSET(PT_MPL, pt_regs, mpl); | ||||
| 	OFFSET(PT_MTP, pt_regs, mtp); | ||||
|  |  | |||
|  | @ -12,17 +12,10 @@ | |||
| #include <linux/smp.h> | ||||
| #include <linux/irq.h> | ||||
| 
 | ||||
| #include <asm/smtc_ipi.h> | ||||
| #include <asm/time.h> | ||||
| #include <asm/cevt-r4k.h> | ||||
| #include <asm/gic.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * The SMTC Kernel for the 34K, 1004K, et. al. replaces several | ||||
|  * of these routines with SMTC-specific variants. | ||||
|  */ | ||||
| 
 | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| static int mips_next_event(unsigned long delta, | ||||
| 			   struct clock_event_device *evt) | ||||
| { | ||||
|  | @ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta, | |||
| 	return res; | ||||
| } | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| void mips_set_clock_mode(enum clock_event_mode mode, | ||||
| 				struct clock_event_device *evt) | ||||
| { | ||||
|  | @ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode, | |||
| DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | ||||
| int cp0_timer_irq_installed; | ||||
| 
 | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||||
| { | ||||
| 	const int r2 = cpu_has_mips_r2; | ||||
|  | @ -82,8 +72,6 @@ out: | |||
| 	return IRQ_HANDLED; | ||||
| } | ||||
| 
 | ||||
| #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| struct irqaction c0_compare_irqaction = { | ||||
| 	.handler = c0_compare_interrupt, | ||||
| 	.flags = IRQF_PERCPU | IRQF_TIMER, | ||||
|  | @ -170,7 +158,6 @@ int c0_compare_int_usable(void) | |||
| 	return 1; | ||||
| } | ||||
| 
 | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| int r4k_clockevent_init(void) | ||||
| { | ||||
| 	unsigned int cpu = smp_processor_id(); | ||||
|  | @ -225,4 +212,3 @@ int r4k_clockevent_init(void) | |||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||||
|  |  | |||
|  | @ -1,324 +0,0 @@ | |||
| /*
 | ||||
|  * This file is subject to the terms and conditions of the GNU General Public | ||||
|  * License.  See the file "COPYING" in the main directory of this archive | ||||
|  * for more details. | ||||
|  * | ||||
|  * Copyright (C) 2007 MIPS Technologies, Inc. | ||||
|  * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||||
|  * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl | ||||
|  */ | ||||
| #include <linux/clockchips.h> | ||||
| #include <linux/interrupt.h> | ||||
| #include <linux/percpu.h> | ||||
| #include <linux/smp.h> | ||||
| #include <linux/irq.h> | ||||
| 
 | ||||
| #include <asm/smtc_ipi.h> | ||||
| #include <asm/time.h> | ||||
| #include <asm/cevt-r4k.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * Variant clock event timer support for SMTC on MIPS 34K, 1004K | ||||
|  * or other MIPS MT cores. | ||||
|  * | ||||
|  * Notes on SMTC Support: | ||||
|  * | ||||
|  * SMTC has multiple microthread TCs pretending to be Linux CPUs. | ||||
|  * But there's only one Count/Compare pair per VPE, and Compare | ||||
|  * interrupts are taken opportunistically by available TCs | ||||
|  * bound to the VPE with the Count register.  The new timer | ||||
|  * framework provides for global broadcasts, but we really | ||||
|  * want VPE-level multicasts for best behavior. So instead | ||||
|  * of invoking the high-level clock-event broadcast code, | ||||
|  * this version of SMTC support uses the historical SMTC | ||||
|  * multicast mechanisms "under the hood", appearing to the | ||||
|  * generic clock layer as if the interrupts are per-CPU. | ||||
|  * | ||||
|  * The approach taken here is to maintain a set of NR_CPUS | ||||
|  * virtual timers, and track which "CPU" needs to be alerted | ||||
|  * at each event. | ||||
|  * | ||||
|  * It's unlikely that we'll see a MIPS MT core with more than | ||||
|  * 2 VPEs, but we *know* that we won't need to handle more | ||||
|  * VPEs than we have "CPUs".  So NCPUs arrays of NCPUs elements | ||||
|  * are always going to be overkill, but always going to be enough. | ||||
|  */ | ||||
| 
 | ||||
| unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; | ||||
| static int smtc_nextinvpe[NR_CPUS]; | ||||
| 
 | ||||
| /*
 | ||||
|  * Timestamps stored are absolute values to be programmed | ||||
|  * into the Count register.	 Valid timestamps will never be zero. | ||||
|  * If a zero Count value is actually calculated, it is converted | ||||
|  * to 1, which will introduce one or two CPU cycles of error | ||||
|  * roughly once every four billion events, which at 1000 HZ means | ||||
|  * about once every 50 days.  If that's actually a problem, one | ||||
|  * could alternate squashing 0 to 1 and to -1. | ||||
|  */ | ||||
| 
 | ||||
| #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) | ||||
| #define ISVALID(x) ((x) != 0L) | ||||
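| /* Rough check of the "50 days" figure above: 2^32 events / 1000 HZ ~= 4.3e6 s, or about 49.7 days. */ | ||||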
| 
 | ||||
| /*
 | ||||
|  * Time comparison is subtle, as it's really truncated | ||||
|  * modular arithmetic. | ||||
|  */ | ||||
| 
 | ||||
| #define IS_SOONER(a, b, reference) \ | ||||
|     (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) | ||||
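| /* Example: with reference 0xfffffff0, 0xfffffff8 - ref = 0x8 and 0x10 - ref = 0x20, */ | ||||
| /* so IS_SOONER(0xfffffff8, 0x10, ref) holds; the wraparound comes out right for free. */ | ||||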
| 
 | ||||
| /*
 | ||||
|  * CATCHUP_INCREMENT, used when the function falls behind the counter. | ||||
|  * Could be an increasing function instead of a constant. | ||||
|  */ | ||||
| 
 | ||||
| #define CATCHUP_INCREMENT 64 | ||||
| 
 | ||||
| static int mips_next_event(unsigned long delta, | ||||
| 				struct clock_event_device *evt) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	unsigned int mtflags; | ||||
| 	unsigned long timestamp, reference, previous; | ||||
| 	unsigned long nextcomp = 0L; | ||||
| 	int vpe = current_cpu_data.vpe_id; | ||||
| 	int cpu = smp_processor_id(); | ||||
| 	local_irq_save(flags); | ||||
| 	mtflags = dmt(); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Maintain the per-TC virtual timer | ||||
| 	 * and program the per-VPE shared Count register | ||||
| 	 * as appropriate here... | ||||
| 	 */ | ||||
| 	reference = (unsigned long)read_c0_count(); | ||||
| 	timestamp = MAKEVALID(reference + delta); | ||||
| 	/*
 | ||||
| 	 * To really model the clock, we have to catch the case | ||||
| 	 * where the current next-in-VPE timestamp is the old | ||||
| 	 * timestamp for the calling CPU, but the new value is | ||||
| 	 * in fact later.  In that case, we have to do a full | ||||
| 	 * scan and discover the new next-in-VPE CPU id and | ||||
| 	 * timestamp. | ||||
| 	 */ | ||||
| 	previous = smtc_nexttime[vpe][cpu]; | ||||
| 	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) | ||||
| 	    && IS_SOONER(previous, timestamp, reference)) { | ||||
| 		int i; | ||||
| 		int soonest = cpu; | ||||
| 
 | ||||
| 		/*
 | ||||
| 		 * Update timestamp array here, so that new | ||||
| 		 * value gets considered along with those of | ||||
| 		 * other virtual CPUs on the VPE. | ||||
| 		 */ | ||||
| 		smtc_nexttime[vpe][cpu] = timestamp; | ||||
| 		for_each_online_cpu(i) { | ||||
| 			if (ISVALID(smtc_nexttime[vpe][i]) | ||||
| 			    && IS_SOONER(smtc_nexttime[vpe][i], | ||||
| 				smtc_nexttime[vpe][soonest], reference)) { | ||||
| 				    soonest = i; | ||||
| 			} | ||||
| 		} | ||||
| 		smtc_nextinvpe[vpe] = soonest; | ||||
| 		nextcomp = smtc_nexttime[vpe][soonest]; | ||||
| 	/*
 | ||||
| 	 * Otherwise, we don't have to process the whole array rank; | ||||
| 	 * we just have to see if the event horizon has gotten closer. | ||||
| 	 */ | ||||
| 	} else { | ||||
| 		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || | ||||
| 		    IS_SOONER(timestamp, | ||||
| 			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { | ||||
| 			    smtc_nextinvpe[vpe] = cpu; | ||||
| 			    nextcomp = timestamp; | ||||
| 		} | ||||
| 		/*
 | ||||
| 		 * Since next-in-VPE may be the same as the executing | ||||
| 		 * virtual CPU, we update the array *after* checking | ||||
| 		 * its value. | ||||
| 		 */ | ||||
| 		smtc_nexttime[vpe][cpu] = timestamp; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * It may be that, in fact, we don't need to update Compare, | ||||
| 	 * but if we do, we want to make sure we didn't fall into | ||||
| 	 * a crack just behind Count. | ||||
| 	 */ | ||||
| 	if (ISVALID(nextcomp)) { | ||||
| 		write_c0_compare(nextcomp); | ||||
| 		ehb(); | ||||
| 		/*
 | ||||
| 		 * We never return an error, we just make sure | ||||
| 		 * that we trigger the handlers as quickly as | ||||
| 		 * we can if we fell behind. | ||||
| 		 */ | ||||
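| 		/* An unsigned difference above LONG_MAX means Count has already passed nextcomp. */ | ||||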
| 		while ((nextcomp - (unsigned long)read_c0_count()) | ||||
| 			> (unsigned long)LONG_MAX) { | ||||
| 			nextcomp += CATCHUP_INCREMENT; | ||||
| 			write_c0_compare(nextcomp); | ||||
| 			ehb(); | ||||
| 		} | ||||
| 	} | ||||
| 	emt(mtflags); | ||||
| 	local_irq_restore(flags); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| void smtc_distribute_timer(int vpe) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 	unsigned int mtflags; | ||||
| 	int cpu; | ||||
| 	struct clock_event_device *cd; | ||||
| 	unsigned long nextstamp; | ||||
| 	unsigned long reference; | ||||
| 
 | ||||
| 
 | ||||
| repeat: | ||||
| 	nextstamp = 0L; | ||||
| 	for_each_online_cpu(cpu) { | ||||
| 	    /*
 | ||||
| 	     * Find virtual CPUs within the current VPE that have | ||||
| 	     * unserviced timer requests whose time is now past. | ||||
| 	     */ | ||||
| 	    local_irq_save(flags); | ||||
| 	    mtflags = dmt(); | ||||
| 	    if (cpu_data[cpu].vpe_id == vpe && | ||||
| 		ISVALID(smtc_nexttime[vpe][cpu])) { | ||||
| 		reference = (unsigned long)read_c0_count(); | ||||
| 		if ((smtc_nexttime[vpe][cpu] - reference) | ||||
| 			 > (unsigned long)LONG_MAX) { | ||||
| 			    smtc_nexttime[vpe][cpu] = 0L; | ||||
| 			    emt(mtflags); | ||||
| 			    local_irq_restore(flags); | ||||
| 			    /*
 | ||||
| 			     * We don't send IPIs to ourself. | ||||
| 			     */ | ||||
| 			    if (cpu != smp_processor_id()) { | ||||
| 				smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||||
| 			    } else { | ||||
| 				cd = &per_cpu(mips_clockevent_device, cpu); | ||||
| 				cd->event_handler(cd); | ||||
| 			    } | ||||
| 		} else { | ||||
| 			/* Local to VPE but Valid Time not yet reached. */ | ||||
| 			if (!ISVALID(nextstamp) || | ||||
| 			    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, | ||||
| 			    reference)) { | ||||
| 				smtc_nextinvpe[vpe] = cpu; | ||||
| 				nextstamp = smtc_nexttime[vpe][cpu]; | ||||
| 			} | ||||
| 			emt(mtflags); | ||||
| 			local_irq_restore(flags); | ||||
| 		} | ||||
| 	    } else { | ||||
| 		emt(mtflags); | ||||
| 		local_irq_restore(flags); | ||||
| 
 | ||||
| 	    } | ||||
| 	} | ||||
| 	/* Reprogram for interrupt at next soonest timestamp for VPE */ | ||||
| 	if (ISVALID(nextstamp)) { | ||||
| 		write_c0_compare(nextstamp); | ||||
| 		ehb(); | ||||
| 		if ((nextstamp - (unsigned long)read_c0_count()) | ||||
| 			> (unsigned long)LONG_MAX) | ||||
| 				goto repeat; | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||||
| { | ||||
| 	int cpu = smp_processor_id(); | ||||
| 
 | ||||
| 	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ | ||||
| 	handle_perf_irq(1); | ||||
| 
 | ||||
| 	if (read_c0_cause() & (1 << 30)) { | ||||
| 		/* Clear Count/Compare Interrupt */ | ||||
| 		write_c0_compare(read_c0_compare()); | ||||
| 		smtc_distribute_timer(cpu_data[cpu].vpe_id); | ||||
| 	} | ||||
| 	return IRQ_HANDLED; | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| int smtc_clockevent_init(void) | ||||
| { | ||||
| 	uint64_t mips_freq = mips_hpt_frequency; | ||||
| 	unsigned int cpu = smp_processor_id(); | ||||
| 	struct clock_event_device *cd; | ||||
| 	unsigned int irq; | ||||
| 	int i; | ||||
| 	int j; | ||||
| 
 | ||||
| 	if (!cpu_has_counter || !mips_hpt_frequency) | ||||
| 		return -ENXIO; | ||||
| 	if (cpu == 0) { | ||||
| 		for (i = 0; i < num_possible_cpus(); i++) { | ||||
| 			smtc_nextinvpe[i] = 0; | ||||
| 			for (j = 0; j < num_possible_cpus(); j++) | ||||
| 				smtc_nexttime[i][j] = 0L; | ||||
| 		} | ||||
| 		/*
 | ||||
| 		 * SMTC also can't have the usability test | ||||
| 		 * run by secondary TCs once Compare is in use. | ||||
| 		 */ | ||||
| 		if (!c0_compare_int_usable()) | ||||
| 			return -ENXIO; | ||||
| 	} | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * With vectored interrupts, things get platform specific. | ||||
| 	 * get_c0_compare_int is a hook to allow a platform to return the | ||||
| 	 * interrupt number of its liking. | ||||
| 	 */ | ||||
| 	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||||
| 	if (get_c0_compare_int) | ||||
| 		irq = get_c0_compare_int(); | ||||
| 
 | ||||
| 	cd = &per_cpu(mips_clockevent_device, cpu); | ||||
| 
 | ||||
| 	cd->name		= "MIPS"; | ||||
| 	cd->features		= CLOCK_EVT_FEAT_ONESHOT; | ||||
| 
 | ||||
| 	/* Calculate the min / max delta */ | ||||
| 	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||||
| 	cd->shift		= 32; | ||||
| 	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd); | ||||
| 	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd); | ||||
| 
 | ||||
| 	cd->rating		= 300; | ||||
| 	cd->irq			= irq; | ||||
| 	cd->cpumask		= cpumask_of(cpu); | ||||
| 	cd->set_next_event	= mips_next_event; | ||||
| 	cd->set_mode		= mips_set_clock_mode; | ||||
| 	cd->event_handler	= mips_event_handler; | ||||
| 
 | ||||
| 	clockevents_register_device(cd); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * On SMTC we only want to do the data structure | ||||
| 	 * initialization and IRQ setup once. | ||||
| 	 */ | ||||
| 	if (cpu) | ||||
| 		return 0; | ||||
| 	/*
 | ||||
| 	 * And we need the hwmask associated with the c0_compare | ||||
| 	 * vector to be initialized. | ||||
| 	 */ | ||||
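| 	/* 0x100 << n is the Status/Cause IM bit for CPU interrupt line n (IM occupies bits 8..15). */ | ||||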
| 	irq_hwmask[irq] = (0x100 << cp0_compare_irq); | ||||
| 	if (cp0_timer_irq_installed) | ||||
| 		return 0; | ||||
| 
 | ||||
| 	cp0_timer_irq_installed = 1; | ||||
| 
 | ||||
| 	setup_irq(irq, &c0_compare_irqaction); | ||||
| 
 | ||||
| 	return 0; | ||||
| } | ||||
|  | @ -62,7 +62,7 @@ static inline void check_errata(void) | |||
| 	case CPU_34K: | ||||
| 		/*
 | ||||
| 		 * Erratum "RPS May Cause Incorrect Instruction Execution" | ||||
| 		 * This code only handles VPE0, any SMP/SMTC/RTOS code | ||||
| 		 * This code only handles VPE0, any SMP/RTOS code | ||||
| 		 * making use of VPE1 will be responsible for that VPE. | ||||
| 		 */ | ||||
| 		if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) | ||||
|  |  | |||
|  | @ -16,9 +16,6 @@ | |||
| #include <asm/isadep.h> | ||||
| #include <asm/thread_info.h> | ||||
| #include <asm/war.h> | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #include <asm/mipsmtregs.h> | ||||
| #endif | ||||
| 
 | ||||
| #ifndef CONFIG_PREEMPT | ||||
| #define resume_kernel	restore_all | ||||
|  | @ -89,41 +86,6 @@ FEXPORT(syscall_exit) | |||
| 	bnez	t0, syscall_exit_work | ||||
| 
 | ||||
| restore_all:				# restore full frame | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | ||||
| /* Re-arm any temporarily masked interrupts not explicitly "acked" */ | ||||
| 	mfc0	v0, CP0_TCSTATUS | ||||
| 	ori	v1, v0, TCSTATUS_IXMT | ||||
| 	mtc0	v1, CP0_TCSTATUS | ||||
| 	andi	v0, TCSTATUS_IXMT | ||||
| 	_ehb | ||||
| 	mfc0	t0, CP0_TCCONTEXT | ||||
| 	DMT	9				# dmt t1 | ||||
| 	jal	mips_ihb | ||||
| 	mfc0	t2, CP0_STATUS | ||||
| 	andi	t3, t0, 0xff00 | ||||
| 	or	t2, t2, t3 | ||||
| 	mtc0	t2, CP0_STATUS | ||||
| 	_ehb | ||||
| 	andi	t1, t1, VPECONTROL_TE | ||||
| 	beqz	t1, 1f | ||||
| 	EMT | ||||
| 1: | ||||
| 	mfc0	v1, CP0_TCSTATUS | ||||
| 	/* We set IXMT above, XOR should clear it here */ | ||||
| 	xori	v1, v1, TCSTATUS_IXMT | ||||
| 	or	v1, v0, v1 | ||||
| 	mtc0	v1, CP0_TCSTATUS | ||||
| 	_ehb | ||||
| 	xor	t0, t0, t3 | ||||
| 	mtc0	t0, CP0_TCCONTEXT | ||||
| #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | ||||
| /* Detect and execute deferred IPI "interrupts" */ | ||||
| 	LONG_L	s0, TI_REGS($28) | ||||
| 	LONG_S	sp, TI_REGS($28) | ||||
| 	jal	deferred_smtc_ipi | ||||
| 	LONG_S	s0, TI_REGS($28) | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	.set	noat
 | ||||
| 	RESTORE_TEMP | ||||
| 	RESTORE_AT | ||||
|  |  | |||
|  | @ -21,20 +21,6 @@ | |||
| #include <asm/war.h> | ||||
| #include <asm/thread_info.h> | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #define PANIC_PIC(msg)					\ | ||||
| 		.set	push;				\
 | ||||
| 		.set	nomicromips;			\
 | ||||
| 		.set	reorder;			\
 | ||||
| 		PTR_LA	a0,8f;				\
 | ||||
| 		.set	noat;				\
 | ||||
| 		PTR_LA	AT, panic;			\
 | ||||
| 		jr	AT;				\
 | ||||
| 9:		b	9b;				\
 | ||||
| 		.set	pop;				\
 | ||||
| 		TEXT(msg) | ||||
| #endif | ||||
| 
 | ||||
| 	__INIT | ||||
| 
 | ||||
| /* | ||||
|  | @ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp) | |||
| 	SAVE_AT | ||||
| 	.set	push
 | ||||
| 	.set	noreorder
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* | ||||
| 	 * To keep from blindly blocking *all* interrupts | ||||
| 	 * during service by the SMTC kernel, we also want to | ||||
| 	 * pass the IM value to be cleared. | ||||
| 	 */ | ||||
| FEXPORT(except_vec_vi_mori) | ||||
| 	ori	a0, $0, 0 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	PTR_LA	v1, except_vec_vi_handler | ||||
| FEXPORT(except_vec_vi_lui) | ||||
| 	lui	v0, 0		/* Patched */ | ||||
|  | @ -277,37 +254,10 @@ EXPORT(except_vec_vi_end) | |||
| NESTED(except_vec_vi_handler, 0, sp) | ||||
| 	SAVE_TEMP | ||||
| 	SAVE_STATIC | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* | ||||
| 	 * SMTC has an interesting problem that interrupts are level-triggered, | ||||
| 	 * and the CLI macro will clear EXL, potentially causing a duplicate | ||||
| 	 * interrupt service invocation. So we need to clear the associated | ||||
| 	 * IM bit of Status prior to doing CLI, and restore it after the | ||||
| 	 * service routine has been invoked - we must assume that the | ||||
| 	 * service routine will have cleared the state, and any active | ||||
| 	 * level represents a new or otherwise unserviced event... | ||||
| 	 */ | ||||
| 	mfc0	t1, CP0_STATUS | ||||
| 	and	t0, a0, t1 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | ||||
| 	mfc0	t2, CP0_TCCONTEXT | ||||
| 	or	t2, t0, t2 | ||||
| 	mtc0	t2, CP0_TCCONTEXT | ||||
| #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | ||||
| 	xor	t1, t1, t0 | ||||
| 	mtc0	t1, CP0_STATUS | ||||
| 	_ehb | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	CLI | ||||
| #ifdef CONFIG_TRACE_IRQFLAGS | ||||
| 	move	s0, v0 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	move	s1, a0 | ||||
| #endif | ||||
| 	TRACE_IRQS_OFF | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	move	a0, s1 | ||||
| #endif | ||||
| 	move	v0, s0 | ||||
| #endif | ||||
| 
 | ||||
|  | @ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
| 
 | ||||
| 	.align	5
 | ||||
| 	LEAF(handle_ri_rdhwr_vivt) | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	PANIC_PIC("handle_ri_rdhwr_vivt called") | ||||
| #else | ||||
| 	.set	push
 | ||||
| 	.set	noat
 | ||||
| 	.set	noreorder
 | ||||
|  | @ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp) | |||
| 	.set	pop
 | ||||
| 	bltz	k1, handle_ri	/* slow path */ | ||||
| 	/* fall thru */ | ||||
| #endif | ||||
| 	END(handle_ri_rdhwr_vivt) | ||||
| 
 | ||||
| 	LEAF(handle_ri_rdhwr) | ||||
|  |  | |||
|  | @ -35,33 +35,12 @@ | |||
| 	 */ | ||||
| 	.macro	setup_c0_status set clr | ||||
| 	.set	push
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* | ||||
| 	 * For SMTC, we need to set privilege and disable interrupts only for | ||||
| 	 * the current TC, using the TCStatus register. | ||||
| 	 */ | ||||
| 	mfc0	t0, CP0_TCSTATUS | ||||
| 	/* Fortunately CU 0 is in the same place in both registers */ | ||||
| 	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||||
| 	li	t1, ST0_CU0 | 0x08001c00 | ||||
| 	or	t0, t1 | ||||
| 	/* Clear TKSU, leave IXMT */ | ||||
| 	xori	t0, 0x00001800 | ||||
| 	mtc0	t0, CP0_TCSTATUS | ||||
| 	_ehb | ||||
| 	/* We need to leave the global IE bit set, but clear EXL...*/ | ||||
| 	mfc0	t0, CP0_STATUS | ||||
| 	or	t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr | ||||
| 	xor	t0, ST0_EXL | ST0_ERL | \clr | ||||
| 	mtc0	t0, CP0_STATUS | ||||
| #else | ||||
| 	mfc0	t0, CP0_STATUS | ||||
| 	or	t0, ST0_CU0|\set|0x1f|\clr | ||||
| 	xor	t0, 0x1f|\clr | ||||
| 	mtc0	t0, CP0_STATUS | ||||
| 	.set	noreorder
 | ||||
| 	sll	zero,3				# ehb | ||||
| #endif | ||||
| 	.set	pop
 | ||||
| 	.endm | ||||
| 
 | ||||
|  | @ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point | |||
| 	jr	t0 | ||||
| 0: | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* | ||||
| 	 * In the SMTC kernel, "CLI" is thread-specific, in TCStatus. | ||||
| 	 * We still need to enable interrupts globally in Status, | ||||
| 	 * and clear EXL/ERL. | ||||
| 	 * | ||||
| 	 * TCContext is used to track interrupt levels under | ||||
| 	 * service in SMTC kernel. Clear for boot TC before | ||||
| 	 * allowing any interrupts. | ||||
| 	 */ | ||||
| 	mtc0	zero, CP0_TCCONTEXT | ||||
| 
 | ||||
| 	mfc0	t0, CP0_STATUS | ||||
| 	ori	t0, t0, 0xff1f | ||||
| 	xori	t0, t0, 0x001e | ||||
| 	mtc0	t0, CP0_STATUS | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	PTR_LA		t0, __bss_start		# clear .bss | ||||
| 	LONG_S		zero, (t0) | ||||
| 	PTR_LA		t1, __bss_stop - LONGSIZE | ||||
|  | @ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point | |||
|  * function after setting up the stack and gp registers. | ||||
|  */ | ||||
| NESTED(smp_bootstrap, 16, sp) | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* | ||||
| 	 * Read-modify-writes of Status must be atomic, and this | ||||
| 	 * is one case where CLI is invoked without EXL being | ||||
| 	 * necessarily set. The CLI and setup_c0_status will | ||||
| 	 * in fact be redundant for all but the first TC of | ||||
| 	 * each VPE being booted. | ||||
| 	 */ | ||||
| 	DMT	10	# dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ | ||||
| 	jal	mips_ihb | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	smp_slave_setup | ||||
| 	setup_c0_status_sec | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	andi	t2, t2, VPECONTROL_TE | ||||
| 	beqz	t2, 2f | ||||
| 	EMT		# emt | ||||
| 2: | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	j	start_secondary | ||||
| 	END(smp_bootstrap) | ||||
| #endif /* CONFIG_SMP */ | ||||
|  |  | |||
|  | @ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = { | |||
| 	.irq_disable		= disable_8259A_irq, | ||||
| 	.irq_unmask		= enable_8259A_irq, | ||||
| 	.irq_mask_ack		= mask_and_ack_8259A, | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||||
| 	.irq_set_affinity	= plat_set_irq_affinity, | ||||
| #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||||
| }; | ||||
| 
 | ||||
| /*
 | ||||
|  | @ -180,7 +177,6 @@ handle_real_irq: | |||
| 		outb(cached_master_mask, PIC_MASTER_IMR); | ||||
| 		outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ | ||||
| 	} | ||||
| 	smtc_im_ack_irq(irq); | ||||
| 	raw_spin_unlock_irqrestore(&i8259A_lock, flags); | ||||
| 	return; | ||||
| 
 | ||||
|  |  | |||
|  | @ -229,18 +229,8 @@ void __init check_wait(void) | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void smtc_idle_hook(void) | ||||
| { | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	void smtc_idle_loop_hook(void); | ||||
| 
 | ||||
| 	smtc_idle_loop_hook(); | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| void arch_cpu_idle(void) | ||||
| { | ||||
| 	smtc_idle_hook(); | ||||
| 	if (cpu_wait) | ||||
| 		cpu_wait(); | ||||
| 	else | ||||
|  |  | |||
|  | @ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d) | |||
|  */ | ||||
| static void level_mask_and_ack_msc_irq(struct irq_data *d) | ||||
| { | ||||
| 	unsigned int irq = d->irq; | ||||
| 
 | ||||
| 	mask_msc_irq(d); | ||||
| 	if (!cpu_has_veic) | ||||
| 		MSCIC_WRITE(MSC01_IC_EOI, 0); | ||||
| 	/* This actually needs to be a call into platform code */ | ||||
| 	smtc_im_ack_irq(irq); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | @ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d) | |||
| 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); | ||||
| 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); | ||||
| 	} | ||||
| 	smtc_im_ack_irq(irq); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  |  | |||
|  | @ -73,7 +73,6 @@ void free_irqno(unsigned int irq) | |||
|  */ | ||||
| void ack_bad_irq(unsigned int irq) | ||||
| { | ||||
| 	smtc_im_ack_irq(irq); | ||||
| 	printk("unexpected IRQ # %d\n", irq); | ||||
| } | ||||
| 
 | ||||
|  | @ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq) | |||
| { | ||||
| 	irq_enter(); | ||||
| 	check_stack_overflow(); | ||||
| 	if (!smtc_handle_on_other_cpu(irq)) | ||||
| 		generic_handle_irq(irq); | ||||
| 	irq_exit(); | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||||
| /*
 | ||||
|  * To avoid inefficient and in some cases pathological re-checking of | ||||
|  * IRQ affinity, we have this variant that skips the affinity check. | ||||
|  */ | ||||
| 
 | ||||
| void __irq_entry do_IRQ_no_affinity(unsigned int irq) | ||||
| { | ||||
| 	irq_enter(); | ||||
| 	smtc_im_backstop(irq); | ||||
| 	generic_handle_irq(irq); | ||||
| 	irq_exit(); | ||||
| } | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||||
|  |  | |||
|  | @ -1,5 +1,5 @@ | |||
| /*
 | ||||
|  * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels | ||||
|  * General MIPS MT support routines, usable in AP/SP and SMVP. | ||||
|  * Copyright (C) 2005 Mips Technologies, Inc | ||||
|  */ | ||||
| #include <linux/cpu.h> | ||||
|  |  | |||
|  | @ -1,5 +1,5 @@ | |||
| /*
 | ||||
|  * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels | ||||
|  * General MIPS MT support routines, usable in AP/SP and SMVP. | ||||
|  * Copyright (C) 2005 Mips Technologies, Inc | ||||
|  */ | ||||
| 
 | ||||
|  | @ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl) | |||
| 	int tc; | ||||
| 	unsigned long haltval; | ||||
| 	unsigned long tcstatval; | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	void smtc_soft_dump(void); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	local_irq_save(flags); | ||||
| 	vpflags = dvpe(); | ||||
|  | @ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl) | |||
| 		if (!haltval) | ||||
| 			write_tc_c0_tchalt(0); | ||||
| 	} | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	smtc_soft_dump(); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	printk("===========================\n"); | ||||
| 	evpe(vpflags); | ||||
| 	local_irq_restore(flags); | ||||
|  | @ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void) | |||
| 
 | ||||
| void mt_cflush_lockdown(void) | ||||
| { | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	void smtc_cflush_lockdown(void); | ||||
| 
 | ||||
| 	smtc_cflush_lockdown(); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	/* FILL IN VSMP and AP/SP VERSIONS HERE */ | ||||
| } | ||||
| 
 | ||||
| void mt_cflush_release(void) | ||||
| { | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	void smtc_cflush_release(void); | ||||
| 
 | ||||
| 	smtc_cflush_release(); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	/* FILL IN VSMP and AP/SP VERSIONS HERE */ | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
| 	 */ | ||||
| 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/*
 | ||||
| 	 * SMTC restores TCStatus after Status, and the CU bits | ||||
| 	 * are aliased there. | ||||
| 	 */ | ||||
| 	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); | ||||
| #endif | ||||
| 	clear_tsk_thread_flag(p, TIF_USEDFPU); | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_FPAFF | ||||
|  |  | |||
|  | @ -87,18 +87,6 @@ | |||
| 
 | ||||
| 	PTR_ADDU	t0, $28, _THREAD_SIZE - 32 | ||||
| 	set_saved_sp	t0, t1, t2 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* Read-modify-writes of Status must be atomic on a VPE */ | ||||
| 	mfc0	t2, CP0_TCSTATUS | ||||
| 	ori	t1, t2, TCSTATUS_IXMT | ||||
| 	mtc0	t1, CP0_TCSTATUS | ||||
| 	andi	t2, t2, TCSTATUS_IXMT | ||||
| 	_ehb | ||||
| 	DMT	8				# dmt	t0 | ||||
| 	move	t1,ra | ||||
| 	jal	mips_ihb | ||||
| 	move	ra,t1 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	mfc0	t1, CP0_STATUS		/* Do we really need this? */ | ||||
| 	li	a3, 0xff01 | ||||
| 	and	t1, a3 | ||||
|  | @ -107,18 +95,6 @@ | |||
| 	and	a2, a3 | ||||
| 	or	a2, t1 | ||||
| 	mtc0	a2, CP0_STATUS | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	_ehb | ||||
| 	andi	t0, t0, VPECONTROL_TE | ||||
| 	beqz	t0, 1f | ||||
| 	emt | ||||
| 1: | ||||
| 	mfc0	t1, CP0_TCSTATUS | ||||
| 	xori	t1, t1, TCSTATUS_IXMT | ||||
| 	or	t1, t1, t2 | ||||
| 	mtc0	t1, CP0_TCSTATUS | ||||
| 	_ehb | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	move	v0, a0 | ||||
| 	jr	ra | ||||
| 	END(resume) | ||||
|  | @ -176,19 +152,10 @@ LEAF(_restore_msa) | |||
| #define FPU_DEFAULT  0x00000000 | ||||
| 
 | ||||
| LEAF(_init_fpu) | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ | ||||
| 	mfc0	t0, CP0_TCSTATUS | ||||
| 	/* Bit position is the same for Status, TCStatus */ | ||||
| 	li	t1, ST0_CU1 | ||||
| 	or	t0, t1 | ||||
| 	mtc0	t0, CP0_TCSTATUS | ||||
| #else /* Normal MIPS CU1 enable */ | ||||
| 	mfc0	t0, CP0_STATUS | ||||
| 	li	t1, ST0_CU1 | ||||
| 	or	t0, t1 | ||||
| 	mtc0	t0, CP0_STATUS | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	enable_fpu_hazard | ||||
| 
 | ||||
| 	li	t1, FPU_DEFAULT | ||||
|  |  | |||
|  | @ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) | |||
| 	unsigned long flags; | ||||
| 	int i; | ||||
| 
 | ||||
| 	/* Ought not to be strictly necessary for SMTC builds */ | ||||
| 	local_irq_save(flags); | ||||
| 	vpeflags = dvpe(); | ||||
| 	set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); | ||||
|  |  | |||
|  | @ -49,14 +49,11 @@ static void cmp_init_secondary(void) | |||
| 
 | ||||
| 	/* Enable per-cpu interrupts: platform specific */ | ||||
| 
 | ||||
| #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifdef CONFIG_MIPS_MT_SMP | ||||
| 	if (cpu_has_mipsmt) | ||||
| 		c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & | ||||
| 			TCBIND_CURVPE; | ||||
| #endif | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	c->tc_id  = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| static void cmp_smp_finish(void) | ||||
|  | @ -135,10 +132,6 @@ void __init cmp_smp_setup(void) | |||
| 		unsigned int mvpconf0 = read_c0_mvpconf0(); | ||||
| 
 | ||||
| 		nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||||
| #elif defined(CONFIG_MIPS_MT_SMTC) | ||||
| 		unsigned int mvpconf0 = read_c0_mvpconf0(); | ||||
| 
 | ||||
| 		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||||
| #endif | ||||
| 		smp_num_siblings = nvpe; | ||||
| 	} | ||||
|  |  | |||
|  | @ -43,10 +43,6 @@ | |||
| #include <asm/time.h> | ||||
| #include <asm/setup.h> | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| #include <asm/mipsmtregs.h> | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */ | ||||
| 
 | ||||
| int __cpu_number_map[NR_CPUS];		/* Map physical to logical */ | ||||
|  | @ -102,12 +98,6 @@ asmlinkage void start_secondary(void) | |||
| { | ||||
| 	unsigned int cpu; | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/* Only do cpu_probe for first TC of CPU */ | ||||
| 	if ((read_c0_tcbind() & TCBIND_CURTC) != 0) | ||||
| 		__cpu_name[smp_processor_id()] = __cpu_name[0]; | ||||
| 	else | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	cpu_probe(); | ||||
| 	cpu_report(); | ||||
| 	per_cpu_trap_init(false); | ||||
|  | @ -238,13 +228,10 @@ static void flush_tlb_mm_ipi(void *mm) | |||
|  *  o collapses to normal function call on UP kernels | ||||
|  *  o collapses to normal function call on systems with a single shared | ||||
|  *    primary cache. | ||||
|  *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. | ||||
|  */ | ||||
| static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) | ||||
| { | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| 	smp_call_function(func, info, 1); | ||||
| #endif | ||||
| } | ||||
| 
 | ||||
| static inline void smp_on_each_tlb(void (*func) (void *info), void *info) | ||||
|  |  | |||
|  | @ -1,133 +0,0 @@ | |||
| /* | ||||
|  * Assembly Language Functions for MIPS MT SMTC support | ||||
|  */ | ||||
| 
 | ||||
| /* | ||||
|  * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. | ||||
|  */ | ||||
| 
 | ||||
| #include <asm/regdef.h> | ||||
| #include <asm/asmmacro.h> | ||||
| #include <asm/stackframe.h> | ||||
| #include <asm/irqflags.h> | ||||
| 
 | ||||
| /* | ||||
|  * "Software Interrupt" linkage. | ||||
|  * | ||||
|  * This is invoked when an "Interrupt" is sent from one TC to another, | ||||
|  * where the TC to be interrupted is halted, has its Restart address | ||||
|  * and Status values saved by the "remote control" thread, then modified | ||||
|  * to cause execution to begin here, in kernel mode. This code then | ||||
|  * disguises the TC state as that of an exception and transfers | ||||
|  * control to the general exception or vectored interrupt handler. | ||||
|  */ | ||||
| 	.set noreorder
 | ||||
| 
 | ||||
| /* | ||||
| The __smtc_ipi_vector would use k0 and k1 as temporaries and | ||||
| 1) Set EXL (this is per-VPE, so this can't be done by proxy!) | ||||
| 2) Restore the K/CU and IXMT bits to the pre "exception" state | ||||
|    (EXL means no interrupts and access to the kernel map). | ||||
| 3) Set EPC to be the saved value of TCRestart. | ||||
| 4) Jump to the exception handler entry point passed by the sender. | ||||
| 
 | ||||
| CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
|  * Reviled and slandered vision: Set EXL and restore K/CU/IXMT | ||||
|  * state of pre-halt thread, then save everything and call | ||||
|  * through some function pointer to imaginary_exception, which | ||||
|  * will parse a register value or memory message queue to | ||||
|  * deliver things like interprocessor interrupts. On return | ||||
|  * from that function, jump to the global ret_from_irq code | ||||
|  * to invoke the scheduler and return as appropriate. | ||||
|  */ | ||||
| 
 | ||||
| #define PT_PADSLOT4 (PT_R0-8) | ||||
| #define PT_PADSLOT5 (PT_R0-4) | ||||
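| /* Scratch slots just below the GPR save area; the IPI sender parks the handler (slot 5) and its argument (slot 4) here. */ | ||||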
| 
 | ||||
| 	.text | ||||
| 	.align 5
 | ||||
| FEXPORT(__smtc_ipi_vector) | ||||
| #ifdef CONFIG_CPU_MICROMIPS | ||||
| 	nop | ||||
| #endif | ||||
| 	.set	noat
 | ||||
| 	/* Disable thread scheduling to make Status update atomic */ | ||||
| 	DMT	27					# dmt	k1 | ||||
| 	_ehb | ||||
| 	/* Set EXL */ | ||||
| 	mfc0	k0,CP0_STATUS | ||||
| 	ori	k0,k0,ST0_EXL | ||||
| 	mtc0	k0,CP0_STATUS | ||||
| 	_ehb | ||||
| 	/* Thread scheduling now inhibited by EXL. Restore TE state. */ | ||||
| 	andi	k1,k1,VPECONTROL_TE | ||||
| 	beqz	k1,1f | ||||
| 	emt | ||||
| 1: | ||||
| 	/* | ||||
| 	 * The IPI sender has put some information on the anticipated | ||||
| 	 * kernel stack frame.	If we were in user mode, this will be | ||||
| 	 * built above the saved kernel SP.  If we were already in the | ||||
| 	 * kernel, it will be built above the current CPU SP. | ||||
| 	 * | ||||
| 	 * Were we in kernel mode, as indicated by CU0? | ||||
| 	 */ | ||||
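| 	/* Shifting Status left by 3 moves CU0 (bit 28) into the sign bit, which bltz tests below. */ | ||||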
| 	sll	k1,k0,3 | ||||
| 	.set noreorder
 | ||||
| 	bltz	k1,2f | ||||
| 	move	k1,sp | ||||
| 	.set reorder
 | ||||
| 	/* | ||||
| 	 * If previously in user mode, set CU0 and use kernel stack. | ||||
| 	 */ | ||||
| 	li	k1,ST0_CU0 | ||||
| 	or	k1,k1,k0 | ||||
| 	mtc0	k1,CP0_STATUS | ||||
| 	_ehb | ||||
| 	get_saved_sp | ||||
| 	/* Interrupting TC will have pre-set values in slots in the new frame */ | ||||
| 2:	subu	k1,k1,PT_SIZE | ||||
| 	/* Load TCStatus Value */ | ||||
| 	lw	k0,PT_TCSTATUS(k1) | ||||
| 	/* Write it to TCStatus to restore CU/KSU/IXMT state */ | ||||
| 	mtc0	k0,$2,1 | ||||
| 	_ehb | ||||
| 	lw	k0,PT_EPC(k1) | ||||
| 	mtc0	k0,CP0_EPC | ||||
| 	/* SAVE_ALL will redundantly recompute the SP, but use it for now */ | ||||
| 	SAVE_ALL | ||||
| 	CLI | ||||
| 	TRACE_IRQS_OFF | ||||
| 	/* Function to be invoked passed stack pad slot 5 */ | ||||
| 	lw	t0,PT_PADSLOT5(sp) | ||||
| 	/* Argument from sender passed in stack pad slot 4 */ | ||||
| 	lw	a0,PT_PADSLOT4(sp) | ||||
| 	LONG_L	s0, TI_REGS($28) | ||||
| 	LONG_S	sp, TI_REGS($28) | ||||
| 	PTR_LA	ra, ret_from_irq | ||||
| 	jr	t0 | ||||
| 
 | ||||
| /* | ||||
|  * Called from idle loop to provoke processing of queued IPIs | ||||
|  * First IPI message in queue passed as argument. | ||||
|  */ | ||||
| 
 | ||||
| LEAF(self_ipi) | ||||
| 	/* Before anything else, block interrupts */ | ||||
| 	mfc0	t0,CP0_TCSTATUS | ||||
| 	ori	t1,t0,TCSTATUS_IXMT | ||||
| 	mtc0	t1,CP0_TCSTATUS | ||||
| 	_ehb | ||||
| 	/* We know we're in kernel mode, so prepare stack frame */ | ||||
| 	subu	t1,sp,PT_SIZE | ||||
| 	sw	ra,PT_EPC(t1) | ||||
| 	sw	a0,PT_PADSLOT4(t1) | ||||
| 	la	t2,ipi_decode | ||||
| 	sw	t2,PT_PADSLOT5(t1) | ||||
| 	/* Save pre-disable value of TCStatus */ | ||||
| 	sw	t0,PT_TCSTATUS(t1) | ||||
| 	j	__smtc_ipi_vector | ||||
| 	nop | ||||
| END(self_ipi) | ||||
|  | @ -1,102 +0,0 @@ | |||
| /*
 | ||||
|  * /proc hooks for SMTC kernel | ||||
|  * Copyright (C) 2005 Mips Technologies, Inc | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
| #include <linux/sched.h> | ||||
| #include <linux/cpumask.h> | ||||
| #include <linux/interrupt.h> | ||||
| 
 | ||||
| #include <asm/cpu.h> | ||||
| #include <asm/processor.h> | ||||
| #include <linux/atomic.h> | ||||
| #include <asm/hardirq.h> | ||||
| #include <asm/mmu_context.h> | ||||
| #include <asm/mipsregs.h> | ||||
| #include <asm/cacheflush.h> | ||||
| #include <linux/proc_fs.h> | ||||
| #include <linux/seq_file.h> | ||||
| 
 | ||||
| #include <asm/smtc_proc.h> | ||||
| 
 | ||||
| /*
 | ||||
|  * /proc diagnostic and statistics hooks | ||||
|  */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Statistics gathered | ||||
|  */ | ||||
| unsigned long selfipis[NR_CPUS]; | ||||
| 
 | ||||
| struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||||
| 
 | ||||
| atomic_t smtc_fpu_recoveries; | ||||
| 
 | ||||
| static int smtc_proc_show(struct seq_file *m, void *v) | ||||
| { | ||||
| 	int i; | ||||
| 	extern unsigned long ebase; | ||||
| 
 | ||||
| 	seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status); | ||||
| 	seq_printf(m, "Config7: 0x%08x\n", read_c0_config7()); | ||||
| 	seq_printf(m, "EBASE: 0x%08lx\n", ebase); | ||||
| 	seq_printf(m, "Counter Interrupts taken per CPU (TC)\n"); | ||||
| 	for (i=0; i < NR_CPUS; i++) | ||||
| 		seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); | ||||
| 	seq_printf(m, "Self-IPIs by CPU:\n"); | ||||
| 	for(i = 0; i < NR_CPUS; i++) | ||||
| 		seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | ||||
| 	seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", | ||||
| 		   atomic_read(&smtc_fpu_recoveries)); | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| static int smtc_proc_open(struct inode *inode, struct file *file) | ||||
| { | ||||
| 	return single_open(file, smtc_proc_show, NULL); | ||||
| } | ||||
| 
 | ||||
| static const struct file_operations smtc_proc_fops = { | ||||
| 	.open		= smtc_proc_open, | ||||
| 	.read		= seq_read, | ||||
| 	.llseek		= seq_lseek, | ||||
| 	.release	= single_release, | ||||
| }; | ||||
| 
 | ||||
| void init_smtc_stats(void) | ||||
| { | ||||
| 	int i; | ||||
| 
 | ||||
| 	for (i=0; i<NR_CPUS; i++) { | ||||
| 		smtc_cpu_stats[i].timerints = 0; | ||||
| 		smtc_cpu_stats[i].selfipis = 0; | ||||
| 	} | ||||
| 
 | ||||
| 	atomic_set(&smtc_fpu_recoveries, 0); | ||||
| 
 | ||||
| 	proc_create("smtc", 0444, NULL, &smtc_proc_fops); | ||||
| } | ||||
| 
 | ||||
| static int proc_cpuinfo_chain_call(struct notifier_block *nfb, | ||||
| 	unsigned long action_unused, void *data) | ||||
| { | ||||
| 	struct proc_cpuinfo_notifier_args *pcn = data; | ||||
| 	struct seq_file *m = pcn->m; | ||||
| 	unsigned long n = pcn->n; | ||||
| 
 | ||||
| 	if (!cpu_has_mipsmt) | ||||
| 		return NOTIFY_OK; | ||||
| 
 | ||||
| 	seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); | ||||
| 	seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); | ||||
| 
 | ||||
| 	return NOTIFY_OK; | ||||
| } | ||||
| 
 | ||||
| static int __init proc_cpuinfo_notifier_init(void) | ||||
| { | ||||
| 	return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0); | ||||
| } | ||||
| 
 | ||||
| subsys_initcall(proc_cpuinfo_notifier_init); | ||||
										
											
File diff suppressed because it is too large
											
										
									
								
							|  | @ -6,8 +6,6 @@ | |||
|  * not have done anything significant (but they may have had interrupts | ||||
|  * enabled briefly - prom_smp_finish() should not be responsible for enabling | ||||
|  * interrupts...) | ||||
|  * | ||||
|  * FIXME: broken for SMTC | ||||
|  */ | ||||
| 
 | ||||
| #include <linux/kernel.h> | ||||
|  | @ -33,14 +31,6 @@ void synchronise_count_master(int cpu) | |||
| 	unsigned long flags; | ||||
| 	unsigned int initcount; | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/*
 | ||||
| 	 * SMTC needs to synchronise per VPE, not per CPU | ||||
| 	 * ignore for now | ||||
| 	 */ | ||||
| 	return; | ||||
| #endif | ||||
| 
 | ||||
| 	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); | ||||
| 
 | ||||
| 	local_irq_save(flags); | ||||
|  | @ -110,14 +100,6 @@ void synchronise_count_slave(int cpu) | |||
| 	int i; | ||||
| 	unsigned int initcount; | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/*
 | ||||
| 	 * SMTC needs to synchronise per VPE, not per CPU | ||||
| 	 * ignore for now | ||||
| 	 */ | ||||
| 	return; | ||||
| #endif | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Not every cpu is online at the time this gets called, | ||||
| 	 * so we first wait for the master to say everyone is ready | ||||
|  |  | |||
|  | @ -26,7 +26,6 @@ | |||
| #include <asm/cpu-features.h> | ||||
| #include <asm/cpu-type.h> | ||||
| #include <asm/div64.h> | ||||
| #include <asm/smtc_ipi.h> | ||||
| #include <asm/time.h> | ||||
| 
 | ||||
| /*
 | ||||
|  |  | |||
|  | @ -370,9 +370,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
| { | ||||
| 	static int die_counter; | ||||
| 	int sig = SIGSEGV; | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	unsigned long dvpret; | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	oops_enter(); | ||||
| 
 | ||||
|  | @ -382,13 +379,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
| 
 | ||||
| 	console_verbose(); | ||||
| 	raw_spin_lock_irq(&die_lock); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	dvpret = dvpe(); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	bust_spinlocks(1); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	mips_mt_regdump(dvpret); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	printk("%s[#%d]:\n", str, ++die_counter); | ||||
| 	show_registers(regs); | ||||
|  | @ -1759,19 +1750,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
| 		extern char rollback_except_vec_vi; | ||||
| 		char *vec_start = using_rollback_handler() ? | ||||
| 			&rollback_except_vec_vi : &except_vec_vi; | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		/*
 | ||||
| 		 * We need to provide the SMTC vectored interrupt handler | ||||
| 		 * not only with the address of the handler, but with the | ||||
| 		 * Status.IM bit to be masked before going there. | ||||
| 		 */ | ||||
| 		extern char except_vec_vi_mori; | ||||
| #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) | ||||
| 		const int mori_offset = &except_vec_vi_mori - vec_start + 2; | ||||
| #else | ||||
| 		const int mori_offset = &except_vec_vi_mori - vec_start; | ||||
| #endif | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) | ||||
| 		const int lui_offset = &except_vec_vi_lui - vec_start + 2; | ||||
| 		const int ori_offset = &except_vec_vi_ori - vec_start + 2; | ||||
|  | @ -1795,12 +1773,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | |||
| #else | ||||
| 				handler_len); | ||||
| #endif | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		BUG_ON(n > 7);	/* Vector index exceeds SMTC maximum. */ | ||||
| 
 | ||||
| 		h = (u16 *)(b + mori_offset); | ||||
| 		*h = (0x100 << n); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		h = (u16 *)(b + lui_offset); | ||||
| 		*h = (handler >> 16) & 0xffff; | ||||
| 		h = (u16 *)(b + ori_offset); | ||||
|  | @ -1870,20 +1842,6 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
| 	unsigned int cpu = smp_processor_id(); | ||||
| 	unsigned int status_set = ST0_CU0; | ||||
| 	unsigned int hwrena = cpu_hwrena_impl_bits; | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	int secondaryTC = 0; | ||||
| 	int bootTC = (cpu == 0); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Only do per_cpu_trap_init() for first TC of Each VPE. | ||||
| 	 * Note that this hack assumes that the SMTC init code | ||||
| 	 * assigns TCs consecutively and in ascending order. | ||||
| 	 */ | ||||
| 
 | ||||
| 	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||||
| 	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) | ||||
| 		secondaryTC = 1; | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * Disable coprocessors and select 32-bit or 64-bit addressing | ||||
|  | @ -1911,10 +1869,6 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
| 	if (hwrena) | ||||
| 		write_c0_hwrena(hwrena); | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	if (!secondaryTC) { | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	if (cpu_has_veic || cpu_has_vint) { | ||||
| 		unsigned long sr = set_c0_status(ST0_BEV); | ||||
| 		write_c0_ebase(ebase); | ||||
|  | @ -1949,10 +1903,6 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
| 		cp0_perfcount_irq = -1; | ||||
| 	} | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	} | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| 	if (!cpu_data[cpu].asid_cache) | ||||
| 		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; | ||||
| 
 | ||||
|  | @ -1961,23 +1911,10 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
| 	BUG_ON(current->mm); | ||||
| 	enter_lazy_tlb(&init_mm, current); | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	if (bootTC) { | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 		/* Boot CPU's cache setup in setup_arch(). */ | ||||
| 		if (!is_boot_cpu) | ||||
| 			cpu_cache_init(); | ||||
| 		tlb_init(); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	} else if (!secondaryTC) { | ||||
| 		/*
 | ||||
| 		 * First TC in non-boot VPE must do subset of tlb_init() | ||||
| 		 * for MMU control registers. | ||||
| 		 */ | ||||
| 		write_c0_pagemask(PM_DEFAULT_MASK); | ||||
| 		write_c0_wired(0); | ||||
| 	} | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	TLBMISS_HANDLER_SETUP(); | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -127,9 +127,8 @@ int vpe_run(struct vpe *v) | |||
| 	clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * SMTC/SMVP kernels manage VPE enable independently, | ||||
| 	 * but uniprocessor kernels need to turn it on, even | ||||
| 	 * if that wasn't the pre-dvpe() state. | ||||
| 	 * SMVP kernels manage VPE enable independently, but uniprocessor | ||||
| 	 * kernels need to turn it on, even if that wasn't the pre-dvpe() state. | ||||
| 	 */ | ||||
| #ifdef CONFIG_SMP | ||||
| 	evpe(vpeflags); | ||||
|  | @ -454,12 +453,11 @@ int __init vpe_module_init(void) | |||
| 
 | ||||
| 			settc(tc); | ||||
| 
 | ||||
| 			/* Any TC that is bound to VPE0 gets left as is - in
 | ||||
| 			 * case we are running SMTC on VPE0. A TC that is bound | ||||
| 			 * to any other VPE gets bound to VPE0, ideally I'd like | ||||
| 			 * to make it homeless but it doesn't appear to let me | ||||
| 			 * bind a TC to a non-existent VPE. Which is perfectly | ||||
| 			 * reasonable. | ||||
| 			/*
 | ||||
| 			 * A TC that is bound to any other VPE gets bound to | ||||
| 			 * VPE0. Ideally I'd like to make it homeless, but it | ||||
| 			 * doesn't appear to let me bind a TC to a non-existent | ||||
| 			 * VPE, which is perfectly reasonable. | ||||
| 			 * | ||||
| 			 * The (un)bound state is visible to an EJTAG probe so | ||||
| 			 * may notify GDB... | ||||
|  |  | |||
|  | @ -61,7 +61,7 @@ | |||
| /* we have a cascade of 8 irqs */ | ||||
| #define MIPS_CPU_IRQ_CASCADE		8 | ||||
| 
 | ||||
| #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifdef CONFIG_MIPS_MT_SMP | ||||
| int gic_present; | ||||
| #endif | ||||
| 
 | ||||
|  | @ -440,7 +440,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) | |||
| 	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call); | ||||
| #endif | ||||
| 
 | ||||
| #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifndef CONFIG_MIPS_MT_SMP | ||||
| 	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | | ||||
| 		IE_IRQ3 | IE_IRQ4 | IE_IRQ5); | ||||
| #else | ||||
|  |  | |||
|  | @ -15,7 +15,7 @@ | |||
| #include <linux/export.h> | ||||
| #include <linux/stringify.h> | ||||
| 
 | ||||
| #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifndef CONFIG_CPU_MIPSR2 | ||||
| 
 | ||||
| /*
 | ||||
|  * For cli() we have to insert nops to make sure that the new value | ||||
|  | @ -42,12 +42,7 @@ notrace void arch_local_irq_disable(void) | |||
| 	__asm__ __volatile__( | ||||
| 	"	.set	push						\n" | ||||
| 	"	.set	noat						\n" | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	"	mfc0	$1, $2, 1					\n" | ||||
| 	"	ori	$1, 0x400					\n" | ||||
| 	"	.set	noreorder					\n" | ||||
| 	"	mtc0	$1, $2, 1					\n" | ||||
| #elif defined(CONFIG_CPU_MIPSR2) | ||||
| #if   defined(CONFIG_CPU_MIPSR2) | ||||
| 	/* see irqflags.h for inline function */ | ||||
| #else | ||||
| 	"	mfc0	$1,$12						\n" | ||||
|  | @ -77,13 +72,7 @@ notrace unsigned long arch_local_irq_save(void) | |||
| 	"	.set	push						\n" | ||||
| 	"	.set	reorder						\n" | ||||
| 	"	.set	noat						\n" | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	"	mfc0	%[flags], $2, 1				\n" | ||||
| 	"	ori	$1, %[flags], 0x400				\n" | ||||
| 	"	.set	noreorder					\n" | ||||
| 	"	mtc0	$1, $2, 1					\n" | ||||
| 	"	andi	%[flags], %[flags], 0x400			\n" | ||||
| #elif defined(CONFIG_CPU_MIPSR2) | ||||
| #if   defined(CONFIG_CPU_MIPSR2) | ||||
| 	/* see irqflags.h for inline function */ | ||||
| #else | ||||
| 	"	mfc0	%[flags], $12					\n" | ||||
|  | @ -108,29 +97,13 @@ notrace void arch_local_irq_restore(unsigned long flags) | |||
| { | ||||
| 	unsigned long __tmp1; | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	/*
 | ||||
| 	 * SMTC kernel needs to do a software replay of queued | ||||
| 	 * IPIs, at the cost of branch and call overhead on each | ||||
| 	 * local_irq_restore() | ||||
| 	 */ | ||||
| 	if (unlikely(!(flags & 0x0400))) | ||||
| 		smtc_ipi_replay(); | ||||
| #endif | ||||
| 	preempt_disable(); | ||||
| 
 | ||||
| 	__asm__ __volatile__( | ||||
| 	"	.set	push						\n" | ||||
| 	"	.set	noreorder					\n" | ||||
| 	"	.set	noat						\n" | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	"	mfc0	$1, $2, 1					\n" | ||||
| 	"	andi	%[flags], 0x400					\n" | ||||
| 	"	ori	$1, 0x400					\n" | ||||
| 	"	xori	$1, 0x400					\n" | ||||
| 	"	or	%[flags], $1					\n" | ||||
| 	"	mtc0	%[flags], $2, 1					\n" | ||||
| #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||||
| #if   defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||||
| 	/* see irqflags.h for inline function */ | ||||
| #elif defined(CONFIG_CPU_MIPSR2) | ||||
| 	/* see irqflags.h for inline function */ | ||||
|  | @ -163,14 +136,7 @@ notrace void __arch_local_irq_restore(unsigned long flags) | |||
| 	"	.set	push						\n" | ||||
| 	"	.set	noreorder					\n" | ||||
| 	"	.set	noat						\n" | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	"	mfc0	$1, $2, 1					\n" | ||||
| 	"	andi	%[flags], 0x400					\n" | ||||
| 	"	ori	$1, 0x400					\n" | ||||
| 	"	xori	$1, 0x400					\n" | ||||
| 	"	or	%[flags], $1					\n" | ||||
| 	"	mtc0	%[flags], $2, 1					\n" | ||||
| #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||||
| #if   defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||||
| 	/* see irqflags.h for inline function */ | ||||
| #elif defined(CONFIG_CPU_MIPSR2) | ||||
| 	/* see irqflags.h for inline function */ | ||||
|  | @ -192,4 +158,4 @@ notrace void __arch_local_irq_restore(unsigned long flags) | |||
| } | ||||
| EXPORT_SYMBOL(__arch_local_irq_restore); | ||||
| 
 | ||||
| #endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ | ||||
| #endif /* !CONFIG_CPU_MIPSR2 */ | ||||
|  |  | |||
|  | @ -50,7 +50,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) | |||
| { | ||||
| 	preempt_disable(); | ||||
| 
 | ||||
| #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifndef CONFIG_MIPS_MT_SMP | ||||
| 	smp_call_function(func, info, 1); | ||||
| #endif | ||||
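| 	/* With MT_SMP the VPEs share the primary caches, so the local call below reaches every "CPU". */ | ||||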
| 	func(info); | ||||
|  | @ -427,7 +427,7 @@ static void r4k___flush_cache_all(void) | |||
| 
 | ||||
| static inline int has_valid_asid(const struct mm_struct *mm) | ||||
| { | ||||
| #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifdef CONFIG_MIPS_MT_SMP | ||||
| 	int i; | ||||
| 
 | ||||
| 	for_each_online_cpu(i) | ||||
|  |  | |||
|  | @ -44,27 +44,6 @@ | |||
| #include <asm/tlb.h> | ||||
| #include <asm/fixmap.h> | ||||
| 
 | ||||
| /* Atomicity and interruptability */ | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 
 | ||||
| #include <asm/mipsmtregs.h> | ||||
| 
 | ||||
| #define ENTER_CRITICAL(flags) \ | ||||
| 	{ \ | ||||
| 	unsigned int mvpflags; \ | ||||
| 	local_irq_save(flags);\ | ||||
| 	mvpflags = dvpe() | ||||
| #define EXIT_CRITICAL(flags) \ | ||||
| 	evpe(mvpflags); \ | ||||
| 	local_irq_restore(flags); \ | ||||
| 	} | ||||
| #else | ||||
| 
 | ||||
| #define ENTER_CRITICAL(flags) local_irq_save(flags) | ||||
| #define EXIT_CRITICAL(flags) local_irq_restore(flags) | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| /*
 | ||||
|  * We have up to 8 empty zeroed pages so we can map one of the right colour | ||||
|  * when needed.	 This is necessary only on R4000 / R4400 SC and MC versions | ||||
|  | @ -100,20 +79,6 @@ void setup_zero_pages(void) | |||
| 	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| static pte_t *kmap_coherent_pte; | ||||
| static void __init kmap_coherent_init(void) | ||||
| { | ||||
| 	unsigned long vaddr; | ||||
| 
 | ||||
| 	/* cache the first coherent kmap pte */ | ||||
| 	vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | ||||
| 	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | ||||
| } | ||||
| #else | ||||
| static inline void kmap_coherent_init(void) {} | ||||
| #endif | ||||
| 
 | ||||
| void *kmap_coherent(struct page *page, unsigned long addr) | ||||
| { | ||||
| 	enum fixed_addresses idx; | ||||
|  | @ -126,12 +91,7 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
| 
 | ||||
| 	pagefault_disable(); | ||||
| 	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	idx += FIX_N_COLOURS * smp_processor_id() + | ||||
| 		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0); | ||||
| #else | ||||
| 	idx += in_interrupt() ? FIX_N_COLOURS : 0; | ||||
| #endif | ||||
| 	vaddr = __fix_to_virt(FIX_CMAP_END - idx); | ||||
| 	pte = mk_pte(page, PAGE_KERNEL); | ||||
| #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | ||||
|  | @ -140,44 +100,29 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
| 	entrylo = pte_to_entrylo(pte_val(pte)); | ||||
| #endif | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 	old_ctx = read_c0_entryhi(); | ||||
| 	write_c0_entryhi(vaddr & (PAGE_MASK << 1)); | ||||
| 	write_c0_entrylo0(entrylo); | ||||
| 	write_c0_entrylo1(entrylo); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); | ||||
| 	/* preload TLB instead of local_flush_tlb_one() */ | ||||
| 	mtc0_tlbw_hazard(); | ||||
| 	tlb_probe(); | ||||
| 	tlb_probe_hazard(); | ||||
| 	tlbidx = read_c0_index(); | ||||
| 	mtc0_tlbw_hazard(); | ||||
| 	if (tlbidx < 0) | ||||
| 		tlb_write_random(); | ||||
| 	else | ||||
| 		tlb_write_indexed(); | ||||
| #else | ||||
| 	tlbidx = read_c0_wired(); | ||||
| 	write_c0_wired(tlbidx + 1); | ||||
| 	write_c0_index(tlbidx); | ||||
| 	mtc0_tlbw_hazard(); | ||||
| 	tlb_write_indexed(); | ||||
| #endif | ||||
| 	tlbw_use_hazard(); | ||||
| 	write_c0_entryhi(old_ctx); | ||||
| 	EXIT_CRITICAL(flags); | ||||
| 	local_irq_restore(flags); | ||||
| 
 | ||||
| 	return (void*) vaddr; | ||||
| } | ||||
| 
 | ||||
| void kunmap_coherent(void) | ||||
| { | ||||
| #ifndef CONFIG_MIPS_MT_SMTC | ||||
| 	unsigned int wired; | ||||
| 	unsigned long flags, old_ctx; | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 	old_ctx = read_c0_entryhi(); | ||||
| 	wired = read_c0_wired() - 1; | ||||
| 	write_c0_wired(wired); | ||||
|  | @ -189,8 +134,7 @@ void kunmap_coherent(void) | |||
| 	tlb_write_indexed(); | ||||
| 	tlbw_use_hazard(); | ||||
| 	write_c0_entryhi(old_ctx); | ||||
| 	EXIT_CRITICAL(flags); | ||||
| #endif | ||||
| 	local_irq_restore(flags); | ||||
| 	pagefault_enable(); | ||||
| } | ||||
| 
 | ||||
|  | @ -256,7 +200,7 @@ EXPORT_SYMBOL_GPL(copy_from_user_page); | |||
| void __init fixrange_init(unsigned long start, unsigned long end, | ||||
| 	pgd_t *pgd_base) | ||||
| { | ||||
| #if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) | ||||
| #ifdef CONFIG_HIGHMEM | ||||
| 	pgd_t *pgd; | ||||
| 	pud_t *pud; | ||||
| 	pmd_t *pmd; | ||||
|  | @ -327,8 +271,6 @@ void __init paging_init(void) | |||
| #ifdef CONFIG_HIGHMEM | ||||
| 	kmap_init(); | ||||
| #endif | ||||
| 	kmap_coherent_init(); | ||||
| 
 | ||||
| #ifdef CONFIG_ZONE_DMA | ||||
| 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; | ||||
| #endif | ||||
|  |  | |||
|  | @ -25,28 +25,6 @@ | |||
| 
 | ||||
| extern void build_tlb_refill_handler(void); | ||||
| 
 | ||||
| /* Atomicity and interruptability */ | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 
 | ||||
| #include <asm/smtc.h> | ||||
| #include <asm/mipsmtregs.h> | ||||
| 
 | ||||
| #define ENTER_CRITICAL(flags) \ | ||||
| 	{ \ | ||||
| 	unsigned int mvpflags; \ | ||||
| 	local_irq_save(flags);\ | ||||
| 	mvpflags = dvpe() | ||||
| #define EXIT_CRITICAL(flags) \ | ||||
| 	evpe(mvpflags); \ | ||||
| 	local_irq_restore(flags); \ | ||||
| 	} | ||||
| #else | ||||
| 
 | ||||
| #define ENTER_CRITICAL(flags) local_irq_save(flags) | ||||
| #define EXIT_CRITICAL(flags) local_irq_restore(flags) | ||||
| 
 | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| /*
 | ||||
|  * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, | ||||
|  * unfortunately, itlb is not totally transparent to software. | ||||
|  | @ -75,7 +53,7 @@ void local_flush_tlb_all(void) | |||
| 	unsigned long old_ctx; | ||||
| 	int entry, ftlbhighset; | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 	/* Save old context and create impossible VPN2 value */ | ||||
| 	old_ctx = read_c0_entryhi(); | ||||
| 	write_c0_entrylo0(0); | ||||
|  | @ -112,7 +90,7 @@ void local_flush_tlb_all(void) | |||
| 	tlbw_use_hazard(); | ||||
| 	write_c0_entryhi(old_ctx); | ||||
| 	flush_itlb(); | ||||
| 	EXIT_CRITICAL(flags); | ||||
| 	local_irq_restore(flags); | ||||
| } | ||||
| EXPORT_SYMBOL(local_flush_tlb_all); | ||||
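local_flush_tlb_all() invalidates entries by parking each one on a distinct, unmatchable kernel-segment VPN2 (the "impossible VPN2 value" of the comment above). A sketch of that idiom, assuming the UNIQUE_ENTRYHI() definition from asm/tlb.h is unchanged by this series:

#define UNIQUE_ENTRYHI(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

	write_c0_entrylo0(0);		/* no PFN, valid bit clear */
	write_c0_entrylo1(0);
	write_c0_entryhi(UNIQUE_ENTRYHI(entry));
	write_c0_index(entry);
	mtc0_tlbw_hazard();
	tlb_write_indexed();		/* this entry can now never hit */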
| 
 | ||||
|  | @ -142,7 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
| 	if (cpu_context(cpu, mm) != 0) { | ||||
| 		unsigned long size, flags; | ||||
| 
 | ||||
| 		ENTER_CRITICAL(flags); | ||||
| 		local_irq_save(flags); | ||||
| 		start = round_down(start, PAGE_SIZE << 1); | ||||
| 		end = round_up(end, PAGE_SIZE << 1); | ||||
| 		size = (end - start) >> (PAGE_SHIFT + 1); | ||||
|  | @ -176,7 +154,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
| 			drop_mmu_context(mm, cpu); | ||||
| 		} | ||||
| 		flush_itlb(); | ||||
| 		EXIT_CRITICAL(flags); | ||||
| 		local_irq_restore(flags); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -184,7 +162,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| { | ||||
| 	unsigned long size, flags; | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | ||||
| 	size = (size + 1) >> 1; | ||||
| 	if (size <= (current_cpu_data.tlbsizeftlbsets ? | ||||
|  | @ -220,7 +198,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| 		local_flush_tlb_all(); | ||||
| 	} | ||||
| 	flush_itlb(); | ||||
| 	EXIT_CRITICAL(flags); | ||||
| 	local_irq_restore(flags); | ||||
| } | ||||
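local_flush_tlb_range() and local_flush_tlb_kernel_range() share a heuristic: probing entry-by-entry only pays off while the range covers at most half the TLB (an eighth when an FTLB is present); beyond that, the whole TLB is flushed. A standalone sketch of that decision, with illustrative names rather than kernel API:

#include <stdbool.h>

/* Probe per page pair, or give up and flush everything? Mirrors the
 * size checks in the two flush-range routines above. */
static bool worth_probing(unsigned long start, unsigned long end,
			  unsigned long page_size, int tlbsize, bool has_ftlb)
{
	unsigned long pages = (end - start + page_size - 1) / page_size;
	unsigned long pairs = (pages + 1) / 2;	/* one entry maps a page pair */

	return pairs <= (unsigned long)(has_ftlb ? tlbsize / 8 : tlbsize / 2);
}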
| 
 | ||||
| void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||||
|  | @ -233,7 +211,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
| 
 | ||||
| 		newpid = cpu_asid(cpu, vma->vm_mm); | ||||
| 		page &= (PAGE_MASK << 1); | ||||
| 		ENTER_CRITICAL(flags); | ||||
| 		local_irq_save(flags); | ||||
| 		oldpid = read_c0_entryhi(); | ||||
| 		write_c0_entryhi(page | newpid); | ||||
| 		mtc0_tlbw_hazard(); | ||||
|  | @ -253,7 +231,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
| 	finish: | ||||
| 		write_c0_entryhi(oldpid); | ||||
| 		flush_itlb_vm(vma); | ||||
| 		EXIT_CRITICAL(flags); | ||||
| 		local_irq_restore(flags); | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  | @ -266,7 +244,7 @@ void local_flush_tlb_one(unsigned long page) | |||
| 	unsigned long flags; | ||||
| 	int oldpid, idx; | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 	oldpid = read_c0_entryhi(); | ||||
| 	page &= (PAGE_MASK << 1); | ||||
| 	write_c0_entryhi(page); | ||||
|  | @ -285,7 +263,7 @@ void local_flush_tlb_one(unsigned long page) | |||
| 	} | ||||
| 	write_c0_entryhi(oldpid); | ||||
| 	flush_itlb(); | ||||
| 	EXIT_CRITICAL(flags); | ||||
| 	local_irq_restore(flags); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  | @ -308,7 +286,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
| 	if (current->active_mm != vma->vm_mm) | ||||
| 		return; | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 
 | ||||
| 	pid = read_c0_entryhi() & ASID_MASK; | ||||
| 	address &= (PAGE_MASK << 1); | ||||
|  | @ -358,7 +336,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
| 	} | ||||
| 	tlbw_use_hazard(); | ||||
| 	flush_itlb_vm(vma); | ||||
| 	EXIT_CRITICAL(flags); | ||||
| 	local_irq_restore(flags); | ||||
| } | ||||
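local_flush_tlb_page(), local_flush_tlb_one() and __update_tlb() all build on the same probe-and-replace idiom that the deleted SMTC branch of kmap_coherent() used as well: probe for the VPN, then overwrite the matching slot, or fall back to a random slot when nothing matched. Condensed sketch (address, pid, lo0 and lo1 stand in for the caller's values):

	write_c0_entryhi(address | pid);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();		/* negative: no matching entry */
	write_c0_entrylo0(lo0);
	write_c0_entrylo1(lo1);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();	/* install in a random slot */
	else
		tlb_write_indexed();	/* replace the matched slot */
	tlbw_use_hazard();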
| 
 | ||||
| void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | ||||
|  | @ -369,7 +347,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 	unsigned long old_pagemask; | ||||
| 	unsigned long old_ctx; | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 	/* Save old context and create impossible VPN2 value */ | ||||
| 	old_ctx = read_c0_entryhi(); | ||||
| 	old_pagemask = read_c0_pagemask(); | ||||
|  | @ -389,7 +367,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
| 	tlbw_use_hazard();	/* What is the hazard here? */ | ||||
| 	write_c0_pagemask(old_pagemask); | ||||
| 	local_flush_tlb_all(); | ||||
| 	EXIT_CRITICAL(flags); | ||||
| 	local_irq_restore(flags); | ||||
| } | ||||
| 
 | ||||
| #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||||
|  | @ -399,13 +377,13 @@ int __init has_transparent_hugepage(void) | |||
| 	unsigned int mask; | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	ENTER_CRITICAL(flags); | ||||
| 	local_irq_save(flags); | ||||
| 	write_c0_pagemask(PM_HUGE_MASK); | ||||
| 	back_to_back_c0_hazard(); | ||||
| 	mask = read_c0_pagemask(); | ||||
| 	write_c0_pagemask(PM_DEFAULT_MASK); | ||||
| 
 | ||||
| 	EXIT_CRITICAL(flags); | ||||
| 	local_irq_restore(flags); | ||||
| 
 | ||||
| 	return mask == PM_HUGE_MASK; | ||||
| } | ||||
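has_transparent_hugepage() is a write-and-read-back capability probe: PM_HUGE_MASK is written to CP0 PageMask, and the value only reads back unchanged if the core implements every bit of it. The same style works for any CP0 field with optional bits; a sketch (hypothetical helper, not kernel API):

/* Does PageMask accept this mask? Call with interrupts disabled. */
static int pagemask_supported(unsigned int mask)
{
	unsigned int readback;

	write_c0_pagemask(mask);
	back_to_back_c0_hazard();	/* let the CP0 write settle */
	readback = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);	/* restore the default */

	return readback == mask;
}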
|  |  | |||
|  | @ -8,6 +8,3 @@ | |||
| obj-y				:= malta-amon.o malta-display.o malta-init.o \
 | ||||
| 				   malta-int.o malta-memory.o malta-platform.o \
 | ||||
| 				   malta-reset.o malta-setup.o malta-time.o | ||||
| 
 | ||||
| # FIXME FIXME FIXME
 | ||||
| obj-$(CONFIG_MIPS_MT_SMTC)	+= malta-smtc.o | ||||
|  |  | |||
|  | @ -116,8 +116,6 @@ phys_t mips_cpc_default_phys_base(void) | |||
| 	return CPC_BASE_ADDR; | ||||
| } | ||||
| 
 | ||||
| extern struct plat_smp_ops msmtc_smp_ops; | ||||
| 
 | ||||
| void __init prom_init(void) | ||||
| { | ||||
| 	mips_display_message("LINUX"); | ||||
|  | @ -304,8 +302,4 @@ mips_pci_controller: | |||
| 		return; | ||||
| 	if (!register_vsmp_smp_ops()) | ||||
| 		return; | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 	register_smp_ops(&msmtc_smp_ops); | ||||
| #endif | ||||
| } | ||||
|  |  | |||
|  | @ -504,28 +504,9 @@ void __init arch_init_irq(void) | |||
| 	} else if (cpu_has_vint) { | ||||
| 		set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); | ||||
| 		set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq, | ||||
| 			(0x100 << MIPSCPU_INT_I8259A)); | ||||
| 		setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, | ||||
| 			&corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI)); | ||||
| 		/*
 | ||||
| 		 * Temporary hack to ensure that the subsidiary device | ||||
| 		 * interrupts coming in via the i8259A, but associated | ||||
| 		 * with low IRQ numbers, will restore the Status.IM | ||||
| 		 * value associated with the i8259A. | ||||
| 		 */ | ||||
| 		{ | ||||
| 			int i; | ||||
| 
 | ||||
| 			for (i = 0; i < 16; i++) | ||||
| 				irq_hwmask[i] = (0x100 << MIPSCPU_INT_I8259A); | ||||
| 		} | ||||
| #else /* Not SMTC */ | ||||
| 		setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); | ||||
| 		setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, | ||||
| 						&corehi_irqaction); | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 	} else { | ||||
| 		setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); | ||||
| 		setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, | ||||
|  |  | |||
|  | @ -77,11 +77,7 @@ const char *get_system_type(void) | |||
| 	return "MIPS Malta"; | ||||
| } | ||||
| 
 | ||||
| #if defined(CONFIG_MIPS_MT_SMTC) | ||||
| const char display_string[] = "	      SMTC LINUX ON MALTA	"; | ||||
| #else | ||||
| const char display_string[] = "	       LINUX ON MALTA	    "; | ||||
| #endif /* CONFIG_MIPS_MT_SMTC */ | ||||
| 
 | ||||
| #ifdef CONFIG_BLK_DEV_FD | ||||
| static void __init fd_activate(void) | ||||
|  |  | |||
|  | @ -1,162 +0,0 @@ | |||
| /*
 | ||||
|  * Malta Platform-specific hooks for SMP operation | ||||
|  */ | ||||
| #include <linux/irq.h> | ||||
| #include <linux/init.h> | ||||
| 
 | ||||
| #include <asm/mipsregs.h> | ||||
| #include <asm/mipsmtregs.h> | ||||
| #include <asm/smtc.h> | ||||
| #include <asm/smtc_ipi.h> | ||||
| 
 | ||||
| /* VPE/SMP Prototype implements platform interfaces directly */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Cause the specified action to be performed on a targeted "CPU" | ||||
|  */ | ||||
| 
 | ||||
| static void msmtc_send_ipi_single(int cpu, unsigned int action) | ||||
| { | ||||
| 	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ | ||||
| 	smtc_send_ipi(cpu, LINUX_SMP_IPI, action); | ||||
| } | ||||
| 
 | ||||
| static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) | ||||
| { | ||||
| 	unsigned int i; | ||||
| 
 | ||||
| 	for_each_cpu(i, mask) | ||||
| 		msmtc_send_ipi_single(i, action); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Post-config but pre-boot cleanup entry point | ||||
|  */ | ||||
| static void msmtc_init_secondary(void) | ||||
| { | ||||
| 	int myvpe; | ||||
| 
 | ||||
| 	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ | ||||
| 	myvpe = read_c0_tcbind() & TCBIND_CURVPE; | ||||
| 	if (myvpe != 0) { | ||||
| 		/* Ideally, this should be done only once per VPE, but... */ | ||||
| 		clear_c0_status(ST0_IM); | ||||
| 		set_c0_status((0x100 << cp0_compare_irq) | ||||
| 				| (0x100 << MIPS_CPU_IPI_IRQ)); | ||||
| 		if (cp0_perfcount_irq >= 0) | ||||
| 			set_c0_status(0x100 << cp0_perfcount_irq); | ||||
| 	} | ||||
| 
 | ||||
| 	smtc_init_secondary(); | ||||
| } | ||||
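The (0x100 << n) expressions above index the Status.IM interrupt-mask field, which occupies bits 15..8 of CP0 Status, so enabling hardware interrupt line n means setting bit 8 + n. Illustrative sketch only (the kernel's own STATUSF_IP* constants in asm/mipsregs.h cover the fixed lines):

#define IM_BIT(n)	(0x100 << (n))	/* Status.IM bit for interrupt line n */

	set_c0_status(IM_BIT(cp0_compare_irq) | IM_BIT(MIPS_CPU_IPI_IRQ));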
| 
 | ||||
| /*
 | ||||
|  * Platform "CPU" startup hook | ||||
|  */ | ||||
| static void msmtc_boot_secondary(int cpu, struct task_struct *idle) | ||||
| { | ||||
| 	smtc_boot_secondary(cpu, idle); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * SMP initialization finalization entry point | ||||
|  */ | ||||
| static void msmtc_smp_finish(void) | ||||
| { | ||||
| 	smtc_smp_finish(); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Hook for after all CPUs are online | ||||
|  */ | ||||
| 
 | ||||
| static void msmtc_cpus_done(void) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Platform SMP pre-initialization | ||||
|  * | ||||
|  * As noted above, we can assume a single CPU for now | ||||
|  * but it may be multithreaded. | ||||
|  */ | ||||
| 
 | ||||
| static void __init msmtc_smp_setup(void) | ||||
| { | ||||
| 	/*
 | ||||
| 	 * we won't get the definitive value until | ||||
| 	 * we've run smtc_prepare_cpus later, but | ||||
| 	 * we would appear to need an upper bound now. | ||||
| 	 */ | ||||
| 	smp_num_siblings = smtc_build_cpu_map(0); | ||||
| } | ||||
| 
 | ||||
| static void __init msmtc_prepare_cpus(unsigned int max_cpus) | ||||
| { | ||||
| 	smtc_prepare_cpus(max_cpus); | ||||
| } | ||||
| 
 | ||||
| struct plat_smp_ops msmtc_smp_ops = { | ||||
| 	.send_ipi_single	= msmtc_send_ipi_single, | ||||
| 	.send_ipi_mask		= msmtc_send_ipi_mask, | ||||
| 	.init_secondary		= msmtc_init_secondary, | ||||
| 	.smp_finish		= msmtc_smp_finish, | ||||
| 	.cpus_done		= msmtc_cpus_done, | ||||
| 	.boot_secondary		= msmtc_boot_secondary, | ||||
| 	.smp_setup		= msmtc_smp_setup, | ||||
| 	.prepare_cpus		= msmtc_prepare_cpus, | ||||
| }; | ||||
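For context on how this ops table was consumed: the generic MIPS SMP code dispatches through whichever plat_smp_ops was registered at boot (the register_smp_ops() call in the malta-init.c hunk above). A simplified sketch, loosely based on arch/mips/kernel/smp.c; the helper name is illustrative:

	extern struct plat_smp_ops *mp_ops;	/* set by register_smp_ops() */

	/* cross-CPU calls funnel through the registered ops ... */
	static void core_send_ipi(int cpu, unsigned int action)
	{
		mp_ops->send_ipi_single(cpu, action);	/* was msmtc_send_ipi_single() */
	}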
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | ||||
| /*
 | ||||
|  * IRQ affinity hook | ||||
|  */ | ||||
| 
 | ||||
| 
 | ||||
| int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, | ||||
| 			  bool force) | ||||
| { | ||||
| 	cpumask_t tmask; | ||||
| 	int cpu = 0; | ||||
| 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); | ||||
| 
 | ||||
| 	/*
 | ||||
| 	 * On the legacy Malta development board, all I/O interrupts | ||||
| 	 * are routed through the 8259 and combined in a single signal | ||||
| 	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models, | ||||
| 	 * that signal is brought to IP2 of both VPEs. To avoid racing | ||||
| 	 * concurrent interrupt service events, IP2 is enabled only on | ||||
| 	 * one VPE, by convention VPE0.	 So long as no bits are ever | ||||
| 	 * cleared in the affinity mask, there will never be any | ||||
| 	 * interrupt forwarding.  But as soon as a program or operator | ||||
| 	 * sets affinity for one of the related IRQs, we need to make | ||||
| 	 * sure that we don't ever try to forward across the VPE boundary, | ||||
| 	 * at least not until we engineer a system where the interrupt | ||||
| 	 * _ack() or _end() function can somehow know that it corresponds | ||||
| 	 * to an interrupt taken on another VPE, and perform the appropriate | ||||
| 	 * restoration of Status.IM state using MFTR/MTTR instead of the | ||||
| 	 * normal local behavior. We also ensure that no attempt will | ||||
| 	 * be made to forward to an offline "CPU". | ||||
| 	 */ | ||||
| 
 | ||||
| 	cpumask_copy(&tmask, affinity); | ||||
| 	for_each_cpu(cpu, affinity) { | ||||
| 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) | ||||
| 			cpu_clear(cpu, tmask); | ||||
| 	} | ||||
| 	cpumask_copy(d->affinity, &tmask); | ||||
| 
 | ||||
| 	if (cpus_empty(tmask)) | ||||
| 		/*
 | ||||
| 		 * We could restore a default mask here, but the | ||||
| 		 * runtime code can anyway deal with the null set | ||||
| 		 */ | ||||
| 		printk(KERN_WARNING | ||||
| 		       "IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq); | ||||
| 
 | ||||
| 	/* Do any generic SMTC IRQ affinity setup */ | ||||
| 	smtc_set_irq_affinity(d->irq, tmask); | ||||
| 
 | ||||
| 	return IRQ_SET_MASK_OK_NOCOPY; | ||||
| } | ||||
| #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ | ||||
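The restriction loop above predates the cpumask_* accessor cleanups (cpu_clear(), cpus_empty()). Written against the later helpers, the same logic would look roughly like this (assuming cpumask_clear_cpu()/cpumask_empty() semantics):

	cpumask_copy(&tmask, affinity);
	for_each_cpu(cpu, affinity)
		if (cpu_data[cpu].vpe_id != 0 || !cpu_online(cpu))
			cpumask_clear_cpu(cpu, &tmask);

	if (cpumask_empty(&tmask))
		pr_warn("IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq);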
|  | @ -10,4 +10,3 @@ obj-$(CONFIG_PCI) += msp_pci.o | |||
| obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o | ||||
| obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o | ||||
| obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o | ||||
| obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o | ||||
|  |  | |||
|  | @ -32,7 +32,7 @@ extern void msp_vsmp_int_init(void); | |||
| 
 | ||||
| /* vectored interrupt implementation */ | ||||
| 
 | ||||
| /* SW0/1 interrupts are used for SMP/SMTC */ | ||||
| /* SW0/1 interrupts are used for SMP */ | ||||
| static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } | ||||
| static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } | ||||
| static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } | ||||
|  | @ -138,14 +138,6 @@ void __init arch_init_irq(void) | |||
| 	set_vi_handler(MSP_INT_SEC, sec_int_dispatch); | ||||
| #ifdef CONFIG_MIPS_MT_SMP | ||||
| 	msp_vsmp_int_init(); | ||||
| #elif defined CONFIG_MIPS_MT_SMTC | ||||
| 	/*Set hwmask for all platform devices */ | ||||
| 	irq_hwmask[MSP_INT_MAC0] = C_IRQ0; | ||||
| 	irq_hwmask[MSP_INT_MAC1] = C_IRQ1; | ||||
| 	irq_hwmask[MSP_INT_USB] = C_IRQ2; | ||||
| 	irq_hwmask[MSP_INT_SAR] = C_IRQ3; | ||||
| 	irq_hwmask[MSP_INT_SEC] = C_IRQ5; | ||||
| 
 | ||||
| #endif	/* CONFIG_MIPS_MT_SMP */ | ||||
| #endif	/* CONFIG_MIPS_MT */ | ||||
| 	/* setup the cascaded interrupts */ | ||||
|  | @ -153,8 +145,10 @@ void __init arch_init_irq(void) | |||
| 	setup_irq(MSP_INT_PER, &per_cascade_msp); | ||||
| 
 | ||||
| #else | ||||
| 	/* setup the 2nd-level SLP register based interrupt controller */ | ||||
| 	/* VSMP /SMTC support support is not enabled for SLP */ | ||||
| 	/*
 | ||||
| 	 * Setup the 2nd-level SLP register based interrupt controller. | ||||
| 	 * VSMP support is not enabled for SLP. | ||||
| 	 */ | ||||
| 	msp_slp_irq_init(); | ||||
| 
 | ||||
| 	/* setup the cascaded SLP/PER interrupts */ | ||||
|  |  | |||
|  | @ -120,10 +120,9 @@ static void msp_cic_irq_ack(struct irq_data *d) | |||
| 	* hurt for the others | ||||
| 	*/ | ||||
| 	*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); | ||||
| 	smtc_im_ack_irq(d->irq); | ||||
| } | ||||
| 
 | ||||
| /*Note: Limiting to VSMP . Not tested in SMTC */ | ||||
| /* Note: Limiting to VSMP. */ | ||||
| 
 | ||||
| #ifdef CONFIG_MIPS_MT_SMP | ||||
| static int msp_cic_irq_set_affinity(struct irq_data *d, | ||||
|  | @ -183,10 +182,6 @@ void __init msp_cic_irq_init(void) | |||
| 	for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { | ||||
| 		irq_set_chip_and_handler(i, &msp_cic_irq_controller, | ||||
| 					 handle_level_irq); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		/* Mask of CIC interrupt */ | ||||
| 		irq_hwmask[i] = C_IRQ4; | ||||
| #endif | ||||
| 	} | ||||
| 
 | ||||
| 	/* Initialize the PER interrupt sub-system */ | ||||
|  |  | |||
|  | @ -113,9 +113,6 @@ void __init msp_per_irq_init(void) | |||
| 	/* initialize all the IRQ descriptors */ | ||||
| 	for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { | ||||
| 		irq_set_chip(i, &msp_per_irq_controller); | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		irq_hwmask[i] = C_IRQ4; | ||||
| #endif | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -147,8 +147,6 @@ void __init plat_mem_setup(void) | |||
| 	pm_power_off = msp_power_off; | ||||
| } | ||||
| 
 | ||||
| extern struct plat_smp_ops msp_smtc_smp_ops; | ||||
| 
 | ||||
| void __init prom_init(void) | ||||
| { | ||||
| 	unsigned long family; | ||||
|  | @ -229,9 +227,5 @@ void __init prom_init(void) | |||
| 	 */ | ||||
| 	msp_serial_setup(); | ||||
| 
 | ||||
| 	if (register_vsmp_smp_ops()) { | ||||
| #ifdef CONFIG_MIPS_MT_SMTC | ||||
| 		register_smp_ops(&msp_smtc_smp_ops); | ||||
| #endif | ||||
| 	} | ||||
| 	register_vsmp_smp_ops(); | ||||
| } | ||||
|  |  | |||
|  | @ -1,104 +0,0 @@ | |||
| /*
 | ||||
|  * MSP71xx Platform-specific hooks for SMP operation | ||||
|  */ | ||||
| #include <linux/irq.h> | ||||
| #include <linux/init.h> | ||||
| 
 | ||||
| #include <asm/mipsmtregs.h> | ||||
| #include <asm/mipsregs.h> | ||||
| #include <asm/smtc.h> | ||||
| #include <asm/smtc_ipi.h> | ||||
| 
 | ||||
| /* VPE/SMP Prototype implements platform interfaces directly */ | ||||
| 
 | ||||
| /*
 | ||||
|  * Cause the specified action to be performed on a targeted "CPU" | ||||
|  */ | ||||
| 
 | ||||
| static void msp_smtc_send_ipi_single(int cpu, unsigned int action) | ||||
| { | ||||
| 	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ | ||||
| 	smtc_send_ipi(cpu, LINUX_SMP_IPI, action); | ||||
| } | ||||
| 
 | ||||
| static void msp_smtc_send_ipi_mask(const struct cpumask *mask, | ||||
| 						unsigned int action) | ||||
| { | ||||
| 	unsigned int i; | ||||
| 
 | ||||
| 	for_each_cpu(i, mask) | ||||
| 		msp_smtc_send_ipi_single(i, action); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Post-config but pre-boot cleanup entry point | ||||
|  */ | ||||
| static void msp_smtc_init_secondary(void) | ||||
| { | ||||
| 	int myvpe; | ||||
| 
 | ||||
| 	/* Don't enable I/O interrupts (IP2) for secondary VPEs */ | ||||
| 	myvpe = read_c0_tcbind() & TCBIND_CURVPE; | ||||
| 	if (myvpe > 0) | ||||
| 		change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | | ||||
| 				STATUSF_IP6 | STATUSF_IP7); | ||||
| 	smtc_init_secondary(); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Platform "CPU" startup hook | ||||
|  */ | ||||
| static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle) | ||||
| { | ||||
| 	smtc_boot_secondary(cpu, idle); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * SMP initialization finalization entry point | ||||
|  */ | ||||
| static void msp_smtc_smp_finish(void) | ||||
| { | ||||
| 	smtc_smp_finish(); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Hook for after all CPUs are online | ||||
|  */ | ||||
| 
 | ||||
| static void msp_smtc_cpus_done(void) | ||||
| { | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Platform SMP pre-initialization | ||||
|  * | ||||
|  * As noted above, we can assume a single CPU for now | ||||
|  * but it may be multithreaded. | ||||
|  */ | ||||
| 
 | ||||
| static void __init msp_smtc_smp_setup(void) | ||||
| { | ||||
| 	/*
 | ||||
| 	 * we won't get the definitive value until | ||||
| 	 * we've run smtc_prepare_cpus later, but | ||||
| 	 * we would appear to need an upper bound now. | ||||
| 	 */ | ||||
| 
 | ||||
| 	if (read_c0_config3() & (1 << 2))	/* Config3.MT: MT ASE implemented */ | ||||
| 		smp_num_siblings = smtc_build_cpu_map(0); | ||||
| } | ||||
| 
 | ||||
| static void __init msp_smtc_prepare_cpus(unsigned int max_cpus) | ||||
| { | ||||
| 	smtc_prepare_cpus(max_cpus); | ||||
| } | ||||
| 
 | ||||
| struct plat_smp_ops msp_smtc_smp_ops = { | ||||
| 	.send_ipi_single	= msp_smtc_send_ipi_single, | ||||
| 	.send_ipi_mask		= msp_smtc_send_ipi_mask, | ||||
| 	.init_secondary		= msp_smtc_init_secondary, | ||||
| 	.smp_finish		= msp_smtc_smp_finish, | ||||
| 	.cpus_done		= msp_smtc_cpus_done, | ||||
| 	.boot_secondary		= msp_smtc_boot_secondary, | ||||
| 	.smp_setup		= msp_smtc_smp_setup, | ||||
| 	.prepare_cpus		= msp_smtc_prepare_cpus, | ||||
| }; | ||||