| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | /*
 | 
					
						
							|  |  |  |  * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved. | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * This software is available to you under a choice of one of two | 
					
						
							|  |  |  |  * licenses.  You may choose to be licensed under the terms of the GNU | 
					
						
							|  |  |  |  * General Public License (GPL) Version 2, available from the file | 
					
						
							|  |  |  |  * COPYING in the main directory of this source tree, or the | 
					
						
							|  |  |  |  * OpenIB.org BSD license below: | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *     Redistribution and use in source and binary forms, with or | 
					
						
							|  |  |  |  *     without modification, are permitted provided that the following | 
					
						
							|  |  |  |  *     conditions are met: | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *	- Redistributions of source code must retain the above | 
					
						
							|  |  |  |  *	  copyright notice, this list of conditions and the following | 
					
						
							|  |  |  |  *	  disclaimer. | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *	- Redistributions in binary form must reproduce the above | 
					
						
							|  |  |  |  *	  copyright notice, this list of conditions and the following | 
					
						
							|  |  |  |  *	  disclaimer in the documentation and/or other materials | 
					
						
							|  |  |  |  *	  provided with the distribution. | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 
					
						
							|  |  |  |  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 
					
						
							|  |  |  |  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | 
					
						
							|  |  |  |  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | 
					
						
							|  |  |  |  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | 
					
						
							|  |  |  |  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | 
					
						
							|  |  |  |  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 
					
						
							|  |  |  |  * SOFTWARE. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #ifndef MLX4_DEVICE_H
 | 
					
						
							|  |  |  | #define MLX4_DEVICE_H
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | #include <linux/pci.h>
 | 
					
						
							|  |  |  | #include <linux/completion.h>
 | 
					
						
							|  |  |  | #include <linux/radix-tree.h>
 | 
					
						
							| 
									
										
										
										
											2012-07-18 22:33:51 +00:00
										 |  |  | #include <linux/cpu_rmap.h>
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-07-26 16:09:06 -07:00
										 |  |  | #include <linux/atomic.h>
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-03-22 22:37:47 +00:00
										 |  |  | #define MAX_MSIX_P_PORT		17
 | 
					
						
							|  |  |  | #define MAX_MSIX		64
 | 
					
						
							|  |  |  | #define MSIX_LEGACY_SZ		4
 | 
					
						
							|  |  |  | #define MIN_MSIX_P_PORT		5
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_FLAG_MSI_X		= 1 << 0, | 
					
						
							| 
									
										
										
										
											2007-06-18 08:15:02 -07:00
										 |  |  | 	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1, | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | 	MLX4_FLAG_MASTER	= 1 << 2, | 
					
						
							|  |  |  | 	MLX4_FLAG_SLAVE		= 1 << 3, | 
					
						
							|  |  |  | 	MLX4_FLAG_SRIOV		= 1 << 4, | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:52 +00:00
										 |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_PORT_CAP_IS_SM	= 1 << 1, | 
					
						
							|  |  |  | 	MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19, | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | enum { | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:42 +00:00
										 |  |  | 	MLX4_MAX_PORTS		= 2, | 
					
						
							|  |  |  | 	MLX4_MAX_PORT_PKEYS	= 128 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												mlx4_core: Implement mechanism for reserved Q_Keys
The SR-IOV special QP tunneling mechanism uses proxy special QPs
(instead of the real special QPs) for MADs on guests.  These proxy QPs
send their packets to a "tunnel" QP owned by the master.  The master
then forwards the MAD (after any required paravirtualization) to the
real special QP, which sends out the MAD.
For security reasons (i.e., to prevent guests from sending MADs to
tunnel QPs belonging to other guests), each proxy-tunnel QP pair is
assigned a unique, reserved, Q_Key.  These Q_Keys are available only
for proxy and tunnel QPs -- if the guest tries to use these Q_Keys
with other QPs, it will fail.
This patch introduces a mechanism for reserving a block of 64K Q_Keys
for proxy/tunneling use.
The patch introduces also two new fields into mlx4_dev: base_sqpn and
base_tunnel_sqpn.
In SR-IOV mode, the QP numbers for the "real," proxy, and tunnel sqps
are added to the reserved QPN area (so that they will not change).
There are 8 special QPs per port in the HCA, and each of them is
assigned both a proxy and a tunnel QP, for each VF and for the PF as
well in SR-IOV mode.
The QPNs for these QPs are arranged as follows:
 1. The real SQP numbers (8)
 2. The proxy SQPs (8 * (max number of VFs + max number of PFs)
 3. The tunnel SQPs (8 * (max number of VFs + max number of PFs)
To support these QPs, two new fields are added to struct mlx4_dev:
  base_sqp:  this is the QP number of the first of the real SQPs
  base_tunnel_sqp: this is the qp number of the first qp in the tunnel
                   sqp region. (On guests, this is the first tunnel
                   sqp of the 8 which are assigned to that guest).
In addition, in SR-IOV mode, sqp_start is the number of the first
proxy SQP in the proxy SQP region.  (In guests, this is the first
proxy SQP of the 8 which are assigned to that guest)
Note that in non-SR-IOV mode, there are no proxies and no tunnels.
In this case, sqp_start is set to sqp_base -- which minimizes code
changes.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-06-19 11:21:42 +03:00
										 |  |  | /* base qkey for use in sriov tunnel-qp/proxy-qp communication.
 | 
					
						
							|  |  |  |  * These qkeys must not be allowed for general use. This is a 64k range, | 
					
						
							|  |  |  |  * and to test for violation, we use the mask (protect against future chg). | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | #define MLX4_RESERVED_QKEY_BASE  (0xFFFF0000)
 | 
					
						
							|  |  |  | #define MLX4_RESERVED_QKEY_MASK  (0xFFFF0000)
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-09-18 09:14:18 +02:00
										 |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_BOARD_ID_LEN = 64 | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_MAX_NUM_PF		= 16, | 
					
						
							|  |  |  | 	MLX4_MAX_NUM_VF		= 64, | 
					
						
							|  |  |  | 	MLX4_MFUNC_MAX		= 80, | 
					
						
							| 
									
										
											  
											
												net/mlx4_core: Fix number of EQs used in ICM initialisation
In SRIOV mode, the number of EQs used when computing the total ICM size
was incorrect.
To fix this, we do the following:
1. We add a new structure to mlx4_dev, mlx4_phys_caps, to contain physical HCA
   capabilities.  The PPF uses the phys capabilities when it computes things
   like ICM size.
   The dev_caps structure will then contain the paravirtualized values, making
   bookkeeping much easier in SRIOV mode. We add a structure rather than a
   single parameter because there will be other fields in the phys_caps.
   The first field we add to the mlx4_phys_caps structure is num_phys_eqs.
2. In INIT_HCA, when running in SRIOV mode, the "log_num_eqs" parameter
   passed to the FW is the number of EQs per VF/PF; each function (PF or VF)
   has this number of EQs available.
   However, the total number of EQs which must be allowed for in the ICM is
   (1 << log_num_eqs) * (#VFs + #PFs).  Rather than compute this quantity,
   we allocate ICM space for 1024 EQs (which is the device maximum
   number of EQs, and which is the value we place in the mlx4_phys_caps structure).
   For INIT_HCA, however, we use the per-function number of EQs as described
   above.
Signed-off-by: Marcel Apfelbaum <marcela@dev.mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-05-30 09:14:51 +00:00
										 |  |  | 	MLX4_MAX_EQ_NUM		= 1024, | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | 	MLX4_MFUNC_EQ_NUM	= 4, | 
					
						
							|  |  |  | 	MLX4_MFUNC_MAX_EQES     = 8, | 
					
						
							|  |  |  | 	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1) | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, that which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
										 |  |  | /* Driver supports 3 diffrent device methods to manage traffic steering:
 | 
					
						
							|  |  |  |  *	-device managed - High level API for ib and eth flow steering. FW is | 
					
						
							|  |  |  |  *			  managing flow steering tables. | 
					
						
							| 
									
										
										
										
											2012-07-05 04:03:44 +00:00
										 |  |  |  *	- B0 steering mode - Common low level API for ib and (if supported) eth. | 
					
						
							|  |  |  |  *	- A0 steering mode - Limited low level API for eth. In case of IB, | 
					
						
							|  |  |  |  *			     B0 mode is in use. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_STEERING_MODE_A0, | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, that which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
										 |  |  | 	MLX4_STEERING_MODE_B0, | 
					
						
							|  |  |  | 	MLX4_STEERING_MODE_DEVICE_MANAGED | 
					
						
							| 
									
										
										
										
											2012-07-05 04:03:44 +00:00
										 |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | static inline const char *mlx4_steering_mode_str(int steering_mode) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	switch (steering_mode) { | 
					
						
							|  |  |  | 	case MLX4_STEERING_MODE_A0: | 
					
						
							|  |  |  | 		return "A0 steering"; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	case MLX4_STEERING_MODE_B0: | 
					
						
							|  |  |  | 		return "B0 steering"; | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, that which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	case MLX4_STEERING_MODE_DEVICE_MANAGED: | 
					
						
							|  |  |  | 		return "Device managed flow steering"; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-07-05 04:03:44 +00:00
										 |  |  | 	default: | 
					
						
							|  |  |  | 		return "Unrecognize steering mode"; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | enum { | 
					
						
							| 
									
										
										
										
											2011-06-15 14:41:42 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2, | 
					
						
							| 
									
										
										
										
											2011-06-02 09:01:33 -07:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_XRC		= 1LL <<  3, | 
					
						
							| 
									
										
										
										
											2011-06-15 14:41:42 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_SRQ		= 1LL <<  6, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL <<  7, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21, | 
					
						
							| 
									
										
										
										
											2011-07-07 19:19:29 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32, | 
					
						
							| 
									
										
										
										
											2011-10-18 01:50:42 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_FCS_KEEP	= 1LL << 34, | 
					
						
							| 
									
										
										
										
											2011-11-26 19:55:15 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_WOL_PORT1	= 1LL << 37, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_WOL_PORT2	= 1LL << 38, | 
					
						
							| 
									
										
										
										
											2011-07-07 19:19:29 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41, | 
					
						
							| 
									
										
										
										
											2011-06-15 14:47:14 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42, | 
					
						
							| 
									
										
										
										
											2011-12-19 04:00:26 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48, | 
					
						
							| 
									
										
											  
											
												mlx4: Use port management change event instead of smp_snoop
The port management change event can replace smp_snoop.  If the
capability bit for this event is set in dev-caps, the event is used
(by the driver setting the PORT_MNG_CHG_EVENT bit in the async event
mask in the MAP_EQ fw command).  In this case, when the driver passes
incoming SMP PORT_INFO SET mads to the FW, the FW generates port
management change events to signal any changes to the driver.
If the FW generates these events, smp_snoop shouldn't be invoked in
ib_process_mad(), or duplicate events will occur (once from the
FW-generated event, and once from smp_snoop).
In the case where the FW does not generate port management change
events smp_snoop needs to be invoked to create these events.  The flow
in smp_snoop has been modified to make use of the same procedures as
in the fw-generated-event event case to generate the port management
events (LID change, Client-rereg, Pkey change, and/or GID change).
Port management change event handling required changing the
mlx4_ib_event and mlx4_dispatch_event prototypes; the "param" argument
(last argument) had to be changed to unsigned long in order to
accomodate passing the EQE pointer.
We also needed to move the definition of struct mlx4_eqe from
net/mlx4.h to file device.h -- to make it available to the IB driver,
to handle port management change events.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-06-19 11:21:40 +03:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, | 
					
						
							| 
									
										
										
										
											2012-10-21 14:59:24 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG_64B_EQE	= 1LL << 61, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG_64B_CQE	= 1LL << 62 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-04-29 17:04:25 +03:00
										 |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG2_RSS			= 1LL <<  0, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1, | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, that which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2, | 
					
						
							| 
									
										
										
										
											2013-01-30 23:07:10 +00:00
										 |  |  | 	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN	= 1LL <<  4 | 
					
						
							| 
									
										
										
										
											2012-04-29 17:04:25 +03:00
										 |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-10-21 14:59:24 +00:00
										 |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_64B_EQE_ENABLED	= 1LL << 0, | 
					
						
							|  |  |  | 	MLX4_DEV_CAP_64B_CQE_ENABLED	= 1LL << 1 | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_USER_DEV_CAP_64B_CQE	= 1L << 0 | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_FUNC_CAP_64B_EQE_CQE	= 1L << 0 | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-10-24 11:02:34 +02:00
										 |  |  | #define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-07-23 08:12:26 -07:00
										 |  |  | enum { | 
					
						
							| 
									
										
										
										
											2013-02-06 16:19:14 +00:00
										 |  |  | 	MLX4_BMME_FLAG_WIN_TYPE_2B	= 1 <<  1, | 
					
						
							| 
									
										
										
										
											2008-07-23 08:12:26 -07:00
										 |  |  | 	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6, | 
					
						
							|  |  |  | 	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7, | 
					
						
							|  |  |  | 	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9, | 
					
						
							|  |  |  | 	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10, | 
					
						
							|  |  |  | 	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11, | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | enum mlx4_event { | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_COMP		   = 0x00, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_PATH_MIG	   = 0x01, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_COMM_EST	   = 0x02, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_SQ_DRAINED	   = 0x03, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE	   = 0x13, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_SRQ_LIMIT	   = 0x14, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_CQ_ERROR	   = 0x04, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_EEC_CATAS_ERROR	   = 0x06, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e, | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | 	MLX4_EVENT_TYPE_CMD		   = 0x0a, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19, | 
					
						
							|  |  |  | 	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18, | 
					
						
							| 
									
										
										
										
											2012-03-06 15:50:49 +02:00
										 |  |  | 	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b, | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | 	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c, | 
					
						
							| 
									
										
											  
											
												mlx4: Use port management change event instead of smp_snoop
The port management change event can replace smp_snoop.  If the
capability bit for this event is set in dev-caps, the event is used
(by the driver setting the PORT_MNG_CHG_EVENT bit in the async event
mask in the MAP_EQ fw command).  In this case, when the driver passes
incoming SMP PORT_INFO SET mads to the FW, the FW generates port
management change events to signal any changes to the driver.
If the FW generates these events, smp_snoop shouldn't be invoked in
ib_process_mad(), or duplicate events will occur (once from the
FW-generated event, and once from smp_snoop).
In the case where the FW does not generate port management change
events smp_snoop needs to be invoked to create these events.  The flow
in smp_snoop has been modified to make use of the same procedures as
in the fw-generated-event event case to generate the port management
events (LID change, Client-rereg, Pkey change, and/or GID change).
Port management change event handling required changing the
mlx4_ib_event and mlx4_dispatch_event prototypes; the "param" argument
(last argument) had to be changed to unsigned long in order to
accomodate passing the EQE pointer.
We also needed to move the definition of struct mlx4_eqe from
net/mlx4.h to file device.h -- to make it available to the IB driver,
to handle port management change events.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-06-19 11:21:40 +03:00
										 |  |  | 	MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | 	MLX4_EVENT_TYPE_NONE		   = 0xff, | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_PORT_CHANGE_SUBTYPE_DOWN	= 1, | 
					
						
							|  |  |  | 	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4 | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-03-06 15:50:49 +02:00
										 |  |  | enum { | 
					
						
							|  |  |  | 	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:48 +00:00
										 |  |  | enum slave_port_state { | 
					
						
							|  |  |  | 	SLAVE_PORT_DOWN = 0, | 
					
						
							|  |  |  | 	SLAVE_PENDING_UP, | 
					
						
							|  |  |  | 	SLAVE_PORT_UP, | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | enum slave_port_gen_event { | 
					
						
							|  |  |  | 	SLAVE_PORT_GEN_EVENT_DOWN = 0, | 
					
						
							|  |  |  | 	SLAVE_PORT_GEN_EVENT_UP, | 
					
						
							|  |  |  | 	SLAVE_PORT_GEN_EVENT_NONE, | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | enum slave_port_state_event { | 
					
						
							|  |  |  | 	MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, | 
					
						
							|  |  |  | 	MLX4_PORT_STATE_DEV_EVENT_PORT_UP, | 
					
						
							|  |  |  | 	MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID, | 
					
						
							|  |  |  | 	MLX4_PORT_STATE_IB_EVENT_GID_INVALID, | 
					
						
							|  |  |  | }; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
/*
 * Memory-region access permission flags.  The values are the bit
 * positions used by the hardware (bits 10..15).
 */
enum {
	MLX4_PERM_LOCAL_READ	= 1 << 10,
	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
	MLX4_PERM_REMOTE_READ	= 1 << 12,
	MLX4_PERM_REMOTE_WRITE	= 1 << 13,
	MLX4_PERM_ATOMIC	= 1 << 14,
	MLX4_PERM_BIND_MW	= 1 << 15,	/* allow memory-window binding to this MR */
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * Hardware opcode values: send-WQE opcodes (MLX4_OPCODE_*),
 * receive-completion opcodes (MLX4_RECV_OPCODE_*), and special
 * opcodes reported in CQEs (MLX4_CQE_OPCODE_*).
 */
enum {
	/* Send WQE opcodes */
	MLX4_OPCODE_NOP			= 0x00,
	MLX4_OPCODE_SEND_INVAL		= 0x01,
	MLX4_OPCODE_RDMA_WRITE		= 0x08,
	MLX4_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX4_OPCODE_SEND		= 0x0a,
	MLX4_OPCODE_SEND_IMM		= 0x0b,
	MLX4_OPCODE_LSO			= 0x0e,	/* large send offload */
	MLX4_OPCODE_RDMA_READ		= 0x10,
	MLX4_OPCODE_ATOMIC_CS		= 0x11,	/* compare & swap */
	MLX4_OPCODE_ATOMIC_FA		= 0x12,	/* fetch & add */
	MLX4_OPCODE_MASKED_ATOMIC_CS	= 0x14,
	MLX4_OPCODE_MASKED_ATOMIC_FA	= 0x15,
	MLX4_OPCODE_BIND_MW		= 0x18,
	MLX4_OPCODE_FMR			= 0x19,
	MLX4_OPCODE_LOCAL_INVAL		= 0x1b,
	MLX4_OPCODE_CONFIG_CMD		= 0x1f,

	/* Opcodes reported for receive completions */
	MLX4_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX4_RECV_OPCODE_SEND		= 0x01,
	MLX4_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX4_RECV_OPCODE_SEND_INVAL	= 0x03,

	/* Special CQE opcodes */
	MLX4_CQE_OPCODE_ERROR		= 0x1e,
	MLX4_CQE_OPCODE_RESIZE		= 0x16,
};
					
						
							|  |  |  | 
 | 
					
						
enum {
	/* Offset applied when encoding static rate values for the HW — TODO confirm encoding */
	MLX4_STAT_RATE_OFFSET	= 5
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-12-02 11:44:49 +00:00
/* Protocol with which a steering/QP resource is associated. */
enum mlx4_protocol {
	MLX4_PROT_IB_IPV6 = 0,
	MLX4_PROT_ETH,
	MLX4_PROT_IB_IPV4,
	MLX4_PROT_FCOE
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-09-15 14:25:23 -07:00
enum {
	/* MTT entry is valid/present (low bit of an MTT address entry). */
	MLX4_MTT_FLAG_PRESENT		= 1
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-10-22 10:25:29 -07:00
/*
 * Regions into which the reserved QP number space is partitioned.
 * MLX4_NUM_QP_REGION is the region count, used to size per-region arrays.
 */
enum mlx4_qp_region {
	MLX4_QP_REGION_FW = 0,		/* firmware-owned QPs */
	MLX4_QP_REGION_ETH_ADDR,	/* Ethernet MAC/VLAN steering QPs */
	MLX4_QP_REGION_FC_ADDR,		/* FC address QPs */
	MLX4_QP_REGION_FC_EXCH,		/* FC exchange QPs */
	MLX4_NUM_QP_REGION
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-10-22 15:38:42 -07:00
/* Link-layer personality of a physical port. */
enum mlx4_port_type {
	MLX4_PORT_TYPE_NONE	= 0,	/* port type not set/assigned */
	MLX4_PORT_TYPE_IB	= 1,	/* InfiniBand */
	MLX4_PORT_TYPE_ETH	= 2,	/* Ethernet */
	MLX4_PORT_TYPE_AUTO	= 3	/* sense the type automatically */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-10-22 11:44:46 -07:00
/* Reserved indices in the per-port VLAN table. */
enum mlx4_special_vlan_idx {
	MLX4_NO_VLAN_IDX        = 0,	/* untagged traffic */
	MLX4_VLAN_MISS_IDX,		/* VLAN-miss (no table match) */
	MLX4_VLAN_REGULAR		/* first index usable for regular VLANs */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-03-22 22:38:17 +00:00
/* Packet steering table types (multicast vs. unicast). */
enum mlx4_steer_type {
	MLX4_MC_STEER = 0,
	MLX4_UC_STEER,
	MLX4_NUM_STEERS		/* number of steering types, for array sizing */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-10-22 10:25:29 -07:00
enum {
	/* Number of FC exchange QPs reserved (64K). */
	MLX4_NUM_FEXCH          = 64 * 1024,
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-10-07 16:24:16 +02:00
enum {
	/* Maximum pages per fast-register work request. */
	MLX4_MAX_FAST_REG_PAGES = 511,
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												mlx4: Use port management change event instead of smp_snoop
The port management change event can replace smp_snoop.  If the
capability bit for this event is set in dev-caps, the event is used
(by the driver setting the PORT_MNG_CHG_EVENT bit in the async event
mask in the MAP_EQ fw command).  In this case, when the driver passes
incoming SMP PORT_INFO SET mads to the FW, the FW generates port
management change events to signal any changes to the driver.
If the FW generates these events, smp_snoop shouldn't be invoked in
ib_process_mad(), or duplicate events will occur (once from the
FW-generated event, and once from smp_snoop).
In the case where the FW does not generate port management change
events smp_snoop needs to be invoked to create these events.  The flow
in smp_snoop has been modified to make use of the same procedures as
in the fw-generated-event event case to generate the port management
events (LID change, Client-rereg, Pkey change, and/or GID change).
Port management change event handling required changing the
mlx4_ib_event and mlx4_dispatch_event prototypes; the "param" argument
(last argument) had to be changed to unsigned long in order to
accomodate passing the EQE pointer.
We also needed to move the definition of struct mlx4_eqe from
net/mlx4.h to file device.h -- to make it available to the IB driver,
to handle port management change events.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-06-19 11:21:40 +03:00
/*
 * Subtypes of the port management change (PMC) async event, generated
 * by FW when SMP SET mads modify the corresponding port attribute.
 */
enum {
	MLX4_DEV_PMC_SUBTYPE_GUID_INFO	 = 0x14,
	MLX4_DEV_PMC_SUBTYPE_PORT_INFO	 = 0x15,
	MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE	 = 0x16,
};
					
						
							|  |  |  | 
 | 
					
						
/* Port mgmt change event handling */
enum {
	/* Single-bit masks indicating which PORT_INFO attributes changed. */
	MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK	= 1 << 0,
	MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK		= 1 << 1,
	MLX4_EQ_PORT_INFO_LID_CHANGE_MASK		= 1 << 2,
	MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK		= 1 << 3,
	MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK	= 1 << 4,
};
					
						
							|  |  |  | 
 | 
					
						
/* Any change relating to the master SM: its SL or its LID. */
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
			     MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												IB/mlx4: Use multiple WQ blocks to post smaller send WQEs
ConnectX HCA supports shrinking WQEs, so that a single work request
can be made of multiple units of wqe_shift.  This way, WRs can differ
in size, and do not have to be a power of 2 in size, saving memory and
speeding up send WR posting.  Unfortunately, if we do this then the
wqe_index field in CQEs can't be used to look up the WR ID anymore, so
our implementation does this only if selective signaling is off.
Further, on 32-bit platforms, we can't use vmap() to make the QP
buffer virtually contigious. Thus we have to use constant-sized WRs to
make sure a WR is always fully within a single page-sized chunk.
Finally, we use WRs with the NOP opcode to avoid wrapping around the
queue buffer in the middle of posting a WR, and we set the
NoErrorCompletion bit to avoid getting completions with error for NOP
WRs.  However, NEC is only supported starting with firmware 2.2.232,
so we use constant-sized WRs for older firmware.  And, since MLX QPs
only support SEND, we use constant-sized WRs in this case.
When stamping during NOP posting, do stamping following setting of the
NOP WQE valid bit.
Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
											
										 
											2008-01-28 10:40:59 +02:00
										 |  |  | static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	return (major << 32) | (minor << 16) | subminor; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												net/mlx4_core: Fix number of EQs used in ICM initialisation
In SRIOV mode, the number of EQs used when computing the total ICM size
was incorrect.
To fix this, we do the following:
1. We add a new structure to mlx4_dev, mlx4_phys_caps, to contain physical HCA
   capabilities.  The PPF uses the phys capabilities when it computes things
   like ICM size.
   The dev_caps structure will then contain the paravirtualized values, making
   bookkeeping much easier in SRIOV mode. We add a structure rather than a
   single parameter because there will be other fields in the phys_caps.
   The first field we add to the mlx4_phys_caps structure is num_phys_eqs.
2. In INIT_HCA, when running in SRIOV mode, the "log_num_eqs" parameter
   passed to the FW is the number of EQs per VF/PF; each function (PF or VF)
   has this number of EQs available.
   However, the total number of EQs which must be allowed for in the ICM is
   (1 << log_num_eqs) * (#VFs + #PFs).  Rather than compute this quantity,
   we allocate ICM space for 1024 EQs (which is the device maximum
   number of EQs, and which is the value we place in the mlx4_phys_caps structure).
   For INIT_HCA, however, we use the per-function number of EQs as described
   above.
Signed-off-by: Marcel Apfelbaum <marcela@dev.mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-05-30 09:14:51 +00:00
/*
 * Physical (non-paravirtualized) HCA capabilities.  The PPF uses these
 * when computing global resources such as total ICM size; the possibly
 * paravirtualized per-function values live in struct mlx4_caps.
 */
struct mlx4_phys_caps {
	/* Actual per-port GID and P_Key table sizes reported by FW
	 * (indexed by 1-based port number). */
	u32			gid_phys_table_len[MLX4_MAX_PORTS + 1];
	u32			pkey_phys_table_len[MLX4_MAX_PORTS + 1];
	/* Device-maximum number of EQs; used for ICM sizing in SR-IOV mode. */
	u32			num_phys_eqs;
	/* First special (QP0/QP1) QP number (formerly sqp_start in non-SRIOV mode). */
	u32			base_sqpn;
	/* First physical proxy special QP number -- used by the PPF. */
	u32			base_proxy_sqpn;
	/* First physical tunnel special QP number -- used by the PPF. */
	u32			base_tunnel_sqpn;
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
/*
 * HCA capabilities as seen by this function (PF or VF).  In SR-IOV mode
 * several of these values are paravirtualized per function; the raw
 * physical values reported by FW are kept in struct mlx4_phys_caps.
 * Per-port arrays are sized MLX4_MAX_PORTS + 1 so they can be indexed
 * by the 1-based port number.
 */
struct mlx4_caps {
	u64			fw_ver;
	u32			function;	/* this function's index (SR-IOV) */
	int			num_ports;
	int			vl_cap[MLX4_MAX_PORTS + 1];
	int			ib_mtu_cap[MLX4_MAX_PORTS + 1];
	__be32			ib_port_def_cap[MLX4_MAX_PORTS + 1];
	u64			def_mac[MLX4_MAX_PORTS + 1];
	int			eth_mtu_cap[MLX4_MAX_PORTS + 1];
	int			gid_table_len[MLX4_MAX_PORTS + 1];
	int			pkey_table_len[MLX4_MAX_PORTS + 1];
	/* Cable/module info per port */
	int			trans_type[MLX4_MAX_PORTS + 1];
	int			vendor_oui[MLX4_MAX_PORTS + 1];
	int			wavelength[MLX4_MAX_PORTS + 1];
	u64			trans_code[MLX4_MAX_PORTS + 1];
	int			local_ca_ack_delay;
	int			num_uars;
	u32			uar_page_size;
	/* BlueFlame (low-latency doorbell) parameters */
	int			bf_reg_size;
	int			bf_regs_per_page;
	int			max_sq_sg;
	int			max_rq_sg;
	int			num_qps;
	int			max_wqes;
	int			max_sq_desc_sz;
	int			max_rq_desc_sz;
	int			max_qp_init_rdma;
	int			max_qp_dest_rdma;
	/* Per-port proxy/tunnel special QP numbers, provided by
	 * QUERY_FUNC_CAP in SR-IOV mode (no offset calculations needed). */
	u32			*qp0_proxy;
	u32			*qp1_proxy;
	u32			*qp0_tunnel;
	u32			*qp1_tunnel;
	int			num_srqs;
	int			max_srq_wqes;
	int			max_srq_sge;
	int			reserved_srqs;
	int			num_cqs;
	int			max_cqes;
	int			reserved_cqs;
	int			num_eqs;
	int			reserved_eqs;
	int			num_comp_vectors;
	int			comp_pool;
	int			num_mpts;
	int			max_fmr_maps;
	int			num_mtts;
	int			fmr_reserved_mtts;
	int			reserved_mtts;
	int			reserved_mrws;
	int			reserved_uars;
	/* Multicast group resources */
	int			num_mgms;
	int			num_amgms;
	int			reserved_mcgs;
	int			num_qp_per_mgm;
	int			steering_mode;	/* A0 / B0 / device-managed flow steering */
	int			fs_log_max_ucast_qp_range_size;
	int			num_pds;
	int			reserved_pds;
	int			max_xrcds;
	int			reserved_xrcds;
	int			mtt_entry_sz;
	u32			max_msg_sz;
	u32			page_size_cap;
	u64			flags;
	u64			flags2;
	u32			bmme_flags;
	u32			reserved_lkey;
	u16			stat_rate_support;
	u8			port_width_cap[MLX4_MAX_PORTS + 1];
	int			max_gso_sz;
	int			max_rss_tbl_sz;
	/* Reserved QP ranges, one per mlx4_qp_region */
	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
	int			reserved_qps;
	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
	int                     log_num_macs;
	int                     log_num_vlans;
	int                     log_num_prios;
	/* Port type configuration and sensing */
	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
	u8			supported_type[MLX4_MAX_PORTS + 1];
	u8                      suggested_type[MLX4_MAX_PORTS + 1];
	u8                      default_sense[MLX4_MAX_PORTS + 1];
	u32			port_mask[MLX4_MAX_PORTS + 1];
	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
	u32			max_counters;
	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
	u16			sqp_demux;
	u32			eqe_size;
	u32			cqe_size;
	u8			eqe_factor;
	u32			userspace_caps; /* userspace must be aware of these */
	u32			function_caps;  /* VFs must be aware of these */
};
					
						
							|  |  |  | 
 | 
					
						
/* One DMA-coherent chunk: kernel virtual address plus its bus address. */
struct mlx4_buf_list {
	void		       *buf;
	dma_addr_t		map;
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * A HW-accessible buffer: either one physically contiguous chunk
 * (direct) or an array of page-sized chunks (page_list).
 */
struct mlx4_buf {
	struct mlx4_buf_list	direct;		/* used when the buffer is contiguous */
	struct mlx4_buf_list   *page_list;	/* used when split into pages */
	int			nbufs;
	int			npages;
	int			page_shift;	/* log2 of the chunk size */
};
					
						
							|  |  |  | 
 | 
					
						
/* A range of memory translation table (MTT) entries. */
struct mlx4_mtt {
	u32			offset;		/* first MTT entry of the range */
	int			order;		/* log2 of the number of entries */
	int			page_shift;	/* log2 of the page size mapped */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-04-23 11:55:45 -07:00
/* Doorbell records are __be32 (4 bytes), so this many fit in one page. */
enum {
	MLX4_DB_PER_PAGE = PAGE_SIZE / 4
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * One page of doorbell records together with its allocation state.
 * order0/order1 track free slots for single and paired records;
 * bits[] presumably indexes the two bitmaps by order — confirm in alloc.c.
 */
struct mlx4_db_pgdir {
	struct list_head	list;		/* link on the device's pgdir list */
	DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);	/* free map, order-0 slots */
	DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);	/* free map, order-1 slots */
	unsigned long	       *bits[2];	/* per-order view of the bitmaps */
	__be32		       *db_page;	/* CPU address of the doorbell page */
	dma_addr_t		db_dma;		/* DMA address of the doorbell page */
};
					
						
							|  |  |  | 
 | 
					
						
/* Opaque here; presumably owned by the mlx4 IB driver. */
struct mlx4_ib_user_db_page;

/*
 * A single doorbell record.  The backing storage is either a kernel
 * pgdir page or a userspace-registered page, selected via the union.
 */
struct mlx4_db {
	__be32			*db;		/* CPU-visible doorbell record */
	union {
		struct mlx4_db_pgdir		*pgdir;		/* kernel-allocated backing */
		struct mlx4_ib_user_db_page	*user_page;	/* userspace backing */
	}			u;
	dma_addr_t		dma;		/* DMA address of the record */
	int			index;		/* record index within the page */
	int			order;		/* allocation order (0 or 1) */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-04-25 14:27:08 -07:00
/* Resources backing one hardware work queue: doorbell, MTT, and buffer. */
struct mlx4_hwq_resources {
	struct mlx4_db		db;	/* doorbell record */
	struct mlx4_mtt		mtt;	/* device page translations */
	struct mlx4_buf		buf;	/* queue memory */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
/* Memory Region: a registered address range the HCA may access. */
struct mlx4_mr {
	struct mlx4_mtt		mtt;	/* page translations for the region */
	u64			iova;	/* starting I/O virtual address */
	u64			size;	/* region length in bytes */
	u32			key;	/* key used to reference the region */
	u32			pd;	/* protection domain number */
	u32			access;	/* access flags */
	int			enabled;	/* nonzero once active in hardware */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-02-06 16:19:14 +00:00
/* Memory Window types (IB spec type 1 and type 2). */
enum mlx4_mw_type {
	MLX4_MW_TYPE_1 = 1,
	MLX4_MW_TYPE_2 = 2,
};

/* Memory Window: a remotely-accessible window over registered memory. */
struct mlx4_mw {
	u32			key;	/* key identifying the window */
	u32			pd;	/* protection domain number */
	enum mlx4_mw_type	type;	/* type 1 or type 2 */
	int			enabled;	/* nonzero once active in hardware */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-08-01 12:29:05 +03:00
/*
 * Fast Memory Region: an MR that can be remapped cheaply up to
 * max_maps times before it must be reset.
 */
struct mlx4_fmr {
	struct mlx4_mr		mr;		/* underlying memory region */
	struct mlx4_mpt_entry  *mpt;		/* CPU mapping of the MPT entry */
	__be64		       *mtts;		/* CPU mapping of the MTT entries */
	dma_addr_t		dma_handle;	/* DMA address of the MTT entries */
	int			max_pages;	/* maximum pages per map operation */
	int			max_maps;	/* remaps allowed before reset */
	int			maps;		/* remaps performed so far */
	u8			page_shift;	/* log2 page size of mapped pages */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
/*
 * User Access Region: a page of device doorbell registers, optionally
 * with BlueFlame (bf) send slots carved out of it.
 */
struct mlx4_uar {
	unsigned long		pfn;		/* page frame number of the UAR page */
	int			index;		/* UAR index within the device */
	struct list_head	bf_list;	/* link on list of UARs with free BF slots */
	unsigned		free_bf_bmap;	/* bitmap of unallocated BF slots */
	void __iomem	       *map;		/* ioremapped UAR page */
	void __iomem	       *bf_map;		/* ioremapped BlueFlame page */
};
					
						
							|  |  |  | 
 | 
					
						
/* A BlueFlame send slot allocated from a UAR. */
struct mlx4_bf {
	unsigned long		offset;		/* byte offset of this slot in the BF page */
	int			buf_size;	/* size of the BlueFlame buffer */
	struct mlx4_uar	       *uar;		/* owning UAR */
	void __iomem	       *reg;		/* register address for BF writes */
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * Completion Queue handle.  comp() is called for completion events,
 * event() for asynchronous events.  The refcount/free pair implements
 * the usual kernel teardown pattern: destruction waits on 'free',
 * which is presumably completed on the final reference drop — confirm
 * in cq.c.
 */
struct mlx4_cq {
	void (*comp)		(struct mlx4_cq *);			/* completion callback */
	void (*event)		(struct mlx4_cq *, enum mlx4_event);	/* async event callback */

	struct mlx4_uar	       *uar;		/* UAR used for CQ doorbells */

	u32			cons_index;	/* consumer index (next CQE to poll) */

	__be32		       *set_ci_db;	/* doorbell: publish consumer index */
	__be32		       *arm_db;		/* doorbell: arm for next event */
	int			arm_sn;		/* arm sequence number */

	int			cqn;		/* CQ number */
	unsigned		vector;		/* interrupt vector serving this CQ */

	atomic_t		refcount;	/* active users of this CQ */
	struct completion	free;		/* signalled when safe to free */
};
					
						
							|  |  |  | 
 | 
					
						
/* Queue Pair handle: QP number, async event callback, teardown refcount. */
struct mlx4_qp {
	void (*event)		(struct mlx4_qp *, enum mlx4_event);	/* async event callback */

	int			qpn;		/* QP number */

	atomic_t		refcount;	/* active users of this QP */
	struct completion	free;		/* signalled when safe to free */
};
					
						
							|  |  |  | 
 | 
					
						
/* Shared Receive Queue handle. */
struct mlx4_srq {
	void (*event)		(struct mlx4_srq *, enum mlx4_event);	/* async event callback */

	int			srqn;		/* SRQ number */
	int			max;		/* max WQEs — confirm exact semantics in srq.c */
	int			max_gs;		/* max scatter entries per WQE */
	int			wqe_shift;	/* log2 of the WQE stride */

	atomic_t		refcount;	/* active users of this SRQ */
	struct completion	free;		/* signalled when safe to free */
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * IB address vector in device (big-endian) layout.  Field order and
 * sizes must match the hardware format — do not reorder.
 */
struct mlx4_av {
	__be32			port_pd;	/* port number and protection domain */
	u8			reserved1;
	u8			g_slid;		/* GRH-present flag and source LID bits — confirm bit layout */
	__be16			dlid;		/* destination LID */
	u8			reserved2;
	u8			gid_index;	/* source GID table index */
	u8			stat_rate;	/* static rate */
	u8			hop_limit;	/* GRH hop limit */
	__be32			sl_tclass_flowlabel;	/* SL, traffic class, flow label */
	u8			dgid[16];	/* destination GID */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-10-24 21:08:52 -07:00
/*
 * Ethernet (RoCE) address vector in device (big-endian) layout.
 * Mirrors struct mlx4_av with Ethernet-specific fields appended.
 */
struct mlx4_eth_av {
	__be32		port_pd;	/* port number and protection domain */
	u8		reserved1;
	u8		smac_idx;	/* source MAC table index */
	u16		reserved2;
	u8		reserved3;
	u8		gid_index;	/* source GID table index */
	u8		stat_rate;	/* static rate */
	u8		hop_limit;	/* hop limit */
	__be32		sl_tclass_flowlabel;	/* SL, traffic class, flow label */
	u8		dgid[16];	/* destination GID */
	u32		reserved4[2];
	__be16		vlan;		/* VLAN tag */
	u8		mac[6];		/* destination MAC address */
};
					
						
							|  |  |  | 
 | 
					
						
/* Extended address vector: IB or Ethernet form, selected by port type. */
union mlx4_ext_av {
	struct mlx4_av		ib;	/* InfiniBand address vector */
	struct mlx4_eth_av	eth;	/* Ethernet (RoCE) address vector */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-15 14:47:14 +00:00
/* Hardware traffic counter set, as read back from the device (big-endian). */
struct mlx4_counter {
	u8	reserved1[3];
	u8	counter_mode;	/* counter mode reported by firmware */
	__be32	num_ifc;	/* presumably number of attached interfaces — confirm in PRM */
	u32	reserved2[2];
	__be64	rx_frames;	/* received frames */
	__be64	rx_bytes;	/* received bytes */
	__be64	tx_frames;	/* transmitted frames */
	__be64	tx_bytes;	/* transmitted bytes */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
/*
 * Per-device state shared between mlx4_core and its users.
 * In SR-IOV mode, 'caps' holds the (possibly paravirtualized) view of
 * capabilities, while 'phys_caps' holds the physical HCA capabilities
 * used by the PF for sizing (e.g. ICM for EQs).
 */
struct mlx4_dev {
	struct pci_dev	       *pdev;		/* underlying PCI device */
	unsigned long		flags;		/* MLX4_FLAG_* bits (master/slave/...) */
	unsigned long		num_slaves;	/* number of slave functions (SR-IOV) */
	struct mlx4_caps	caps;		/* per-function device capabilities */
	struct mlx4_phys_caps	phys_caps;	/* physical HCA capabilities */
	struct radix_tree_root	qp_table_tree;	/* QP number lookup tree */
	u8			rev_id;		/* hardware revision ID */
	char			board_id[MLX4_BOARD_ID_LEN];	/* board identifier string */
	int			num_vfs;	/* number of virtual functions */
	int			oper_log_mgm_entry_size;	/* operational log2 MGM entry size */
	u64			regid_promisc_array[MLX4_MAX_PORTS + 1];	/* per-port promisc flow reg IDs */
	u64			regid_allmulti_array[MLX4_MAX_PORTS + 1];	/* per-port allmulti flow reg IDs */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												mlx4: Use port management change event instead of smp_snoop
The port management change event can replace smp_snoop.  If the
capability bit for this event is set in dev-caps, the event is used
(by the driver setting the PORT_MNG_CHG_EVENT bit in the async event
mask in the MAP_EQ fw command).  In this case, when the driver passes
incoming SMP PORT_INFO SET mads to the FW, the FW generates port
management change events to signal any changes to the driver.
If the FW generates these events, smp_snoop shouldn't be invoked in
ib_process_mad(), or duplicate events will occur (once from the
FW-generated event, and once from smp_snoop).
In the case where the FW does not generate port management change
events smp_snoop needs to be invoked to create these events.  The flow
in smp_snoop has been modified to make use of the same procedures as
in the fw-generated-event event case to generate the port management
events (LID change, Client-rereg, Pkey change, and/or GID change).
Port management change event handling required changing the
mlx4_ib_event and mlx4_dispatch_event prototypes; the "param" argument
(last argument) had to be changed to unsigned long in order to
accommodate passing the EQE pointer.
We also needed to move the definition of struct mlx4_eqe from
net/mlx4.h to file device.h -- to make it available to the IB driver,
to handle port management change events.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-06-19 11:21:40 +03:00
/*
 * Event Queue Entry in device (big-endian) layout.  The union is
 * interpreted according to 'type'/'subtype'.  The layout must match
 * the firmware exactly, hence __packed throughout — do not reorder
 * or resize any field.
 */
struct mlx4_eqe {
	u8			reserved1;
	u8			type;		/* event type */
	u8			reserved2;
	u8			subtype;	/* event subtype */
	union {
		u32		raw[6];		/* raw view of the payload */
		struct {
			__be32	cqn;		/* CQ that generated the completion event */
		} __packed comp;
		struct {
			u16	reserved1;
			__be16	token;		/* token of the completed FW command */
			u32	reserved2;
			u8	reserved3[3];
			u8	status;		/* FW command completion status */
			__be64	out_param;	/* FW command output parameter */
		} __packed cmd;
		struct {
			__be32	qpn;		/* affected QP number */
		} __packed qp;
		struct {
			__be32	srqn;		/* affected SRQ number */
		} __packed srq;
		struct {
			__be32	cqn;		/* CQ in error */
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;	/* error syndrome */
		} __packed cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;		/* port whose state changed */
		} __packed port_change;
		struct {
			#define COMM_CHANNEL_BIT_ARRAY_SIZE	4
			u32 reserved;
			u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];	/* slaves with pending comm-channel work */
		} __packed comm_channel_arm;
		struct {
			u8	port;		/* port of the updated MAC */
			u8	reserved[3];
			__be64	mac;		/* new MAC address */
		} __packed mac_update;
		struct {
			__be32	slave_id;	/* slave undergoing function-level reset */
		} __packed flr_event;
		struct {
			__be16  current_temperature;	/* reported temperature */
			__be16  warning_threshold;	/* threshold that was crossed */
		} __packed warming;
		struct {
			u8 reserved[3];
			u8 port;		/* port the management change applies to */
			union {
				struct {
					__be16 mstr_sm_lid;	/* master SM LID */
					__be16 port_lid;	/* port LID */
					__be32 changed_attr;	/* bitmask of changed attributes */
					u8 reserved[3];
					u8 mstr_sm_sl;		/* master SM service level */
					__be64 gid_prefix;	/* subnet GID prefix */
				} __packed port_info;
				struct {
					__be32 block_ptr;	/* changed table block */
					__be32 tbl_entries_mask;	/* changed entries within the block */
				} __packed tbl_change_info;
			} params;
		} __packed port_mgmt_change;
	}			event;
	u8			slave_id;	/* slave the event is destined for */
	u8			reserved3[2];
	u8			owner;		/* ownership byte — presumably used for EQE validity; confirm in eq.c */
} __packed;
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
/* Parameters for the INIT_PORT firmware command. */
struct mlx4_init_port_param {
	int			set_guid0;	/* nonzero: guid0 is valid */
	int			set_node_guid;	/* nonzero: node_guid is valid */
	int			set_si_guid;	/* nonzero: si_guid is valid */
	u16			mtu;		/* port MTU */
	int			port_width_cap;	/* port width capability */
	u16			vl_cap;		/* virtual lane capability */
	u16			max_gid;	/* GID table size */
	u16			max_pkey;	/* P_Key table size */
	u64			guid0;		/* port GUID */
	u64			node_guid;	/* node GUID */
	u64			si_guid;	/* system image GUID */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-10-22 15:38:42 -07:00
/*
 * Iterate over every device port whose configured type matches 'type'.
 * Ports are numbered from 1.
 */
#define mlx4_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	\
		if ((type) == (dev)->caps.port_mask[(port)])
					
						
							| 
									
										
										
										
											2008-10-22 15:38:42 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:55 +00:00
/* Iterate over every port that is not configured as an IB port. */
#define mlx4_foreach_non_ib_transport_port(port, dev)                     \
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
		if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:41 +00:00
/*
 * Iterate over every port usable for IB transport: ports configured as
 * IB, or any port when the device supports IBoE (RoCE).
 */
#define mlx4_foreach_ib_transport_port(port, dev)                         \
	for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)	  \
		if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
			((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-06-19 11:21:33 +03:00
/* Sentinel slave ID meaning "no valid slave". */
#define MLX4_INVALID_SLAVE_ID	0xFF
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												mlx4: Use port management change event instead of smp_snoop
The port management change event can replace smp_snoop.  If the
capability bit for this event is set in dev-caps, the event is used
(by the driver setting the PORT_MNG_CHG_EVENT bit in the async event
mask in the MAP_EQ fw command).  In this case, when the driver passes
incoming SMP PORT_INFO SET mads to the FW, the FW generates port
management change events to signal any changes to the driver.
If the FW generates these events, smp_snoop shouldn't be invoked in
ib_process_mad(), or duplicate events will occur (once from the
FW-generated event, and once from smp_snoop).
In the case where the FW does not generate port management change
events smp_snoop needs to be invoked to create these events.  The flow
in smp_snoop has been modified to make use of the same procedures as
in the fw-generated-event event case to generate the port management
events (LID change, Client-rereg, Pkey change, and/or GID change).
Port management change event handling required changing the
mlx4_ib_event and mlx4_dispatch_event prototypes; the "param" argument
(last argument) had to be changed to unsigned long in order to
accommodate passing the EQE pointer.
We also needed to move the definition of struct mlx4_eqe from
net/mlx4.h to file device.h -- to make it available to the IB driver,
to handle port management change events.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-06-19 11:21:40 +03:00
/* Deferred-work handler for a port management change EQE. */
void handle_port_mgmt_change_event(struct work_struct *work);
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-06-19 11:21:41 +03:00
/* Return this device's function number (caps.function). */
static inline int mlx4_master_func_num(struct mlx4_dev *dev)
{
	return dev->caps.function;
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
/* Nonzero iff this device instance is the SR-IOV master (PF). */
static inline int mlx4_is_master(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_MASTER;
}
					
						
							|  |  |  | 
 | 
					
						
/*
 * Nonzero iff 'qpn' lies in the range reserved for special QPs:
 * the 8 QPs starting at base_sqpn, plus — on the master only —
 * 16 proxy/tunnel QPs per possible function (MLX4_MFUNC_MAX).
 */
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
	return (qpn < dev->phys_caps.base_sqpn + 8 +
		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
											  
											
												mlx4: Modify proxy/tunnel QP mechanism so that guests do no calculations
Previously, the structure of a guest's proxy QPs followed the
structure of the PPF special qps (qp0 port 1, qp0 port 2, qp1 port 1,
qp1 port 2, ...).  The guest then did offset calculations on the
sqp_base qp number that the PPF passed to it in QUERY_FUNC_CAP().
This is now changed so that the guest does no offset calculations
regarding proxy or tunnel QPs to use.  This change frees the PPF from
needing to adhere to a specific order in allocating proxy and tunnel
QPs.
Now QUERY_FUNC_CAP provides each port individually with its proxy
qp0, proxy qp1, tunnel qp0, and tunnel qp1 QP numbers, and these are
used directly where required (with no offset calculations).
To accomplish this change, several fields were added to the phys_caps
structure for use by the PPF and by non-SR-IOV mode:
    base_sqpn -- in non-sriov mode, this was formerly sqp_start.
    base_proxy_sqpn -- the first physical proxy qp number -- used by PPF
    base_tunnel_sqpn -- the first physical tunnel qp number -- used by PPF.
The current code in the PPF still adheres to the previous layout of
sqps, proxy-sqps and tunnel-sqps.  However, the PPF can change this
layout without affecting VF or (paravirtualized) PF code.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-08-03 08:40:57 +00:00
										 |  |  | 	int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8; | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:41 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
											  
											
												mlx4: Modify proxy/tunnel QP mechanism so that guests do no calculations
Previously, the structure of a guest's proxy QPs followed the
structure of the PPF special qps (qp0 port 1, qp0 port 2, qp1 port 1,
qp1 port 2, ...).  The guest then did offset calculations on the
sqp_base qp number that the PPF passed to it in QUERY_FUNC_CAP().
This is now changed so that the guest does no offset calculations
regarding proxy or tunnel QPs to use.  This change frees the PPF from
needing to adhere to a specific order in allocating proxy and tunnel
QPs.
Now QUERY_FUNC_CAP provides each port individually with its proxy
qp0, proxy qp1, tunnel qp0, and tunnel qp1 QP numbers, and these are
used directly where required (with no offset calculations).
To accomplish this change, several fields were added to the phys_caps
structure for use by the PPF and by non-SR-IOV mode:
    base_sqpn -- in non-sriov mode, this was formerly sqp_start.
    base_proxy_sqpn -- the first physical proxy qp number -- used by PPF
    base_tunnel_sqpn -- the first physical tunnel qp number -- used by PPF.
The current code in the PPF still adheres to the previous layout of
sqps, proxy-sqps and tunnel-sqps.  However, the PPF can change this
layout without affecting VF or (paravirtualized) PF code.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-08-03 08:40:57 +00:00
										 |  |  | 	if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8) | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:41 +00:00
										 |  |  | 		return 1; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return 0; | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2010-10-24 21:08:52 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-12-13 04:10:33 +00:00
/* Nonzero iff the device runs in multi-function (SR-IOV) mode, as master or slave. */
static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
{
	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | static inline int mlx4_is_slave(struct mlx4_dev *dev) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 	return dev->flags & MLX4_FLAG_SLAVE; | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2010-10-24 21:08:52 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 
					
						
							|  |  |  | 		   struct mlx4_buf *buf); | 
					
						
							|  |  |  | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); | 
					
						
							| 
									
										
										
										
											2008-02-06 21:07:54 -08:00
										 |  |  | static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2008-01-28 10:40:51 +02:00
										 |  |  | 	if (BITS_PER_LONG == 64 || buf->nbufs == 1) | 
					
						
							| 
									
										
										
										
											2008-02-06 21:17:59 -08:00
										 |  |  | 		return buf->direct.buf + offset; | 
					
						
							| 
									
										
										
										
											2008-02-06 21:07:54 -08:00
										 |  |  | 	else | 
					
						
							| 
									
										
										
										
											2008-02-06 21:17:59 -08:00
										 |  |  | 		return buf->page_list[offset >> PAGE_SHIFT].buf + | 
					
						
							| 
									
										
										
										
											2008-02-06 21:07:54 -08:00
										 |  |  | 			(offset & (PAGE_SIZE - 1)); | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn); | 
					
						
							|  |  |  | void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn); | 
					
						
							| 
									
										
										
										
											2011-06-02 09:01:33 -07:00
										 |  |  | int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); | 
					
						
							|  |  |  | void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar); | 
					
						
							|  |  |  | void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar); | 
					
						
							| 
									
										
										
										
											2011-03-22 22:38:41 +00:00
										 |  |  | int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf); | 
					
						
							|  |  |  | void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | 
 | 
					
						
							|  |  |  | int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, | 
					
						
							|  |  |  | 		  struct mlx4_mtt *mtt); | 
					
						
							|  |  |  | void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt); | 
					
						
							|  |  |  | u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, | 
					
						
							|  |  |  | 		  int npages, int page_shift, struct mlx4_mr *mr); | 
					
						
							| 
									
										
										
										
											2013-02-06 16:19:09 +00:00
										 |  |  | int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); | 
					
						
							| 
									
										
										
										
											2013-02-06 16:19:14 +00:00
										 |  |  | int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, | 
					
						
							|  |  |  | 		  struct mlx4_mw *mw); | 
					
						
							|  |  |  | void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw); | 
					
						
							|  |  |  | int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 
					
						
							|  |  |  | 		   int start_index, int npages, u64 *page_list); | 
					
						
							|  |  |  | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 
					
						
							|  |  |  | 		       struct mlx4_buf *buf); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-04-23 11:55:45 -07:00
										 |  |  | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); | 
					
						
							|  |  |  | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-04-25 14:27:08 -07:00
										 |  |  | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | 
					
						
							|  |  |  | 		       int size, int max_direct); | 
					
						
							|  |  |  | void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, | 
					
						
							|  |  |  | 		       int size); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | 
					
						
							| 
									
										
										
										
											2008-04-29 13:46:50 -07:00
										 |  |  | 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, | 
					
						
							| 
									
										
										
										
											2008-12-22 07:15:03 -08:00
										 |  |  | 		  unsigned vector, int collapsed); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-10-10 12:01:37 -07:00
										 |  |  | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); | 
					
						
							|  |  |  | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-02 10:43:26 -07:00
										 |  |  | int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn, | 
					
						
							|  |  |  | 		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq); | 
					
						
							|  |  |  | int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark); | 
					
						
							| 
									
										
										
										
											2007-06-21 13:03:11 +03:00
										 |  |  | int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-06-18 08:15:02 -07:00
										 |  |  | int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-12-13 04:16:21 +00:00
										 |  |  | int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 
					
						
							|  |  |  | 			int block_mcast_loopback, enum mlx4_protocol prot); | 
					
						
							|  |  |  | int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 
					
						
							|  |  |  | 			enum mlx4_protocol prot); | 
					
						
							| 
									
										
										
										
											2008-07-14 23:48:48 -07:00
										 |  |  | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
										 |  |  | 			  u8 port, int block_mcast_loopback, | 
					
						
							|  |  |  | 			  enum mlx4_protocol protocol, u64 *reg_id); | 
					
						
							| 
									
										
										
										
											2010-12-02 11:44:49 +00:00
										 |  |  | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
										 |  |  | 			  enum mlx4_protocol protocol, u64 reg_id); | 
					
						
							|  |  |  | 
 | 
					
						
/*
 * Domain identifiers for flow-steering rule owners (user verbs, ethtool,
 * RFS, NIC driver).  NOTE(review): the gaps between values suggest each
 * domain reserves a sub-range — confirm against the rule-priority users.
 */
enum {
	MLX4_DOMAIN_UVERBS	= 0x1000,
	MLX4_DOMAIN_ETHTOOL     = 0x2000,
	MLX4_DOMAIN_RFS         = 0x3000,
	MLX4_DOMAIN_NIC    = 0x5000,
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * Software identifiers for the flow-steering match-specification types
 * (see the mlx4_spec_* structs and the union in struct mlx4_spec_list).
 * MLX4_NET_TRANS_RULE_NUM must remain last: it is used as the size of
 * the __sw_id_hw translation table scanned by map_hw_to_sw_id().
 */
enum mlx4_net_trans_rule_id {
	MLX4_NET_TRANS_RULE_ID_ETH = 0,
	MLX4_NET_TRANS_RULE_ID_IB,
	MLX4_NET_TRANS_RULE_ID_IPV6,
	MLX4_NET_TRANS_RULE_ID_IPV4,
	MLX4_NET_TRANS_RULE_ID_TCP,
	MLX4_NET_TRANS_RULE_ID_UDP,
	MLX4_NET_TRANS_RULE_NUM, /* should be last */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-09-05 22:50:48 +00:00
										 |  |  | extern const u16 __sw_id_hw[]; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-09-05 22:50:49 +00:00
										 |  |  | static inline int map_hw_to_sw_id(u16 header_id) | 
					
						
							|  |  |  | { | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	int i; | 
					
						
							|  |  |  | 	for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) { | 
					
						
							|  |  |  | 		if (header_id == __sw_id_hw[i]) | 
					
						
							|  |  |  | 			return i; | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return -EINVAL; | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
/*
 * Promiscuous modes for device-managed flow steering, used by
 * mlx4_flow_steer_promisc_add()/mlx4_flow_steer_promisc_remove().
 */
enum mlx4_net_trans_promisc_mode {
	MLX4_FS_PROMISC_NONE = 0,
	MLX4_FS_PROMISC_UPLINK,
	/* For future use. Not implemented yet */
	MLX4_FS_PROMISC_FUNCTION_PORT,
	MLX4_FS_PROMISC_ALL_MULTI,
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * Ethernet (L2) match fields for a flow-steering specification.
 * Each *_msk field masks which bits of its companion field are compared;
 * ether_type_enable gates the ether_type match.
 */
struct mlx4_spec_eth {
	u8	dst_mac[6];
	u8	dst_mac_msk[6];
	u8	src_mac[6];
	u8	src_mac_msk[6];
	u8	ether_type_enable;
	__be16	ether_type;
	__be16	vlan_id_msk;
	__be16	vlan_id;
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * TCP/UDP (L4) port match fields for a flow-steering specification;
 * *_msk fields mask which bits of the ports are compared.
 */
struct mlx4_spec_tcp_udp {
	__be16 dst_port;
	__be16 dst_port_msk;
	__be16 src_port;
	__be16 src_port_msk;
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * IPv4 (L3) address match fields for a flow-steering specification;
 * *_msk fields mask which bits of the addresses are compared.
 */
struct mlx4_spec_ipv4 {
	__be32 dst_ip;
	__be32 dst_ip_msk;
	__be32 src_ip;
	__be32 src_ip_msk;
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * InfiniBand match fields for a flow-steering specification:
 * remote QP number (masked by qpn_msk) and destination GID
 * (masked by dst_gid_msk).
 */
struct mlx4_spec_ib {
	__be32	r_qpn;
	__be32	qpn_msk;
	u8	dst_gid[16];
	u8	dst_gid_msk[16];
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * One match specification in a flow-steering rule.  The id field
 * (an mlx4_net_trans_rule_id) selects which union member is valid.
 */
struct mlx4_spec_list {
	struct	list_head list;			/* linkage in the rule's spec list */
	enum	mlx4_net_trans_rule_id id;	/* selects the union member below */
	union {
		struct mlx4_spec_eth eth;
		struct mlx4_spec_ib ib;
		struct mlx4_spec_ipv4 ipv4;
		struct mlx4_spec_tcp_udp tcp_udp;
	};
};
					
						
							|  |  |  | 
 | 
					
						
/* Hardware queueing order for flow-steering rules (FIFO or LIFO). */
enum mlx4_net_trans_hw_rule_queue {
	MLX4_NET_TRANS_Q_FIFO,
	MLX4_NET_TRANS_Q_LIFO,
};
					
						
							|  |  |  | 
 | 
					
						
/*
 * A device-managed flow-steering rule, attached with mlx4_flow_attach()
 * and detached with mlx4_flow_detach() using the returned reg_id.
 * NOTE(review): list presumably heads the rule's mlx4_spec_list match
 * specifications — confirm against mlx4_flow_attach() callers.
 */
struct mlx4_net_trans_rule {
	struct	list_head list;
	enum	mlx4_net_trans_hw_rule_queue queue_mode;	/* FIFO or LIFO */
	bool	exclusive;
	bool	allow_loopback;
	enum	mlx4_net_trans_promisc_mode promisc_mode;
	u8	port;		/* physical port the rule applies to */
	u16	priority;
	u32	qpn;		/* destination QP number */
};
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-07-05 04:03:48 +00:00
										 |  |  | int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, | 
					
						
							|  |  |  | 				enum mlx4_net_trans_promisc_mode mode); | 
					
						
							|  |  |  | int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, | 
					
						
							|  |  |  | 				   enum mlx4_net_trans_promisc_mode mode); | 
					
						
							| 
									
										
										
										
											2011-03-22 22:38:31 +00:00
										 |  |  | int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); | 
					
						
							|  |  |  | int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); | 
					
						
							|  |  |  | int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); | 
					
						
							|  |  |  | int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); | 
					
						
							|  |  |  | int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-12-13 04:16:21 +00:00
										 |  |  | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); | 
					
						
							|  |  |  | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); | 
					
						
							| 
									
										
										
										
											2013-02-07 02:25:22 +00:00
										 |  |  | int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port); | 
					
						
							|  |  |  | int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); | 
					
						
							| 
									
										
										
										
											2012-01-19 09:45:05 +00:00
										 |  |  | void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap); | 
					
						
							| 
									
										
										
										
											2012-03-06 04:04:47 +00:00
										 |  |  | int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, | 
					
						
							|  |  |  | 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); | 
					
						
							|  |  |  | int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | 
					
						
							|  |  |  | 			   u8 promisc); | 
					
						
							| 
									
										
										
										
											2012-04-04 21:33:25 +00:00
										 |  |  | int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); | 
					
						
							|  |  |  | int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, | 
					
						
							|  |  |  | 		u8 *pg, u16 *ratelimit); | 
					
						
							| 
									
										
										
										
											2010-08-26 17:19:22 +03:00
										 |  |  | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); | 
					
						
							| 
									
										
										
										
											2008-10-22 11:44:46 -07:00
										 |  |  | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); | 
					
						
							|  |  |  | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-08-01 12:29:05 +03:00
										 |  |  | int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, | 
					
						
							|  |  |  | 		      int npages, u64 iova, u32 *lkey, u32 *rkey); | 
					
						
							|  |  |  | int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, | 
					
						
							|  |  |  | 		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr); | 
					
						
							|  |  |  | int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr); | 
					
						
							|  |  |  | void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, | 
					
						
							|  |  |  | 		    u32 *lkey, u32 *rkey); | 
					
						
							|  |  |  | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); | 
					
						
							|  |  |  | int mlx4_SYNC_TPT(struct mlx4_dev *dev); | 
					
						
							| 
									
										
										
										
											2010-08-24 03:46:18 +00:00
										 |  |  | int mlx4_test_interrupts(struct mlx4_dev *dev); | 
					
						
							| 
									
										
										
										
											2012-07-18 22:33:51 +00:00
										 |  |  | int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, | 
					
						
							|  |  |  | 		   int *vector); | 
					
						
							| 
									
										
										
										
											2011-03-22 22:37:47 +00:00
										 |  |  | void mlx4_release_eq(struct mlx4_dev *dev, int vec); | 
					
						
							| 
									
										
										
										
											2007-08-01 12:29:05 +03:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-03-22 22:37:59 +00:00
										 |  |  | int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); | 
					
						
							|  |  |  | int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2011-06-15 14:47:14 +00:00
										 |  |  | int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); | 
					
						
							|  |  |  | void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												{NET, IB}/mlx4: Add device managed flow steering firmware API
The driver is modified to support three operation modes.
If supported by firmware use the device managed flow steering
API, which we call device managed steering mode. Else, if
the firmware supports the B0 steering mode use it, and finally,
if none of the above, use the A0 steering mode.
When the steering mode is device managed, the code is modified
such that L2 based rules set by the mlx4_en driver for Ethernet
unicast and multicast, and the IB stack multicast attach calls
done through the mlx4_ib driver are all routed to use the device
managed API.
When attaching rule using device managed flow steering API,
the firmware returns a 64 bit registration id, which is to be
provided during detach.
Currently the firmware is always programmed during HCA initialization
to use standard L2 hashing. Future work should be done to allow
configuring the flow-steering hash function with common, non
proprietary means.
Signed-off-by: Hadar Hen Zion <hadarh@mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
											
										 
											2012-07-05 04:03:46 +00:00
										 |  |  | int mlx4_flow_attach(struct mlx4_dev *dev, | 
					
						
							|  |  |  | 		     struct mlx4_net_trans_rule *rule, u64 *reg_id); | 
					
						
							|  |  |  | int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:43 +00:00
										 |  |  | void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, | 
					
						
							|  |  |  | 			  int i, int val); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
											  
											
												mlx4_core: Implement mechanism for reserved Q_Keys
The SR-IOV special QP tunneling mechanism uses proxy special QPs
(instead of the real special QPs) for MADs on guests.  These proxy QPs
send their packets to a "tunnel" QP owned by the master.  The master
then forwards the MAD (after any required paravirtualization) to the
real special QP, which sends out the MAD.
For security reasons (i.e., to prevent guests from sending MADs to
tunnel QPs belonging to other guests), each proxy-tunnel QP pair is
assigned a unique, reserved, Q_Key.  These Q_Keys are available only
for proxy and tunnel QPs -- if the guest tries to use these Q_Keys
with other QPs, it will fail.
This patch introduces a mechanism for reserving a block of 64K Q_Keys
for proxy/tunneling use.
The patch introduces also two new fields into mlx4_dev: base_sqpn and
base_tunnel_sqpn.
In SR-IOV mode, the QP numbers for the "real," proxy, and tunnel sqps
are added to the reserved QPN area (so that they will not change).
There are 8 special QPs per port in the HCA, and each of them is
assigned both a proxy and a tunnel QP, for each VF and for the PF as
well in SR-IOV mode.
The QPNs for these QPs are arranged as follows:
 1. The real SQP numbers (8)
 2. The proxy SQPs (8 * (max number of VFs + max number of PFs)
 3. The tunnel SQPs (8 * (max number of VFs + max number of PFs)
To support these QPs, two new fields are added to struct mlx4_dev:
  base_sqp:  this is the QP number of the first of the real SQPs
  base_tunnel_sqp: this is the qp number of the first qp in the tunnel
                   sqp region. (On guests, this is the first tunnel
                   sqp of the 8 which are assigned to that guest).
In addition, in SR-IOV mode, sqp_start is the number of the first
proxy SQP in the proxy SQP region.  (In guests, this is the first
proxy SQP of the 8 which are assigned to that guest)
Note that in non-SR-IOV mode, there are no proxies and no tunnels.
In this case, sqp_start is set to sqp_base -- which minimizes code
changes.
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
											
										 
											2012-06-19 11:21:42 +03:00
										 |  |  | int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:48 +00:00
										 |  |  | int mlx4_is_slave_active(struct mlx4_dev *dev, int slave); | 
					
						
							|  |  |  | int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port); | 
					
						
							|  |  |  | int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port); | 
					
						
							|  |  |  | int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr); | 
					
						
							|  |  |  | int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change); | 
					
						
							|  |  |  | enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port); | 
					
						
							|  |  |  | int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event); | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:56 +00:00
										 |  |  | void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid); | 
					
						
							|  |  |  | __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave); | 
					
						
							| 
									
										
										
										
											2012-08-03 08:40:48 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2007-05-08 18:00:38 -07:00
										 |  |  | #endif /* MLX4_DEVICE_H */
 |