net/mlx4_en: Configure the XPS queue mapping on driver load
Only TX rings of User Priority 0 are mapped; TX rings of other UPs reuse the UP 0 mapping. XPS is not used when num_tc is set.

Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
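As background, XPS (Transmit Packet Steering) lets the stack prefer a TX queue whose CPU mask contains the CPU doing the transmit. The sketch below illustrates the pattern this patch applies when a ring is brought up: build a cpumask holding the CPU with the same index as the queue and register it with netif_set_xps_queue(). This is a minimal sketch, not the mlx4 code itself; the helper name and the simplified has_tc check are assumptions for illustration.

#include <linux/netdevice.h>
#include <linux/cpumask.h>

/*
 * Minimal sketch (not the mlx4 driver code): map TX queue 'qid' of
 * 'dev' to the CPU with the same index, mirroring what the patch does
 * for User Priority 0 rings.  When multiple traffic classes are in use
 * (num_tc set / user_prio != 0) the mapping is skipped, so those rings
 * fall back to the UP 0 mapping.
 */
static int example_set_xps_for_queue(struct net_device *dev, int qid,
				     bool has_tc)
{
	cpumask_t mask;

	if (has_tc || !cpu_online(qid))
		return 0;	/* leave the existing/UP 0 mapping in place */

	cpumask_clear(&mask);
	cpumask_set_cpu(qid, &mask);

	/* Records the queue->CPU affinity used by XPS queue selection. */
	return netif_set_xps_queue(dev, &mask, qid);
}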
commit d03a68f821
parent 84c864038d

3 changed files with 16 additions and 4 deletions
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1910,8 +1910,10 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX, node))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
-					   prof->tx_ring_size, TXBB_SIZE, node))
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+					   priv->base_tx_qpn + i,
+					   prof->tx_ring_size, TXBB_SIZE,
+					   node, i))
 			goto err;
 	}
 
drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
-			   u16 stride, int node)
+			   u16 stride, int node, int queue_index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_tx_ring *ring;
@@ -140,6 +140,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 		ring->bf_enabled = true;
 
 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+	ring->queue_index = queue_index;
+
+	if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+		cpumask_set_cpu(queue_index, &ring->affinity_mask);
 
 	*pring = ring;
 	return 0;
@@ -206,6 +210,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
+	if (!user_prio && cpu_online(ring->queue_index))
+		netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+				    ring->queue_index);
 
 	return err;
 }
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -255,6 +255,8 @@ struct mlx4_en_tx_ring {
 	u16 poll_cnt;
 	struct mlx4_en_tx_info *tx_info;
 	u8 *bounce_buf;
+	u8 queue_index;
+	cpumask_t affinity_mask;
 	u32 last_nr_txbb;
 	struct mlx4_qp qp;
 	struct mlx4_qp_context context;
@@ -719,7 +721,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring,
-			   int qpn, u32 size, u16 stride, int node);
+			   int qpn, u32 size, u16 stride,
+			   int node, int queue_index);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
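Once the driver is up, the per-queue mapping installed by netif_set_xps_queue() is visible from userspace under /sys/class/net/<dev>/queues/tx-<n>/xps_cpus. Below is a minimal userspace check; the interface name "eth0" and the queue count of 8 are assumptions for illustration. With this patch applied, tx-N of a UP 0 ring should report a mask containing CPU N, while rings left unmapped (e.g. when num_tc is set) keep the default all-zero mask.

#include <stdio.h>

/*
 * Minimal sketch: print the XPS CPU mask of each TX queue of "eth0"
 * (interface name and queue count are assumptions, adjust as needed).
 */
int main(void)
{
	char path[128], mask[64];

	for (int q = 0; q < 8; q++) {
		snprintf(path, sizeof(path),
			 "/sys/class/net/eth0/queues/tx-%d/xps_cpus", q);

		FILE *f = fopen(path, "r");
		if (!f)
			continue;	/* queue absent or XPS not compiled in */

		if (fgets(mask, sizeof(mask), f))
			printf("tx-%d: cpus %s", q, mask);
		fclose(f);
	}
	return 0;
}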