Commit bb05ea7e authored by David S. Miller
Browse files

Merge branch 'enic'



Govindarajulu Varadarajan says:

====================
enic updates

This series fixes minor bugs and adds new features like Accelerated RFS,
busy_poll, tx clean-up in napi_poll.

v3:
* While doing tx cleanup in napi, ignore budget and clean up all desc possible.

v2:
* Fix #ifdef coding style issue in '[PATCH 4/8] enic: alloc/free rx_cpu_rmap'
  And '[PATCH 5/8] enic: Add Accelerated RFS support'
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents eef92962 4cfe8785
......@@ -2,5 +2,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
enic_ethtool.o enic_api.o
enic_ethtool.o enic_api.o enic_clsf.o
......@@ -99,6 +99,44 @@ struct enic_port_profile {
u8 mac_addr[ETH_ALEN];
};
#ifdef CONFIG_RFS_ACCEL
/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5 tuple
 * @flow_id: flow_id of clsf filter provided by kernel
 * @fltr_id: filter id of clsf filter returned by adaptor
 * @rq_id: desired rq index
 * @node: hlist_node
 */
struct enic_rfs_fltr_node {
        struct flow_keys keys;
        u32 flow_id;
        u16 fltr_id;
        u16 rq_id;
        struct hlist_node node;
};
/* enic_rfs_flw_tbl - rfs flow table
 * @max: Maximum number of filters vNIC supports
 * @free: Number of free filters available
 * @toclean: hash table index to clean next; a ENIC_RFS_FLW_BITSHIFT-wide
 *           bitfield, so post-increment wraps around the table on its own
 * @ht_head: hash table list head
 * @lock: spin lock
 * @rfs_may_expire: timer that runs enic_flow_may_expire()
 */
struct enic_rfs_flw_tbl {
        u16 max;
        int free;

#define ENIC_RFS_FLW_BITSHIFT (10)
#define ENIC_RFS_FLW_MASK ((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
        u16 toclean:ENIC_RFS_FLW_BITSHIFT;
        struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
        spinlock_t lock;
        struct timer_list rfs_may_expire;
};
#endif /* CONFIG_RFS_ACCEL */
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
......@@ -140,7 +178,7 @@ struct enic {
unsigned int rq_count;
u64 rq_truncated_pkts;
u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX];
struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
/* interrupt resource cache line section */
____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
......@@ -150,6 +188,9 @@ struct enic {
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
unsigned int cq_count;
#ifdef CONFIG_RFS_ACCEL
struct enic_rfs_flw_tbl rfs_h;
#endif
};
static inline struct device *enic_get_dev(struct enic *enic)
......
......@@ -34,13 +34,13 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
struct vnic_dev *vdev = enic->vdev;
spin_lock(&enic->enic_api_lock);
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
vnic_dev_cmd_proxy_by_index_start(vdev, vf);
err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
vnic_dev_cmd_proxy_end(vdev);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
spin_unlock(&enic->enic_api_lock);
return err;
......
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/flow_keys.h>
#include "enic_res.h"
#include "enic_clsf.h"
/* enic_addfltr_5t - Add ipv4 5tuple filter
 * @enic: enic struct of vnic
 * @keys: flow_keys of ipv4 5tuple
 * @rq: rq number to steer to
 *
 * This function returns filter_id(hardware_id) of the filter
 * added. In case of error it returns a negative number.
 */
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
        int res;
        struct filter data;

        /* zero the filter so fields we never set below are not handed to
         * the adaptor as indeterminate stack bytes
         */
        memset(&data, 0, sizeof(data));

        switch (keys->ip_proto) {
        case IPPROTO_TCP:
                data.u.ipv4.protocol = PROTO_TCP;
                break;
        case IPPROTO_UDP:
                data.u.ipv4.protocol = PROTO_UDP;
                break;
        default:
                return -EPROTONOSUPPORT;
        }

        data.type = FILTER_IPV4_5TUPLE;
        data.u.ipv4.src_addr = ntohl(keys->src);
        data.u.ipv4.dst_addr = ntohl(keys->dst);
        data.u.ipv4.src_port = ntohs(keys->port16[0]);
        data.u.ipv4.dst_port = ntohs(keys->port16[1]);
        data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

        spin_lock_bh(&enic->devcmd_lock);
        res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
        spin_unlock_bh(&enic->devcmd_lock);
        /* on success the hardware filter id is apparently returned
         * through @rq by vnic_dev_classifier() — callers store this
         * return value as fltr_id
         */
        res = (res == 0) ? rq : res;

        return res;
}
/* enic_delfltr - remove a previously installed classifier filter
 * @enic: enic instance owning the device
 * @filter_id: hardware id of the filter to remove
 *
 * Returns 0 on success, a negative number on failure.
 */
int enic_delfltr(struct enic *enic, u16 filter_id)
{
        int err;

        /* firmware commands are serialized by devcmd_lock */
        spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
        spin_unlock_bh(&enic->devcmd_lock);

        return err;
}
#ifdef CONFIG_RFS_ACCEL
/* enic_flow_may_expire - periodic timer callback that garbage-collects rfs
 * filters the kernel networking stack no longer needs.
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT hash buckets per run starting at
 * rfs_h.toclean (a bitfield, so the post-increment wraps around the table)
 * and re-arms itself every HZ/4.
 */
void enic_flow_may_expire(unsigned long data)
{
        struct enic *enic = (struct enic *)data;
        bool res;
        int j;

        spin_lock(&enic->rfs_h.lock);
        for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
                struct hlist_head *hhead;
                struct hlist_node *tmp;
                struct enic_rfs_fltr_node *n;

                /* next bucket; bitfield increment wraps automatically */
                hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
                hlist_for_each_entry_safe(n, tmp, hhead, node) {
                        res = rps_may_expire_flow(enic->netdev, n->rq_id,
                                                  n->flow_id, n->fltr_id);
                        if (res) {
                                /* note: res is bool, so any nonzero error
                                 * from enic_delfltr() reads as true
                                 */
                                res = enic_delfltr(enic, n->fltr_id);
                                /* hw delete failed: keep the node, a later
                                 * pass will retry
                                 */
                                if (unlikely(res))
                                        continue;
                                hlist_del(&n->node);
                                kfree(n);
                                enic->rfs_h.free++;
                        }
                }
        }
        spin_unlock(&enic->rfs_h.lock);
        mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
* @enic: enic data
*/
void enic_rfs_flw_tbl_init(struct enic *enic)
{
int i;
spin_lock_init(&enic->rfs_h.lock);
for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
enic->rfs_h.max = enic->config.num_arfs;
enic->rfs_h.free = enic->rfs_h.max;
enic->rfs_h.toclean = 0;
init_timer(&enic->rfs_h.rfs_may_expire);
enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
void enic_rfs_flw_tbl_free(struct enic *enic)
{
int i, res;
del_timer_sync(&enic->rfs_h.rfs_may_expire);
spin_lock(&enic->rfs_h.lock);
enic->rfs_h.free = 0;
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
struct hlist_head *hhead;
struct hlist_node *tmp;
struct enic_rfs_fltr_node *n;
hhead = &enic->rfs_h.ht_head[i];
hlist_for_each_entry_safe(n, tmp, hhead, node) {
enic_delfltr(enic, n->fltr_id);
hlist_del(&n->node);
kfree(n);
}
}
spin_unlock(&enic->rfs_h.lock);
}
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
struct flow_keys *k)
{
struct enic_rfs_fltr_node *tpos;
hlist_for_each_entry(tpos, h, node)
if (tpos->keys.src == k->src &&
tpos->keys.dst == k->dst &&
tpos->keys.ports == k->ports &&
tpos->keys.ip_proto == k->ip_proto &&
tpos->keys.n_proto == k->n_proto)
return tpos;
return NULL;
}
/* enic_rx_flow_steer - program an ipv4 5-tuple hardware filter so packets
 * of @skb's flow are delivered to @rxq_index (ndo_rx_flow_steer hook).
 * @dev: net device
 * @skb: a packet of the flow to steer
 * @rxq_index: desired rq index
 * @flow_id: flow id assigned by the kernel rfs code
 *
 * Returns the hardware filter id on success, a negative number otherwise.
 * rfs_h.lock serializes against enic_flow_may_expire().
 */
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id)
{
        struct flow_keys keys;
        struct enic_rfs_fltr_node *n;
        struct enic *enic;
        u16 tbl_idx;
        int res, i;

        enic = netdev_priv(dev);
        res = skb_flow_dissect(skb, &keys);
        /* only ipv4 tcp/udp flows can be expressed as 5-tuple filters */
        if (!res || keys.n_proto != htons(ETH_P_IP) ||
            (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
                return -EPROTONOSUPPORT;

        tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
        spin_lock(&enic->rfs_h.lock);
        n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

        if (n) { /* entry already present */
                if (rxq_index == n->rq_id) {
                        res = -EEXIST;
                        goto ret_unlock;
                }

                /* desired rq changed for the flow, we need to delete
                 * old fltr and add new one
                 *
                 * The moment we delete the fltr, the upcoming pkts
                 * are put it default rq based on rss. When we add
                 * new filter, upcoming pkts are put in desired queue.
                 * This could cause ooo pkts.
                 *
                 * Lets 1st try adding new fltr and then del old one.
                 */
                i = --enic->rfs_h.free;
                /* clsf tbl is full, we have to del old fltr first*/
                if (unlikely(i < 0)) {
                        enic->rfs_h.free++;
                        res = enic_delfltr(enic, n->fltr_id);
                        if (unlikely(res < 0))
                                goto ret_unlock;
                        res = enic_addfltr_5t(enic, &keys, rxq_index);
                        if (res < 0) {
                                hlist_del(&n->node);
                                /* fix: the node was unhashed but never
                                 * freed, leaking it
                                 */
                                kfree(n);
                                enic->rfs_h.free++;
                                goto ret_unlock;
                        }
                /* add new fltr 1st then del old fltr */
                } else {
                        int ret;

                        res = enic_addfltr_5t(enic, &keys, rxq_index);
                        if (res < 0) {
                                enic->rfs_h.free++;
                                goto ret_unlock;
                        }
                        ret = enic_delfltr(enic, n->fltr_id);
                        /* deleting old fltr failed. Add old fltr to list.
                         * enic_flow_may_expire() will try to delete it later.
                         */
                        if (unlikely(ret < 0)) {
                                struct enic_rfs_fltr_node *d;
                                struct hlist_head *head;

                                head = &enic->rfs_h.ht_head[tbl_idx];
                                d = kzalloc(sizeof(*d), GFP_ATOMIC);
                                if (d) {
                                        /* fix: record the stale filter's
                                         * identity — enic_flow_may_expire()
                                         * reads rq_id and flow_id, which were
                                         * previously left uninitialized.
                                         * keys stays zeroed so
                                         * htbl_key_search() can never match
                                         * this placeholder (only tcp/udp
                                         * flows are ever inserted).
                                         */
                                        d->fltr_id = n->fltr_id;
                                        d->flow_id = n->flow_id;
                                        d->rq_id = n->rq_id;
                                        INIT_HLIST_NODE(&d->node);
                                        hlist_add_head(&d->node, head);
                                }
                        } else {
                                enic->rfs_h.free++;
                        }
                }

                n->rq_id = rxq_index;
                n->fltr_id = res;
                n->flow_id = flow_id;
        /* entry not present */
        } else {
                i = --enic->rfs_h.free;
                /* NOTE(review): this path rejects when free drops to 0 while
                 * the replace path above only rejects below 0, so the last
                 * free slot is never used for a brand-new flow — confirm the
                 * asymmetry is intended.
                 */
                if (i <= 0) {
                        enic->rfs_h.free++;
                        res = -EBUSY;
                        goto ret_unlock;
                }

                n = kmalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        res = -ENOMEM;
                        enic->rfs_h.free++;
                        goto ret_unlock;
                }

                res = enic_addfltr_5t(enic, &keys, rxq_index);
                if (res < 0) {
                        kfree(n);
                        enic->rfs_h.free++;
                        goto ret_unlock;
                }
                n->rq_id = rxq_index;
                n->fltr_id = res;
                n->flow_id = flow_id;
                n->keys = keys;
                INIT_HLIST_NODE(&n->node);
                hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
        }

ret_unlock:
        spin_unlock(&enic->rfs_h.lock);
        return res;
}
#else
/* CONFIG_RFS_ACCEL disabled: no-op stubs so the rest of the driver can
 * call these unconditionally — presumably invoked from setup/teardown
 * paths elsewhere in the driver.
 */
void enic_rfs_flw_tbl_init(struct enic *enic)
{
}
void enic_rfs_flw_tbl_free(struct enic *enic)
{
}
#endif /* CONFIG_RFS_ACCEL */
/* enic_clsf.h - classifier / Accelerated RFS interface of the enic driver */
#ifndef _ENIC_CLSF_H_
#define _ENIC_CLSF_H_

#include "vnic_dev.h"
#include "enic.h"

/* number of hash buckets the expiry timer scans per invocation */
#define ENIC_CLSF_EXPIRE_COUNT 128

int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
int enic_delfltr(struct enic *enic, u16 filter_id);
/* fix: declared unconditionally — enic_clsf.c provides no-op stub
 * definitions when CONFIG_RFS_ACCEL is disabled, so callers need the
 * prototypes in both configurations (otherwise non-RFS builds hit
 * implicit declarations).
 */
void enic_rfs_flw_tbl_init(struct enic *enic);
void enic_rfs_flw_tbl_free(struct enic *enic);

#ifdef CONFIG_RFS_ACCEL
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id);
#endif /* CONFIG_RFS_ACCEL */

#endif /* _ENIC_CLSF_H_ */
......@@ -29,9 +29,9 @@ int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_fw_info(enic->vdev, fw_info);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -40,9 +40,9 @@ int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_stats_dump(enic->vdev, vstats);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -54,9 +54,9 @@ int enic_dev_add_station_addr(struct enic *enic)
if (!is_valid_ether_addr(enic->netdev->dev_addr))
return -EADDRNOTAVAIL;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -68,9 +68,9 @@ int enic_dev_del_station_addr(struct enic *enic)
if (!is_valid_ether_addr(enic->netdev->dev_addr))
return -EADDRNOTAVAIL;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -80,10 +80,10 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_packet_filter(enic->vdev, directed,
multicast, broadcast, promisc, allmulti);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -92,9 +92,9 @@ int enic_dev_add_addr(struct enic *enic, const u8 *addr)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_add_addr(enic->vdev, addr);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -103,9 +103,9 @@ int enic_dev_del_addr(struct enic *enic, const u8 *addr)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_del_addr(enic->vdev, addr);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -114,9 +114,9 @@ int enic_dev_notify_unset(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_notify_unset(enic->vdev);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -125,9 +125,9 @@ int enic_dev_hang_notify(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_hang_notify(enic->vdev);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -136,10 +136,10 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -148,9 +148,9 @@ int enic_dev_enable(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_enable_wait(enic->vdev);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -159,9 +159,9 @@ int enic_dev_disable(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_disable(enic->vdev);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -170,9 +170,9 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_intr_coal_timer_info(enic->vdev);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -181,9 +181,9 @@ int enic_vnic_dev_deinit(struct enic *enic)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_deinit(enic->vdev);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -192,10 +192,10 @@ int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_init_prov2(enic->vdev,
(u8 *)vp, vic_provinfo_size(vp));
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -204,9 +204,9 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_deinit_done(enic->vdev, status);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -217,9 +217,9 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct enic *enic = netdev_priv(netdev);
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = enic_add_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -230,9 +230,9 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct enic *enic = netdev_priv(netdev);
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -241,9 +241,9 @@ int enic_dev_enable2(struct enic *enic, int active)
{
int err;
spin_lock(&enic->devcmd_lock);
spin_lock_bh(&enic->devcmd_lock);
err = vnic_dev_enable2(enic->vdev, active);
spin_unlock(&enic->devcmd_lock);
spin_unlock_bh(&enic->devcmd_lock);
return err;
}
......@@ -252,9 +252,9 @@ int enic_dev_enable2_done(struct enic *enic, int *status)
{
int err;