#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow_keys.h>

#include "enic_clsf.h"

/* enic_addfltr_5t - Add ipv4 5tuple filter
 *	@enic: enic struct of vnic
 *	@keys: flow_keys of ipv4 5tuple
 *	@rq: rq number to steer to
 *
 * This function returns filter_id(hardware_id) of the filter
 * added. In case of error it returns a negative number.
 */
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
	int res;
	struct filter data;

	switch (keys->ip_proto) {
	case IPPROTO_TCP:
		data.u.ipv4.protocol = PROTO_TCP;
		break;
	case IPPROTO_UDP:
		data.u.ipv4.protocol = PROTO_UDP;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	data.type = FILTER_IPV4_5TUPLE;
	data.u.ipv4.src_addr = ntohl(keys->src);
	data.u.ipv4.dst_addr = ntohl(keys->dst);
	data.u.ipv4.src_port = ntohs(keys->port16[0]);
	data.u.ipv4.dst_port = ntohs(keys->port16[1]);
	data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
	spin_unlock_bh(&enic->devcmd_lock);
	res = (res == 0) ? rq : res;

	return res;
}

/* enic_delfltr - Delete clsf filter
 *	@enic: enic struct of vnic
 *	@filter_id: filter_id(hardware_id) of filter to be deleted
 *
 * This function returns zero in case of success, negative number in case of
 * error.
 */
int enic_delfltr(struct enic *enic, u16 filter_id)
{
	int ret;

	spin_lock_bh(&enic->devcmd_lock);
	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
	spin_unlock_bh(&enic->devcmd_lock);

	return ret;
}

#ifdef CONFIG_RFS_ACCEL
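/* enic_flow_may_expire - timer handler that expires aged-out RFS filters
 *	@data: enic struct of vnic, cast to unsigned long
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT hash buckets per run, asks the RFS core
 * (rps_may_expire_flow) whether each flow is still in use and, if so
 * no longer referenced, deletes the hardware filter and frees the node.
 * Re-arms itself every HZ/4 jiffies.
 */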
void enic_flow_may_expire(unsigned long data)
{
	struct enic *enic = (struct enic *)data;
	bool res;
	int j;

	spin_lock(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock(&enic->rfs_h.lock);
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}

/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
 *	@enic: enic struct of vnic
 */
void enic_rfs_flw_tbl_init(struct enic *enic)
{
	int i;

	spin_lock_init(&enic->rfs_h.lock);
	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
	enic->rfs_h.max = enic->config.num_arfs;
	enic->rfs_h.free = enic->rfs_h.max;
	enic->rfs_h.toclean = 0;
	init_timer(&enic->rfs_h.rfs_may_expire);
	enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
	enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}

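/* enic_rfs_flw_tbl_free - free enic->rfs_h members
 *	@enic: enic struct of vnic
 *
 * Stops the expiry timer, then deletes every remaining hardware filter
 * and frees all nodes in the flow table.
 */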
void enic_rfs_flw_tbl_free(struct enic *enic)
{
	int i;

	del_timer_sync(&enic->rfs_h.rfs_may_expire);
	spin_lock(&enic->rfs_h.lock);
	enic->rfs_h.free = 0;
	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			enic_delfltr(enic, n->fltr_id);
			hlist_del(&n->node);
			kfree(n);
		}
	}
	spin_unlock(&enic->rfs_h.lock);
}

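/* htbl_key_search - look up a filter node by flow keys
 *	@h: hash bucket to search
 *	@k: flow_keys to match
 *
 * Returns the enic_rfs_fltr_node whose 5-tuple matches @k, or NULL if the
 * flow is not present in this bucket. Caller must hold enic->rfs_h.lock.
 */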
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
						  struct flow_keys *k)
{
	struct enic_rfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->keys.src == k->src &&
		    tpos->keys.dst == k->dst &&
		    tpos->keys.ports == k->ports &&
		    tpos->keys.ip_proto == k->ip_proto &&
		    tpos->keys.n_proto == k->n_proto)
			return tpos;
	return NULL;
}

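/* enic_rx_flow_steer - steer a flow to a given rx queue
 *	@dev: net device
 *	@skb: packet belonging to the flow
 *	@rxq_index: desired rx queue
 *	@flow_id: flow id assigned by the RFS core
 *
 * Intended as the driver's ndo_rx_flow_steer callback for accelerated RFS.
 * Adds (or moves) an ipv4 TCP/UDP 5-tuple filter so that subsequent packets
 * of the flow are steered to @rxq_index. Returns the hardware filter id on
 * success or a negative errno.
 */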
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	res = skb_flow_dissect(skb, &keys);
	if (!res || keys.n_proto != htons(ETH_P_IP) ||
	    (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* desired rq changed for the flow, we need to delete the
		 * old fltr and add a new one.
		 *
		 * The moment we delete the fltr, upcoming pkts are put in
		 * the default rq based on rss. When we add the new filter,
		 * upcoming pkts are put in the desired queue. This could
		 * cause out-of-order pkts.
		 *
		 * Let's first try adding the new fltr and then delete the
		 * old one.
		 */
		i = --enic->rfs_h.free;
		/* clsf tbl is full, we have to del the old fltr first */
		if (unlikely(i < 0)) {
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				hlist_del(&n->node);
				kfree(n);
				goto ret_unlock;
			}
		/* add new fltr first, then del the old fltr */
		} else {
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			/* deleting old fltr failed. Add old fltr to list.
			 * enic_flow_may_expire() will try to delete it later.
			 */
			if (unlikely(ret < 0)) {
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	/* entry not present */
	} else {
		i = --enic->rfs_h.free;
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			enic->rfs_h.free++;
			res = -ENOMEM;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			enic->rfs_h.free++;
			kfree(n);
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		n->keys = keys;
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock(&enic->rfs_h.lock);
	return res;
}

#else

void enic_rfs_flw_tbl_init(struct enic *enic)
{
}

void enic_rfs_flw_tbl_free(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */