2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include "cxgb4_tc_u32_parse.h"
37 #include "cxgb4_tc_u32.h"
39 /* Fill ch_filter_specification with parsed match value/mask pair.
 *
 * Walks every key of the u32 selector and hands each (off, val, mask)
 * triple to the translator callback in @entry whose header offset
 * matches; the callback writes the corresponding field of @fs.
 * Keys are filtered by their offmask so one call processes either the
 * next-header (link) keys or the plain keys — the (elided) final bool
 * argument selects which; see the callers in cxgb4_config_knode().
 */
40 static int fill_match_fields(struct adapter *adap,
41 struct ch_filter_specification *fs,
42 struct tc_cls_u32_offload *cls,
43 const struct cxgb4_match_field *entry,
51 for (i = 0; i < cls->knode.sel->nkeys; i++) {
52 off = cls->knode.sel->keys[i].off;
53 val = cls->knode.sel->keys[i].val;
54 mask = cls->knode.sel->keys[i].mask;
57 /* For next headers, parse only keys with offmask */
58 if (!cls->knode.sel->keys[i].offmask)
61 /* For the remaining, parse only keys without offmask */
62 if (cls->knode.sel->keys[i].offmask)
 /* Scan the translator table; it is terminated by a NULL
  * .val callback.
  */
68 for (j = 0; entry[j].val; j++) {
69 if (off == entry[j].off) {
 /* Translator copies val/mask into the hw spec. */
71 err = entry[j].val(fs, val, mask);
/* cxgb4_config_knode() - offload a tc-u32 knode as a hardware filter.
 * @dev: ingress net_device the filter is attached to
 * @protocol: L3 ethertype; only ETH_P_IP and ETH_P_IPV6 are accepted
 * @cls: u32 classifier offload descriptor from the TC subsystem
 *
 * Two kinds of knode are handled: a "link" knode (has a link_handle),
 * whose keys are translated and cached on the per-bucket jump table for
 * later inheritance; and a plain match knode, whose keys are translated
 * into a ch_filter_specification and programmed into hardware via
 * cxgb4_set_filter() at the slot encoded in the handle.
 */
85 int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
86 struct tc_cls_u32_offload *cls)
88 const struct cxgb4_match_field *start, *link_start = NULL;
89 struct adapter *adapter = netdev2adap(dev);
90 struct ch_filter_specification fs;
91 struct cxgb4_tc_u32_table *t;
92 struct cxgb4_link *link;
93 unsigned int filter_id;
94 u32 uhtid, link_uhtid;
98 if (!can_tc_u32_offload(dev))
 /* Hardware u32 offload only matches on IPv4/IPv6. */
101 if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
104 /* Fetch the location to insert the filter. */
105 filter_id = cls->knode.handle & 0xFFFFF;
 /* NOTE(review): valid ftids appear to be 0..nftids-1; the `>` test
  * lets filter_id == nftids through — confirm this off-by-one is
  * intended.
  */
107 if (filter_id > adapter->tids.nftids) {
108 dev_err(adapter->pdev_dev,
109 "Location %d out of range for insertion. Max: %d\n",
110 filter_id, adapter->tids.nftids);
115 uhtid = TC_U32_USERHTID(cls->knode.handle);
116 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
118 /* Ensure that uhtid is either root u32 (i.e. 0x800)
119 * or a valid linked bucket.
121 if (uhtid != 0x800 && uhtid >= t->size)
124 /* Ensure link handle uhtid is sane, if specified. */
125 if (link_uhtid >= t->size)
128 memset(&fs, 0, sizeof(fs));
 /* Select the match-field translator table for the L3 protocol. */
130 if (protocol == htons(ETH_P_IPV6)) {
131 start = cxgb4_ipv6_fields;
134 start = cxgb4_ipv4_fields;
138 if (uhtid != 0x800) {
139 /* Link must exist from root node before insertion. */
140 if (!t->table[uhtid - 1].link_handle)
143 /* Link must have a valid supported next header. */
144 link_start = t->table[uhtid - 1].match_field;
149 /* Parse links and record them for subsequent jumps to valid
153 const struct cxgb4_next_header *next;
 /* A bucket can be the target of at most one link. */
159 if (t->table[link_uhtid - 1].link_handle) {
160 dev_err(adapter->pdev_dev,
161 "Link handle exists for: 0x%x\n",
166 next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
168 /* Try to find matches that allow jumps to next header. */
169 for (i = 0; next[i].jump; i++) {
 /* The selector's offset computation (offoff, shift,
  * mask, base offset) must match this jump rule
  * exactly.
  */
170 if (next[i].offoff != cls->knode.sel->offoff ||
171 next[i].shift != cls->knode.sel->offshift ||
172 next[i].mask != cls->knode.sel->offmask ||
173 next[i].offset != cls->knode.sel->off)
176 /* Found a possible candidate. Find a key that
177 * matches the corresponding offset, value, and
178 * mask to jump to next header.
180 for (j = 0; j < cls->knode.sel->nkeys; j++) {
181 off = cls->knode.sel->keys[j].off;
182 val = cls->knode.sel->keys[j].val;
183 mask = cls->knode.sel->keys[j].mask;
185 if (next[i].match_off == off &&
186 next[i].match_val == val &&
187 next[i].match_mask == mask) {
194 continue; /* Try next candidate. */
196 /* Candidate to jump to next header found.
197 * Translate all keys to internal specification
198 * and store them in jump table. This spec is copied
199 * later to set the actual filters.
201 ret = fill_match_fields(adapter, &fs, cls,
 /* Record the jump target and cache the partially-built
  * spec on the link for later inheritance by bucket
  * insertions.
  */
206 link = &t->table[link_uhtid - 1];
207 link->match_field = next[i].jump;
208 link->link_handle = cls->knode.handle;
209 memcpy(&link->fs, &fs, sizeof(fs));
213 /* No candidate found to jump to next header. */
220 /* Fill ch_filter_specification match fields to be shipped to hardware.
221 * Copy the linked spec (if any) first. And then update the spec as
224 if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
225 /* Copy linked ch_filter_specification */
226 memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
227 ret = fill_match_fields(adapter, &fs, cls,
233 ret = fill_match_fields(adapter, &fs, cls, start, false);
237 /* The filter spec has been completely built from the info
238 * provided from u32. We now set some default fields in the
242 /* Match only packets coming from the ingress port where this
243 * filter will be created.
245 fs.val.iport = netdev2pinfo(dev)->port_id;
248 /* Enable filter hit counts. */
251 /* Set type of filter - IPv6 or IPv4 */
252 fs.type = is_ipv6 ? 1 : 0;
 /* Program the completed spec into hardware at the chosen slot. */
255 ret = cxgb4_set_filter(dev, filter_id, &fs);
259 /* If this is a linked bucket, then set the corresponding
260 * entry in the bitmap to mark it as belonging to this linked
263 if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
264 set_bit(filter_id, t->table[uhtid - 1].tid_map);
/* cxgb4_delete_knode() - remove an offloaded u32 filter from hardware.
 * @dev: ingress net_device the filter was attached to
 * @protocol: L3 ethertype (unused for deletion beyond the signature)
 * @cls: u32 classifier offload descriptor identifying the filter
 *
 * Deletes the single filter addressed by the handle; if the handle
 * names a link, all filters claimed by that link's tid bitmap are
 * also removed and the link state is reset.
 */
270 int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
271 struct tc_cls_u32_offload *cls)
273 struct adapter *adapter = netdev2adap(dev);
274 unsigned int filter_id, max_tids, i, j;
275 struct cxgb4_link *link = NULL;
276 struct cxgb4_tc_u32_table *t;
280 if (!can_tc_u32_offload(dev))
283 /* Fetch the location to delete the filter. */
284 filter_id = cls->knode.handle & 0xFFFFF;
 /* NOTE(review): same `>` range test as insertion — permits
  * filter_id == nftids; confirm valid range is 0..nftids-1.
  */
286 if (filter_id > adapter->tids.nftids) {
287 dev_err(adapter->pdev_dev,
288 "Location %d out of range for deletion. Max: %d\n",
289 filter_id, adapter->tids.nftids);
294 handle = cls->knode.handle;
295 uhtid = TC_U32_USERHTID(cls->knode.handle);
297 /* Ensure that uhtid is either root u32 (i.e. 0x800)
298 * or a valid linked bucket.
300 if (uhtid != 0x800 && uhtid >= t->size)
303 /* Delete the specified filter */
304 if (uhtid != 0x800) {
 /* Deleting from a linked bucket: the link must exist and
  * this slot must have been claimed by it.
  */
305 link = &t->table[uhtid - 1];
306 if (!link->link_handle)
309 if (!test_bit(filter_id, link->tid_map))
313 ret = cxgb4_del_filter(dev, filter_id);
 /* Release this slot's claim in the bucket's tid bitmap. */
318 clear_bit(filter_id, link->tid_map);
320 /* If a link is being deleted, then delete all filters
321 * associated with the link.
323 max_tids = adapter->tids.nftids;
324 for (i = 0; i < t->size; i++) {
 /* presumably `link` is re-pointed at t->table[i] in an
  * elided statement just above — verify against full source.
  */
327 if (link->link_handle == handle) {
 /* Remove every filter this link had claimed. */
328 for (j = 0; j < max_tids; j++) {
329 if (!test_bit(j, link->tid_map))
332 ret = __cxgb4_del_filter(dev, j, NULL);
336 clear_bit(j, link->tid_map);
339 /* Clear the link state */
340 link->match_field = NULL;
341 link->link_handle = 0;
342 memset(&link->fs, 0, sizeof(link->fs));
/* cxgb4_cleanup_tc_u32() - tear down the adapter's u32 offload table.
 * @adap: adapter whose tc_u32 state is being released
 *
 * Frees each link's tid bitmap, then the table itself.
 */
351 void cxgb4_cleanup_tc_u32(struct adapter *adap)
353 struct cxgb4_tc_u32_table *t;
359 /* Free up all allocated memory. */
361 for (i = 0; i < t->size; i++) {
362 struct cxgb4_link *link = &t->table[i];
 /* Each link owns its tid bitmap; release it first. */
364 t4_free_mem(link->tid_map);
366 t4_free_mem(adap->tc_u32);
369 struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
372 struct cxgb4_tc_u32_table *t;
378 t = t4_alloc_mem(sizeof(*t) +
379 (size * sizeof(struct cxgb4_link)));
385 for (i = 0; i < t->size; i++) {
386 struct cxgb4_link *link = &t->table[i];
387 unsigned int bmap_size;
388 unsigned int max_tids;
390 max_tids = adap->tids.nftids;
391 bmap_size = BITS_TO_LONGS(max_tids);
392 link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
395 bitmap_zero(link->tid_map, max_tids);
401 for (i = 0; i < t->size; i++) {
402 struct cxgb4_link *link = &t->table[i];
405 t4_free_mem(link->tid_map);