}
flow_key_set_recirc_id(skb, nla_get_u32(a));
- ovs_dp_process_packet(skb, true);
+ ovs_dp_process_packet(skb);
return 0;
}
return 0;
}
-/* We limit the number of times that we pass into execute_actions()
- * to avoid blowing out the stack in the event that we have a loop.
- *
- * Each loop adds some (estimated) cost to the kernel stack.
- * The loop terminates when the max cost is exceeded.
- * */
-#define RECIRC_STACK_COST 1
-#define DEFAULT_STACK_COST 4
-/* Allow up to 4 regular services, and up to 3 recirculations */
-#define MAX_STACK_COST (DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3)
-
-struct loop_counter {
- u8 stack_cost; /* loop stack cost. */
- bool looping; /* Loop detected? */
-};
-
-static DEFINE_PER_CPU(struct loop_counter, loop_counters);
-
-static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
-{
- if (net_ratelimit())
- pr_warn("%s: flow loop detected, dropping\n",
- ovs_dp_name(dp));
- actions->actions_len = 0;
- return -ELOOP;
-}
-
/* Execute a list of actions against 'skb'. */
-int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_actions *acts, bool recirc)
-{
- const u8 stack_cost = recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
- struct loop_counter *loop;
- int error;
-
- /* Check whether we've looped too much. */
- loop = &__get_cpu_var(loop_counters);
- loop->stack_cost += stack_cost;
- if (unlikely(loop->stack_cost > MAX_STACK_COST))
- loop->looping = true;
- if (unlikely(loop->looping)) {
- error = loop_suppress(dp, acts);
- kfree_skb(skb);
- goto out_loop;
- }
-
- error = do_execute_actions(dp, skb, acts->actions, acts->actions_len);
-
- /* Check whether sub-actions looped too much. */
- if (unlikely(loop->looping))
- error = loop_suppress(dp, acts);
-
-out_loop:
- /* Decrement loop stack cost. */
- loop->stack_cost -= stack_cost;
- if (!loop->stack_cost)
- loop->looping = false;
-
- return error;
+/* Execute the flow's action list against 'skb'.  Recirculation depth
+ * limiting now lives in do_execute_actions(), so no loop counter is
+ * needed here. */
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+			struct sw_flow_actions *acts)
+{
+	return do_execute_actions(dp, skb, acts->actions, acts->actions_len);
}
}
/* Must be called with rcu_read_lock. */
-void ovs_dp_process_packet(struct sk_buff *skb, bool recirc)
+void ovs_dp_process_packet(struct sk_buff *skb)
{
const struct vport *p = OVS_CB(skb)->input_vport;
struct sw_flow_key *pkt_key = OVS_CB(skb)->pkt_key;
ovs_flow_stats_update(flow, pkt_key->tp.flags, skb);
sf_acts = rcu_dereference(flow->sf_acts);
- ovs_execute_actions(dp, skb, sf_acts, recirc);
+ ovs_execute_actions(dp, skb, sf_acts);
stats_counter = &stats->n_hit;
out:
sf_acts = rcu_dereference(flow->sf_acts);
local_bh_disable();
- err = ovs_execute_actions(dp, packet, sf_acts, false);
+ err = ovs_execute_actions(dp, packet, sf_acts);
local_bh_enable();
rcu_read_unlock();
extern struct genl_family dp_vport_genl_family;
extern struct genl_multicast_group ovs_dp_vport_multicast_group;
-void ovs_dp_process_packet(struct sk_buff *, bool recirc);
+void ovs_dp_process_packet(struct sk_buff *);
void ovs_dp_detach_port(struct vport *);
int ovs_dp_upcall(struct datapath *, struct sk_buff *,
const struct dp_upcall_info *);
u8 cmd);
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_actions *acts, bool recirc);
+ struct sw_flow_actions *acts);
void ovs_dp_notify_wq(struct work_struct *work);
#define OVS_NLERR(fmt, ...) \