Rollover indicates exceptional conditions. Export a counter to inform
socket owners of this state.

If no socket with sufficient room is found, rollover fails. Also count
these events.

Finally, also count when flows are rolled over early thanks to huge
flow detection, to validate its correctness.

Tested:
  Read counters in bench_rollover while running the other tests in
  the patchset.

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
 #define PACKET_FANOUT			18
 #define PACKET_TX_HAS_OFF		19
 #define PACKET_QDISC_BYPASS		20
+#define PACKET_ROLLOVER_STATS		21
 
 #define PACKET_FANOUT_HASH		0
 #define PACKET_FANOUT_LB		1

 	unsigned int	tp_freeze_q_cnt;
 };
 
+struct tpacket_rollover_stats {
+	__aligned_u64	tp_all;
+	__aligned_u64	tp_huge;
+	__aligned_u64	tp_failed;
+};
+
 union tpacket_stats_u {
 	struct tpacket_stats stats1;
 	struct tpacket_stats_v3 stats3;
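
The three counters are exported as fixed-width, 8-byte-aligned fields,
so the structure has the same size and layout for 32-bit and 64-bit
user space. A compile-time check of that property (illustrative only,
not part of the patch):

	#include <linux/if_packet.h>

	/* Illustrative: three __aligned_u64 fields with no padding, so
	 * the layout does not depend on the user-space ABI. */
	_Static_assert(sizeof(struct tpacket_rollover_stats) == 3 * sizeof(__u64),
		       "tp_all, tp_huge, tp_failed are plain 64-bit counters");
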
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
 					  unsigned int num)
 {
 	struct packet_sock *po, *po_next;
-	unsigned int i, j, room;
+	unsigned int i, j, room = ROOM_NONE;
 
 	po = pkt_sk(f->arr[idx]);

 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
 			if (i != j)
 				po->rollover->sock = i;
+			atomic_long_inc(&po->rollover->num);
+			if (room == ROOM_LOW)
+				atomic_long_inc(&po->rollover->num_huge);
 			return i;
 		}
 
 		if (++i == num)
 			i = 0;
 	} while (i != j);
 
+	atomic_long_inc(&po->rollover->num_failed);
 	return idx;
 }

 		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
 		if (!po->rollover)
 			return -ENOMEM;
+		atomic_long_set(&po->rollover->num, 0);
+		atomic_long_set(&po->rollover->num_huge, 0);
+		atomic_long_set(&po->rollover->num_failed, 0);
 	}
 
 	mutex_lock(&fanout_mutex);
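
The initialization above runs when a socket joins a fanout group with
rollover enabled. A minimal user-space sketch of such a join (the group
id and the hash mode are arbitrary illustrative choices, not taken from
the patch):

	#include <errno.h>
	#include <error.h>
	#include <linux/if_packet.h>
	#include <sys/socket.h>

	/* Join fd to fanout group 7: hash steering, rolling over to
	 * another group member when the hashed socket runs out of room. */
	static void join_rollover_group(int fd)
	{
		int val = 7 | ((PACKET_FANOUT_HASH |
				PACKET_FANOUT_FLAG_ROLLOVER) << 16);

		if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)))
			error(1, errno, "setsockopt PACKET_FANOUT");
	}
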
 	struct packet_sock *po = pkt_sk(sk);
 	void *data = &val;
 	union tpacket_stats_u st;
+	struct tpacket_rollover_stats rstats;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;

 			((u32)po->fanout->flags << 24)) :
 		       0);
 		break;
+	case PACKET_ROLLOVER_STATS:
+		if (!po->rollover)
+			return -EINVAL;
+		rstats.tp_all = atomic_long_read(&po->rollover->num);
+		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+		data = &rstats;
+		lv = sizeof(rstats);
+		break;
 	case PACKET_TX_HAS_OFF:
 		val = po->tp_tx_has_off;
 		break;
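
Reading the counters then takes a single getsockopt() call on any
socket that has rollover state; the call fails with EINVAL on sockets
that never enabled rollover. A minimal sketch (not the bench_rollover
tool itself):

	#include <errno.h>
	#include <error.h>
	#include <linux/if_packet.h>
	#include <stdio.h>
	#include <sys/socket.h>

	/* Dump the per-socket rollover counters added by this patch. */
	static void print_rollover_stats(int fd)
	{
		struct tpacket_rollover_stats rstats;
		socklen_t len = sizeof(rstats);

		if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
			       &rstats, &len))
			error(1, errno, "getsockopt PACKET_ROLLOVER_STATS");

		printf("rolled over: %llu (huge flow: %llu, failed: %llu)\n",
		       (unsigned long long)rstats.tp_all,
		       (unsigned long long)rstats.tp_huge,
		       (unsigned long long)rstats.tp_failed);
	}
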
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
 struct packet_rollover {
 	int			sock;
+	atomic_long_t		num;
+	atomic_long_t		num_huge;
+	atomic_long_t		num_failed;
 #define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
 	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
 } ____cacheline_aligned_in_smp;