Skip to content
This repository has been archived by the owner on May 2, 2024. It is now read-only.

Commit

Permalink
gro_cells: remove spinlock protecting receive queues
Browse files Browse the repository at this point in the history
As David pointed out, spinlocks are no longer needed
to protect the per cpu queues used in gro cells infrastructure.

Also use new napi_complete_done() API so that gro_flush_timeout
tweaks have an effect.

Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
  • Loading branch information
Eric Dumazet authored and davem330 committed Aug 31, 2015
1 parent e704059 commit c42858e
Showing 1 changed file with 5 additions and 13 deletions.
18 changes: 5 additions & 13 deletions include/net/gro_cells.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,37 +32,28 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
return;
}

/* We run in BH context */
spin_lock(&cell->napi_skbs.lock);

__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);

spin_unlock(&cell->napi_skbs.lock);
}

/* called unser BH context */
/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
struct sk_buff *skb;
int work_done = 0;

spin_lock(&cell->napi_skbs.lock);
while (work_done < budget) {
skb = __skb_dequeue(&cell->napi_skbs);
if (!skb)
break;
spin_unlock(&cell->napi_skbs.lock);
napi_gro_receive(napi, skb);
work_done++;
spin_lock(&cell->napi_skbs.lock);
}

if (work_done < budget)
napi_complete(napi);
spin_unlock(&cell->napi_skbs.lock);
napi_complete_done(napi, work_done);
return work_done;
}

Expand All @@ -77,7 +68,7 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

skb_queue_head_init(&cell->napi_skbs);
__skb_queue_head_init(&cell->napi_skbs);
netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
napi_enable(&cell->napi);
}
Expand All @@ -92,8 +83,9 @@ static inline void gro_cells_destroy(struct gro_cells *gcells)
return;
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

netif_napi_del(&cell->napi);
skb_queue_purge(&cell->napi_skbs);
__skb_queue_purge(&cell->napi_skbs);
}
free_percpu(gcells->cells);
gcells->cells = NULL;
Expand Down

0 comments on commit c42858e

Please sign in to comment.