Hello!
> +	/* First, check that queue is collapsable and find
> +	 * the point where collapsing can be useful. */
> +	for (skb = head; skb != tail; ) {
> +		/* No new bits? It is possible on ofo queue. */
> +		if (!before(start, TCP_SKB_CB(skb)->seq)) {
> +			struct sk_buff *next = skb->next;
> +			__skb_unlink(skb, skb->list);
> +			__kfree_skb(skb);
> +			NET_INC_STATS_BH(TCPRcvCollapsed);
Yep, that patch is not right. The following and preceding hunks, which do not
contain this "if", are right.
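To see why: suppose an skb covers the sequence range [90, 200) and start
is 100. Bytes 100..199 are still new data, yet the quoted test frees the
skb. A standalone sketch (this before() mirrors the kernel's serial-number
comparison; the numbers are invented):

#include <stdio.h>
#include <stdint.h>

/* Serial-number test, as the kernel's before() does it. */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        uint32_t start = 100, seq = 90, end_seq = 200;

        /* Test from the quoted hunk: prints 1, so an skb still
         * holding the new bytes [100, 200) would be freed. */
        printf("buggy:   %d\n", !before(start, seq));

        /* Test on end_seq: prints 0, the skb is kept. */
        printf("correct: %d\n", !before(start, end_seq));
        return 0;
}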
Either delete this "if" entirely:
> +		/* No new bits? It is possible on ofo queue. */
> +		if (!before(start, TCP_SKB_CB(skb)->seq)) {
or, better, replace tcp_collapse with a correct version.
(Appended. I am sorry, I cannot prepare an incremental patch.)
Alexey
static void
tcp_collapse(struct sock *sk, struct sk_buff *head,
             struct sk_buff *tail, u32 start, u32 end)
{
        struct sk_buff *skb;
        /* First, check that queue is collapsable and find
         * the point where collapsing can be useful. */
        for (skb = head; skb != tail; ) {
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                        struct sk_buff *next = skb->next;
                        __skb_unlink(skb, skb->list);
                        __kfree_skb(skb);
                        NET_INC_STATS_BH(TCPRcvCollapsed);
                        skb = next;
                        continue;
                }

                /* The first skb to collapse is:
                 * - not SYN/FIN and
                 * - bloated or contains data before "start" or
                 *   overlaps to the next one.
                 */
                if (!skb->h.th->syn && !skb->h.th->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
                     before(TCP_SKB_CB(skb)->seq, start) ||
                     (skb->next != tail &&
                      TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
                        break;

                /* Decided to skip this, advance start seq. */
                start = TCP_SKB_CB(skb)->end_seq;
                skb = skb->next;
        }
        if (skb == tail || skb->h.th->syn || skb->h.th->fin)
                return;
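
        /* Then copy the data from [start, end) into new, compactly
         * filled skbs, releasing the old ones as they are consumed. */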
        while (before(start, end)) {
                struct sk_buff *nskb;
                int header = skb_headroom(skb);
                int copy = (PAGE_SIZE - sizeof(struct sk_buff) -
                            sizeof(struct skb_shared_info) - header - 31) & ~15;

                /* Too big header? This can happen with IPv6. */
                if (copy < 0)
                        return;
                if (end - start < copy)
                        copy = end - start;
                nskb = alloc_skb(copy + header, GFP_ATOMIC);
                if (!nskb)
                        return;
                skb_reserve(nskb, header);
                memcpy(nskb->head, skb->head, header);
                nskb->nh.raw = nskb->head + (skb->nh.raw - skb->head);
                nskb->h.raw = nskb->head + (skb->h.raw - skb->head);
                nskb->mac.raw = nskb->head + (skb->mac.raw - skb->head);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
                __skb_insert(nskb, skb->prev, skb, skb->list);
                tcp_set_owner_r(nskb, sk);
while (copy > 0) {
int offset = start - TCP_SKB_CB(skb)->seq;
int size = TCP_SKB_CB(skb)->end_seq - start;
if (offset < 0) BUG();
if (size > 0) {
size = min(copy, size);
if (skb_copy_bits(skb, offset, skb_put(nskb,
size), size))
BUG();
TCP_SKB_CB(nskb)->end_seq += size;
copy -= size;
start += size;
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
NET_INC_STATS_BH(TCPRcvCollapsed);
skb = next;
if (skb == tail || skb->h.th->syn ||
skb->h.th->fin)
return;
}
}
}
}
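
For reference, the "copy" computation above only rounds the payload room
left in one page down to a 16-byte multiple. A standalone sketch with
invented sizes (the real sizeof() values depend on kernel version and
configuration):

#include <stdio.h>

#define PAGE_SIZE           4096    /* assumed */
#define SIZEOF_SK_BUFF       160    /* assumed */
#define SIZEOF_SHARED_INFO   128    /* assumed */

int main(void)
{
        int header = 128;       /* e.g. headroom for IPv4 + TCP */
        int copy = (PAGE_SIZE - SIZEOF_SK_BUFF - SIZEOF_SHARED_INFO -
                    header - 31) & ~15;

        /* 4096 - 160 - 128 - 128 - 31 = 3649; 3649 & ~15 = 3648 */
        printf("copy = %d\n", copy);
        return 0;
}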