--- net/ipv4/route.c.orig	2004-05-11 23:56:10.000000000 +1000
+++ net/ipv4/route.c	2004-05-27 16:16:23.000000000 +1000
@@ -101,7 +101,7 @@
 
 #define IP_MAX_MTU 0xFFF0
 
-#define RT_GC_TIMEOUT (300*HZ)
+#define RT_GC_TIMEOUT (120*HZ)
 
 int ip_rt_min_delay = 2 * HZ;
 int ip_rt_max_delay = 10 * HZ;
@@ -138,7 +138,8 @@
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void ipv4_link_failure(struct sk_buff *skb);
 static int rt_garbage_collect(void);
-
+static int rt_garbage_docollect(void);
+static int rt_delete_now(void);
 
 struct dst_ops ipv4_dst_ops = {
 	family: AF_INET,
@@ -526,7 +527,7 @@
    and when load increases it reduces to limit cache size.
  */
 
-static int rt_garbage_collect(void)
+static int rt_garbage_docollect(void)
 {
 	static unsigned long expire = RT_GC_TIMEOUT;
 	static unsigned long last_gc;
@@ -630,8 +631,11 @@
 
 	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
 		goto out;
+/*
+ * don't complain, just silently attempt to correct
 	if (net_ratelimit())
 		printk(KERN_WARNING "dst cache overflow\n");
+*/
 	rt_cache_stat[smp_processor_id()].gc_dst_overflow++;
 	return 1;
 
@@ -646,6 +650,47 @@
 #endif
 out:	return 0;
 }
+
+/* Flush the whole route cache: last resort when gc cannot shrink it. */
+static int rt_delete_now(void)
+{
+	struct rtable *rth, **rthp;
+	int i = 0, ent1 = 0, ent2 = 0, c = 0;
+
+	ent1 = atomic_read(&ipv4_dst_ops.entries);
+	local_bh_disable();
+	/* rt_hash_mask is the table size minus one: the last valid index. */
+	while (i <= rt_hash_mask) {
+		rthp = &(rt_hash_table[i].chain);
+		while ((rth = *rthp) != NULL) {
+			*rthp = rth->u.rt_next;
+			rth->u.rt_next = NULL;
+			c++;
+			rt_free(rth);
+		}
+		i++;
+	}
+
+	/* ipv4_dst_ops.entries is decremented by rt_free() via
+	 * dst_destroy(), so it must not be reset by hand here. */
+	local_bh_enable();
+	ent2 = atomic_read(&ipv4_dst_ops.entries);
+
+	if (net_ratelimit()) {
+		printk(KERN_WARNING "dst cache overflow\n");
+		printk(KERN_WARNING "rt_delete_now(): s:%d e:%d t:%d\n", ent1, ent2, c);
+	}
+
+	return 0;
+}
+
+/* Run the normal collector first; flush everything only if it fails. */
+static int rt_garbage_collect(void)
+{
+	if (rt_garbage_docollect())
+		rt_delete_now();
+	return 0;
+}
 
 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 {
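
A note on locking: the flush loop in rt_delete_now() unlinks chain
entries with only local_bh_disable() held. On SMP, the other readers
and writers in route.c take the per-bucket lock (rt_hash_table[i].lock)
around chain walks, so an unlocked unlink can race with, say,
rt_intern_hash() running on another CPU. Below is a minimal sketch of
a locked variant, modeled on the existing rt_run_flush() in the same
file; rt_flush_all() is a hypothetical name, and the sketch assumes
the 2.4-era rt_hash_bucket layout with an rwlock_t lock field:

	static void rt_flush_all(void)
	{
		int i;
		struct rtable *rth, *next;

		for (i = rt_hash_mask; i >= 0; i--) {
			/* Detach the whole chain under the write lock... */
			write_lock_bh(&rt_hash_table[i].lock);
			rth = rt_hash_table[i].chain;
			if (rth)
				rt_hash_table[i].chain = NULL;
			write_unlock_bh(&rt_hash_table[i].lock);

			/* ...then free it with the lock dropped, keeping
			 * the critical section short. */
			for (; rth; rth = next) {
				next = rth->u.rt_next;
				rt_free(rth);
			}
		}
	}

Detaching each chain under the write lock and freeing the entries
after the unlock means other CPUs never see a half-unlinked chain,
and the dst accounting is still handled by rt_free().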