net/ipv4/ipvs/ip_vs_lblcr.c
1 /*
2  * IPVS:        Locality-Based Least-Connection with Replication scheduler
3  *
4  * Version:     $Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $
5  *
6  * Authors:     Wensong Zhang <wensong@gnuchina.org>
7  *
8  *              This program is free software; you can redistribute it and/or
9  *              modify it under the terms of the GNU General Public License
10  *              as published by the Free Software Foundation; either version
11  *              2 of the License, or (at your option) any later version.
12  *
13  * Changes:
14  *     Julian Anastasov        :    Added the missing (dest->weight>0)
15  *                                  condition in ip_vs_dest_set_max().
16  *
17  */
18
19 /*
20  * The lblc/r algorithm is as follows (pseudo code):
21  *
22  *       if serverSet[dest_ip] is null then
23  *               n, serverSet[dest_ip] <- {weighted least-conn node};
24  *       else
25  *               n <- {least-conn (alive) node in serverSet[dest_ip]};
26  *               if (n is null) OR
27  *                  (n.conns>n.weight AND
28  *                   there is a node m with m.conns<m.weight/2) then
29  *                   n <- {weighted least-conn node};
30  *                   add n to serverSet[dest_ip];
31  *               if |serverSet[dest_ip]| > 1 AND
32  *                   now - serverSet[dest_ip].lastMod > T then
33  *                   m <- {most conn node in serverSet[dest_ip]};
34  *                   remove m from serverSet[dest_ip];
35  *       if serverSet[dest_ip] changed then
36  *               serverSet[dest_ip].lastMod <- now;
37  *
38  *       return n;
39  *
40  */
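/*
 * Usage sketch (illustrative, not part of this file): the scheduler is
 * selected per virtual service from user space with ipvsadm; the VIP and
 * real-server addresses below are made-up examples:
 *
 *      modprobe ip_vs_lblcr
 *      ipvsadm -A -t 192.168.0.1:80 -s lblcr
 *      ipvsadm -a -t 192.168.0.1:80 -r 10.0.0.2:80 -m -w 2
 *      ipvsadm -a -t 192.168.0.1:80 -r 10.0.0.3:80 -m -w 1
 */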
41
42 #include <linux/ip.h>
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/skbuff.h>
46 #include <linux/jiffies.h>
47
48 /* for sysctl */
49 #include <linux/fs.h>
50 #include <linux/sysctl.h>
51 /* for proc_net_create/proc_net_remove */
52 #include <linux/proc_fs.h>
53 #include <net/net_namespace.h>
54
55 #include <net/ip_vs.h>
56
57
58 /*
59  *    Timeouts for garbage collection of stale IPVS lblcr entries
60  *    when the table is full.
61  */
62 #define CHECK_EXPIRE_INTERVAL   (60*HZ)
63 #define ENTRY_TIMEOUT           (6*60*HZ)
64
65 /*
66  *    Parameters for the full expiration check.
67  *    When no partial expiration check (garbage collection) has run
68  *    in half an hour, do a full expiration check to collect stale
69  *    entries that haven't been touched for a day.
70  */
71 #define COUNT_FOR_FULL_EXPIRATION   30
72 static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
73
74
75 /*
76  *     for IPVS lblcr entry hash table
77  */
78 #ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
79 #define CONFIG_IP_VS_LBLCR_TAB_BITS      10
80 #endif
81 #define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
82 #define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
83 #define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
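/*
 * Worked example with the default CONFIG_IP_VS_LBLCR_TAB_BITS of 10:
 *      IP_VS_LBLCR_TAB_SIZE = 1 << 10 = 1024 buckets
 *      IP_VS_LBLCR_TAB_MASK = 1024 - 1 = 0x3ff
 * ip_vs_lblcr_init_svc() below sizes the table to 16 * 1024 = 16384 entries
 * (tbl->max_size) before the periodic timer starts evicting stale entries.
 */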
84
85
86 /*
87  *      IPVS destination set structure and operations
88  */
89 struct ip_vs_dest_list {
90         struct ip_vs_dest_list  *next;          /* list link */
91         struct ip_vs_dest       *dest;          /* destination server */
92 };
93
94 struct ip_vs_dest_set {
95         atomic_t                size;           /* set size */
96         unsigned long           lastmod;        /* last modified time */
97         struct ip_vs_dest_list  *list;          /* destination list */
98         rwlock_t                lock;           /* lock for this list */
99 };
100
101
102 static struct ip_vs_dest_list *
103 ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
104 {
105         struct ip_vs_dest_list *e;
106
107         for (e=set->list; e!=NULL; e=e->next) {
108                 if (e->dest == dest)
109                         /* already existed */
110                         return NULL;
111         }
112
113         e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC);
114         if (e == NULL) {
115                 IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
116                 return NULL;
117         }
118
119         atomic_inc(&dest->refcnt);
120         e->dest = dest;
121
122         /* link it to the list */
123         write_lock(&set->lock);
124         e->next = set->list;
125         set->list = e;
126         atomic_inc(&set->size);
127         write_unlock(&set->lock);
128
129         set->lastmod = jiffies;
130         return e;
131 }
132
133 static void
134 ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
135 {
136         struct ip_vs_dest_list *e, **ep;
137
138         write_lock(&set->lock);
139         for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
140                 if (e->dest == dest) {
141                         /* HIT */
142                         *ep = e->next;
143                         atomic_dec(&set->size);
144                         set->lastmod = jiffies;
145                         atomic_dec(&e->dest->refcnt);
146                         kfree(e);
147                         break;
148                 }
149                 ep = &e->next;
150         }
151         write_unlock(&set->lock);
152 }
153
154 static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
155 {
156         struct ip_vs_dest_list *e, **ep;
157
158         write_lock(&set->lock);
159         for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
160                 *ep = e->next;
161                 /*
162                  * We don't kfree dest because it is referred to either
163                  * by its service or by the trash dest list.
164                  */
165                 atomic_dec(&e->dest->refcnt);
166                 kfree(e);
167         }
168         write_unlock(&set->lock);
169 }
170
171 /* get weighted least-connection node in the destination set */
172 static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
173 {
174         register struct ip_vs_dest_list *e;
175         struct ip_vs_dest *dest, *least;
176         int loh, doh;
177
178         if (set == NULL)
179                 return NULL;
180
181         read_lock(&set->lock);
182         /* select the first destination server whose weight > 0 */
183         for (e=set->list; e!=NULL; e=e->next) {
184                 least = e->dest;
185                 if (least->flags & IP_VS_DEST_F_OVERLOAD)
186                         continue;
187
188                 if ((atomic_read(&least->weight) > 0)
189                     && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
190                         loh = atomic_read(&least->activeconns) * 50
191                                 + atomic_read(&least->inactconns);
192                         goto nextstage;
193                 }
194         }
195         read_unlock(&set->lock);
196         return NULL;
197
198         /* find the destination with the weighted least load */
199   nextstage:
200         for (e=e->next; e!=NULL; e=e->next) {
201                 dest = e->dest;
202                 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
203                         continue;
204
205                 doh = atomic_read(&dest->activeconns) * 50
206                         + atomic_read(&dest->inactconns);
207                 if ((loh * atomic_read(&dest->weight) >
208                      doh * atomic_read(&least->weight))
209                     && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
210                         least = dest;
211                         loh = doh;
212                 }
213         }
214         read_unlock(&set->lock);
215
216         IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
217                   "activeconns %d refcnt %d weight %d overhead %d\n",
218                   NIPQUAD(least->addr), ntohs(least->port),
219                   atomic_read(&least->activeconns),
220                   atomic_read(&least->refcnt),
221                   atomic_read(&least->weight), loh);
222         return least;
223 }
224
225
226 /* get weighted most-connection node in the destination set */
227 static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
228 {
229         register struct ip_vs_dest_list *e;
230         struct ip_vs_dest *dest, *most;
231         int moh, doh;
232
233         if (set == NULL)
234                 return NULL;
235
236         read_lock(&set->lock);
237         /* select the first destination server whose weight > 0 */
238         for (e=set->list; e!=NULL; e=e->next) {
239                 most = e->dest;
240                 if (atomic_read(&most->weight) > 0) {
241                         moh = atomic_read(&most->activeconns) * 50
242                                 + atomic_read(&most->inactconns);
243                         goto nextstage;
244                 }
245         }
246         read_unlock(&set->lock);
247         return NULL;
248
249         /* find the destination with the weighted most load */
250   nextstage:
251         for (e=e->next; e!=NULL; e=e->next) {
252                 dest = e->dest;
253                 doh = atomic_read(&dest->activeconns) * 50
254                         + atomic_read(&dest->inactconns);
255                 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
256                 if ((moh * atomic_read(&dest->weight) <
257                      doh * atomic_read(&most->weight))
258                     && (atomic_read(&dest->weight) > 0)) {
259                         most = dest;
260                         moh = doh;
261                 }
262         }
263         read_unlock(&set->lock);
264
265         IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
266                   "activeconns %d refcnt %d weight %d overhead %d\n",
267                   NIPQUAD(most->addr), ntohs(most->port),
268                   atomic_read(&most->activeconns),
269                   atomic_read(&most->refcnt),
270                   atomic_read(&most->weight), moh);
271         return most;
272 }
273
274
275 /*
276  *      IPVS lblcr entry represents an association between destination
277  *      IP address and its destination server set
278  */
279 struct ip_vs_lblcr_entry {
280         struct list_head        list;
281         __be32                   addr;           /* destination IP address */
282         struct ip_vs_dest_set   set;            /* destination server set */
283         unsigned long           lastuse;        /* last used time */
284 };
285
286
287 /*
288  *      IPVS lblcr hash table
289  */
290 struct ip_vs_lblcr_table {
291         rwlock_t                lock;           /* lock for this table */
292         struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
293         atomic_t                entries;        /* number of entries */
294         int                     max_size;       /* maximum size of entries */
295         struct timer_list       periodic_timer; /* collect stale entries */
296         int                     rover;          /* rover for expire check */
297         int                     counter;        /* counter for no expire */
298 };
299
300
301 /*
302  *      IPVS LBLCR sysctl table
303  */
304
305 static ctl_table vs_vars_table[] = {
306         {
307                 .ctl_name       = NET_IPV4_VS_LBLCR_EXPIRE,
308                 .procname       = "lblcr_expiration",
309                 .data           = &sysctl_ip_vs_lblcr_expiration,
310                 .maxlen         = sizeof(int),
311                 .mode           = 0644,
312                 .proc_handler   = &proc_dointvec_jiffies,
313         },
314         { .ctl_name = 0 }
315 };
316
317 static ctl_table vs_table[] = {
318         {
319                 .ctl_name       = NET_IPV4_VS,
320                 .procname       = "vs",
321                 .mode           = 0555,
322                 .child          = vs_vars_table
323         },
324         { .ctl_name = 0 }
325 };
326
327 static ctl_table ipvs_ipv4_table[] = {
328         {
329                 .ctl_name       = NET_IPV4,
330                 .procname       = "ipv4",
331                 .mode           = 0555,
332                 .child          = vs_table
333         },
334         { .ctl_name = 0 }
335 };
336
337 static ctl_table lblcr_root_table[] = {
338         {
339                 .ctl_name       = CTL_NET,
340                 .procname       = "net",
341                 .mode           = 0555,
342                 .child          = ipvs_ipv4_table
343         },
344         { .ctl_name = 0 }
345 };
346
347 static struct ctl_table_header * sysctl_header;
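/*
 * The tables above expose net.ipv4.vs.lblcr_expiration. Because the handler
 * is proc_dointvec_jiffies, the value is read and written in seconds, e.g.
 * (illustrative shell command, assuming procfs is mounted at /proc):
 *
 *      echo 3600 > /proc/sys/net/ipv4/vs/lblcr_expiration
 */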
348
349 /*
350  *      new/free an ip_vs_lblcr_entry, which is a mapping of a destination
351  *      IP address to a server set.
352  */
353 static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
354 {
355         struct ip_vs_lblcr_entry *en;
356
357         en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
358         if (en == NULL) {
359                 IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
360                 return NULL;
361         }
362
363         INIT_LIST_HEAD(&en->list);
364         en->addr = daddr;
365
366         /* initialize its dest set */
367         atomic_set(&(en->set.size), 0);
368         en->set.list = NULL;
369         rwlock_init(&en->set.lock);
370
371         return en;
372 }
373
374
375 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
376 {
377         list_del(&en->list);
378         ip_vs_dest_set_eraseall(&en->set);
379         kfree(en);
380 }
381
382
383 /*
384  *      Returns hash value for IPVS LBLCR entry
385  */
386 static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
387 {
388         return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
389 }
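/*
 * 2654435761 (0x9e3779b1) is the 32-bit golden-ratio prime commonly used for
 * multiplicative hashing, so nearby destination addresses still scatter well
 * across the buckets. With the default 10-bit table, the bucket index is
 * simply (ntohl(addr) * 2654435761UL) & 0x3ff.
 */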
390
391
392 /*
393  *      Hash an entry in the ip_vs_lblcr_table.
394  *      Returns boolean success.
395  */
396 static int
397 ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
398 {
399         unsigned hash;
400
401         if (!list_empty(&en->list)) {
402                 IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
403                           "called from %p\n", __builtin_return_address(0));
404                 return 0;
405         }
406
407         /*
408          *      Hash by destination IP address
409          */
410         hash = ip_vs_lblcr_hashkey(en->addr);
411
412         write_lock(&tbl->lock);
413         list_add(&en->list, &tbl->bucket[hash]);
414         atomic_inc(&tbl->entries);
415         write_unlock(&tbl->lock);
416
417         return 1;
418 }
419
420
421 /*
422  *  Get ip_vs_lblcr_entry associated with supplied parameters.
423  */
424 static inline struct ip_vs_lblcr_entry *
425 ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
426 {
427         unsigned hash;
428         struct ip_vs_lblcr_entry *en;
429
430         hash = ip_vs_lblcr_hashkey(addr);
431
432         read_lock(&tbl->lock);
433
434         list_for_each_entry(en, &tbl->bucket[hash], list) {
435                 if (en->addr == addr) {
436                         /* HIT */
437                         read_unlock(&tbl->lock);
438                         return en;
439                 }
440         }
441
442         read_unlock(&tbl->lock);
443
444         return NULL;
445 }
446
447
448 /*
449  *      Flush all the entries of the specified table.
450  */
451 static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
452 {
453         int i;
454         struct ip_vs_lblcr_entry *en, *nxt;
455
456         for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
457                 write_lock(&tbl->lock);
458                 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
459                         ip_vs_lblcr_free(en);
460                         atomic_dec(&tbl->entries);
461                 }
462                 write_unlock(&tbl->lock);
463         }
464 }
465
466
467 static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
468 {
469         unsigned long now = jiffies;
470         int i, j;
471         struct ip_vs_lblcr_entry *en, *nxt;
472
473         for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
474                 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
475
476                 write_lock(&tbl->lock);
477                 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
478                         if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
479                                        now))
480                                 continue;
481
482                         ip_vs_lblcr_free(en);
483                         atomic_dec(&tbl->entries);
484                 }
485                 write_unlock(&tbl->lock);
486         }
487         tbl->rover = j;
488 }
489
490
491 /*
492  *      Periodic timer handler for the IPVS lblcr table.
493  *      It is used to collect stale entries when the number of entries
494  *      exceeds the maximum size of the table.
495  *
496  *      Fixme: we probably need a more complicated algorithm to collect
497  *             entries that have not been used for a long time even
498  *             if the number of entries doesn't exceed the maximum size
499  *             of the table.
500  *      The full expiration check is for this purpose now.
501  */
502 static void ip_vs_lblcr_check_expire(unsigned long data)
503 {
504         struct ip_vs_lblcr_table *tbl;
505         unsigned long now = jiffies;
506         int goal;
507         int i, j;
508         struct ip_vs_lblcr_entry *en, *nxt;
509
510         tbl = (struct ip_vs_lblcr_table *)data;
511
512         if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
513                 /* do full expiration check */
514                 ip_vs_lblcr_full_check(tbl);
515                 tbl->counter = 1;
516                 goto out;
517         }
518
519         if (atomic_read(&tbl->entries) <= tbl->max_size) {
520                 tbl->counter++;
521                 goto out;
522         }
523
524         goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
525         if (goal > tbl->max_size/2)
526                 goal = tbl->max_size/2;
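        /*
         * Worked example (made-up numbers): with max_size = 16384 and 20000
         * entries, goal = (20000 - 16384) * 4/3 = 4821, below the 8192 cap,
         * so the loop below stops after freeing about 4821 timed-out entries.
         */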
527
528         for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
529                 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
530
531                 write_lock(&tbl->lock);
532                 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
533                         if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
534                                 continue;
535
536                         ip_vs_lblcr_free(en);
537                         atomic_dec(&tbl->entries);
538                         goal--;
539                 }
540                 write_unlock(&tbl->lock);
541                 if (goal <= 0)
542                         break;
543         }
544         tbl->rover = j;
545
546   out:
547         mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
548 }
549
550
551 #ifdef CONFIG_IP_VS_LBLCR_DEBUG
552 static struct ip_vs_lblcr_table *lblcr_table_list;
553
554 /*
555  *      /proc/net/ip_vs_lblcr to display the mappings of
556  *                  destination IP address <==> its serverSet
557  */
558 static int
559 ip_vs_lblcr_getinfo(char *buffer, char **start, off_t offset, int length)
560 {
561         off_t pos=0, begin;
562         int len=0, size;
563         struct ip_vs_lblcr_table *tbl;
564         unsigned long now = jiffies;
565         int i;
566         struct ip_vs_lblcr_entry *en;
567
568         tbl = lblcr_table_list;
569
570         size = sprintf(buffer, "LastTime Dest IP address  Server set\n");
571         pos += size;
572         len += size;
573
574         for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
575                 read_lock_bh(&tbl->lock);
576                 list_for_each_entry(en, &tbl->bucket[i], list) {
577                         char tbuf[16];
578                         struct ip_vs_dest_list *d;
579
580                         sprintf(tbuf, "%u.%u.%u.%u", NIPQUAD(en->addr));
581                         size = sprintf(buffer+len, "%8lu %-16s ",
582                                        now-en->lastuse, tbuf);
583
584                         read_lock(&en->set.lock);
585                         for (d=en->set.list; d!=NULL; d=d->next) {
586                                 size += sprintf(buffer+len+size,
587                                                 "%u.%u.%u.%u ",
588                                                 NIPQUAD(d->dest->addr));
589                         }
590                         read_unlock(&en->set.lock);
591                         size += sprintf(buffer+len+size, "\n");
592                         len += size;
593                         pos += size;
594                         if (pos <= offset)
595                                 len=0;
596                         if (pos >= offset+length) {
597                                 read_unlock_bh(&tbl->lock);
598                                 goto done;
599                         }
600                 }
601                 read_unlock_bh(&tbl->lock);
602         }
603
604   done:
605         begin = len - (pos - offset);
606         *start = buffer + begin;
607         len -= begin;
608         if(len>length)
609                 len = length;
610         return len;
611 }
612 #endif
613
614
615 static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
616 {
617         int i;
618         struct ip_vs_lblcr_table *tbl;
619
620         /*
621          *    Allocate the ip_vs_lblcr_table for this service
622          */
623         tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC);
624         if (tbl == NULL) {
625                 IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
626                 return -ENOMEM;
627         }
628         svc->sched_data = tbl;
629         IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
630                   "current service\n",
631                   sizeof(struct ip_vs_lblcr_table));
632
633         /*
634          *    Initialize the hash buckets
635          */
636         for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
637                 INIT_LIST_HEAD(&tbl->bucket[i]);
638         }
639         rwlock_init(&tbl->lock);
640         tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
641         tbl->rover = 0;
642         tbl->counter = 1;
643
644         /*
645          *    Hook periodic timer for garbage collection
646          */
647         init_timer(&tbl->periodic_timer);
648         tbl->periodic_timer.data = (unsigned long)tbl;
649         tbl->periodic_timer.function = ip_vs_lblcr_check_expire;
650         tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
651         add_timer(&tbl->periodic_timer);
652
653 #ifdef CONFIG_IP_VS_LBLCR_DEBUG
654         lblcr_table_list = tbl;
655 #endif
656         return 0;
657 }
658
659
660 static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
661 {
662         struct ip_vs_lblcr_table *tbl = svc->sched_data;
663
664         /* remove periodic timer */
665         del_timer_sync(&tbl->periodic_timer);
666
667         /* we have to clean up the table entries here */
668         ip_vs_lblcr_flush(tbl);
669
670         /* release the table itself */
671         kfree(svc->sched_data);
672         IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
673                   sizeof(struct ip_vs_lblcr_table));
674
675         return 0;
676 }
677
678
679 static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
680 {
681         return 0;
682 }
683
684
685 static inline struct ip_vs_dest *
686 __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
687 {
688         struct ip_vs_dest *dest, *least;
689         int loh, doh;
690
691         /*
692          * We think the overhead of processing active connections is fifty
693          * times higher than that of inactive connections on average. (This
694          * factor of fifty may not be accurate; we will change it later.) We
695          * use the following formula to estimate the overhead:
696          *                dest->activeconns*50 + dest->inactconns
697          * and the load:
698          *                (dest overhead) / dest->weight
699          *
700          * Remember -- no floats in kernel mode!!!
701          * The comparison of h1*w2 > h2*w1 is equivalent to that of
702          *                h1/w1 > h2/w2
703          * if every weight is larger than zero.
704          *
705          * The server with weight=0 is quiesced and will not receive any
706          * new connection.
707          */
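        /*
         * Worked example (made-up numbers): dest A has 3 active and 10
         * inactive connections with weight 1; dest B has 5 active and 0
         * inactive connections with weight 3.
         *      overhead(A) = 3*50 + 10 = 160, load(A) = 160/1
         *      overhead(B) = 5*50 +  0 = 250, load(B) = 250/3 ~= 83
         * Since 160*3 > 250*1, B is the less loaded server and is chosen.
         */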
708         list_for_each_entry(dest, &svc->destinations, n_list) {
709                 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
710                         continue;
711
712                 if (atomic_read(&dest->weight) > 0) {
713                         least = dest;
714                         loh = atomic_read(&least->activeconns) * 50
715                                 + atomic_read(&least->inactconns);
716                         goto nextstage;
717                 }
718         }
719         return NULL;
720
721         /*
722          *    Find the destination with the least load.
723          */
724   nextstage:
725         list_for_each_entry_continue(dest, &svc->destinations, n_list) {
726                 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
727                         continue;
728
729                 doh = atomic_read(&dest->activeconns) * 50
730                         + atomic_read(&dest->inactconns);
731                 if (loh * atomic_read(&dest->weight) >
732                     doh * atomic_read(&least->weight)) {
733                         least = dest;
734                         loh = doh;
735                 }
736         }
737
738         IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
739                   "activeconns %d refcnt %d weight %d overhead %d\n",
740                   NIPQUAD(least->addr), ntohs(least->port),
741                   atomic_read(&least->activeconns),
742                   atomic_read(&least->refcnt),
743                   atomic_read(&least->weight), loh);
744
745         return least;
746 }
747
748
749 /*
750  *   If this destination server is overloaded and there is a less loaded
751  *   server, then return true.
752  */
753 static inline int
754 is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
755 {
756         if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
757                 struct ip_vs_dest *d;
758
759                 list_for_each_entry(d, &svc->destinations, n_list) {
760                         if (atomic_read(&d->activeconns)*2
761                             < atomic_read(&d->weight)) {
762                                 return 1;
763                         }
764                 }
765         }
766         return 0;
767 }
768
769
770 /*
771  *    Locality-Based (weighted) Least-Connection with Replication scheduling
772  */
773 static struct ip_vs_dest *
774 ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
775 {
776         struct ip_vs_dest *dest;
777         struct ip_vs_lblcr_table *tbl;
778         struct ip_vs_lblcr_entry *en;
779         struct iphdr *iph = ip_hdr(skb);
780
781         IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
782
783         tbl = (struct ip_vs_lblcr_table *)svc->sched_data;
784         en = ip_vs_lblcr_get(tbl, iph->daddr);
785         if (en == NULL) {
786                 dest = __ip_vs_wlc_schedule(svc, iph);
787                 if (dest == NULL) {
788                         IP_VS_DBG(1, "no destination available\n");
789                         return NULL;
790                 }
791                 en = ip_vs_lblcr_new(iph->daddr);
792                 if (en == NULL) {
793                         return NULL;
794                 }
795                 ip_vs_dest_set_insert(&en->set, dest);
796                 ip_vs_lblcr_hash(tbl, en);
797         } else {
798                 dest = ip_vs_dest_set_min(&en->set);
799                 if (!dest || is_overloaded(dest, svc)) {
800                         dest = __ip_vs_wlc_schedule(svc, iph);
801                         if (dest == NULL) {
802                                 IP_VS_DBG(1, "no destination available\n");
803                                 return NULL;
804                         }
805                         ip_vs_dest_set_insert(&en->set, dest);
806                 }
807                 if (atomic_read(&en->set.size) > 1 &&
808                     jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) {
809                         struct ip_vs_dest *m;
810                         m = ip_vs_dest_set_max(&en->set);
811                         if (m)
812                                 ip_vs_dest_set_erase(&en->set, m);
813                 }
814         }
815         en->lastuse = jiffies;
816
817         IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
818                   "--> server %u.%u.%u.%u:%d\n",
819                   NIPQUAD(en->addr),
820                   NIPQUAD(dest->addr),
821                   ntohs(dest->port));
822
823         return dest;
824 }
825
826
827 /*
828  *      IPVS LBLCR Scheduler structure
829  */
830 static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
831 {
832         .name =                 "lblcr",
833         .refcnt =               ATOMIC_INIT(0),
834         .module =               THIS_MODULE,
835         .init_service =         ip_vs_lblcr_init_svc,
836         .done_service =         ip_vs_lblcr_done_svc,
837         .update_service =       ip_vs_lblcr_update_svc,
838         .schedule =             ip_vs_lblcr_schedule,
839 };
840
841
842 static int __init ip_vs_lblcr_init(void)
843 {
844         INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
845         sysctl_header = register_sysctl_table(lblcr_root_table);
846 #ifdef CONFIG_IP_VS_LBLCR_DEBUG
847         proc_net_create(&init_net, "ip_vs_lblcr", 0, ip_vs_lblcr_getinfo);
848 #endif
849         return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
850 }
851
852
853 static void __exit ip_vs_lblcr_cleanup(void)
854 {
855 #ifdef CONFIG_IP_VS_LBLCR_DEBUG
856         proc_net_remove(&init_net, "ip_vs_lblcr");
857 #endif
858         unregister_sysctl_table(sysctl_header);
859         unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
860 }
861
862
863 module_init(ip_vs_lblcr_init);
864 module_exit(ip_vs_lblcr_cleanup);
865 MODULE_LICENSE("GPL");