package: haproxy

- update to 1.4.23 
   -  (CVE-2013-1912)
   -  fixup for consistent hash 
   -  refresh accept-proxy patch



git-svn-id: svn://svn.openwrt.org/openwrt/packages@36337 3c298f89-4303-0410-b956-a3cf2f4a3e73
This commit is contained in:
heil 2013-04-15 14:38:07 +00:00
parent 238d95974c
commit 58262c6392
46 changed files with 70 additions and 2959 deletions

View File

@ -9,12 +9,12 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=haproxy
PKG_VERSION:=1.4.22
PKG_RELEASE:=43
PKG_VERSION:=1.4.23
PKG_RELEASE:=03
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=http://haproxy.1wt.eu/download/1.4/src
PKG_MD5SUM:=a0b007c76f6a78524f3b3dd5e704979c
PKG_MD5SUM:=6535d5e58037ada4b58b439cebe03c79
include $(INCLUDE_DIR)/package.mk
@ -37,7 +37,7 @@ define Package/haproxy/description
endef
define Build/Compile
$(MAKE) TARGET=linux28 -C $(PKG_BUILD_DIR) \
$(MAKE) TARGET=linux2628 -C $(PKG_BUILD_DIR) \
DESTDIR="$(PKG_INSTALL_DIR)" \
CC="$(TARGET_CC)" \
CFLAGS="$(TARGET_CFLAGS)" \

View File

@ -0,0 +1,57 @@
From d16a1b2a818359e8c3ade85f789e66ed7ca9488c Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Fri, 12 Apr 2013 14:46:51 +0200
Subject: BUG/MAJOR: backend: consistent hash can loop forever in certain
circumstances
When the parameter passed to a consistent hash is not found, we fall back to
round-robin using chash_get_next_server(). This one stores the last visited
server in lbprm.chash.last, which can be NULL upon the first invocation or if
the only server was recently brought up.
The loop used to scan for a server is able to skip the previously attempted
server in case of a redispatch, by passing this previous server in srvtoavoid.
For this reason, the loop stops when the currently considered server is
different from srvtoavoid and different from the original chash.last.
A problem happens in a special sequence : if a connection to a server fails,
then all servers are removed from the farm, then the original server is added
again before the redispatch happens, we have chash.last = NULL and srvtoavoid
set to the only server in the farm. Then this server is always equal to
srvtoavoid and never to NULL, and the loop never stops.
The fix consists in assigning the stop point to the first encountered node if
it was not yet set.
This issue cannot happen with the map-based algorithm since it's based on an
index and not a stop point.
This issue was reported by Henry Qian who kindly provided lots of critically
useful information to figure out the conditions to reproduce the issue.
The fix needs to be backported to 1.4 which is also affected.
---
src/lb_chash.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/src/lb_chash.c b/src/lb_chash.c
index 58f1c9e..d65de74 100644
--- a/src/lb_chash.c
+++ b/src/lb_chash.c
@@ -332,6 +332,13 @@ struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid)
/* no node is available */
return NULL;
+ /* Note: if we came here after a down/up cycle with no last
+ * pointer, and after a redispatch (srvtoavoid is set), we
+ * must set stop to non-null otherwise we can loop forever.
+ */
+ if (!stop)
+ stop = node;
+
/* OK, we have a server. However, it may be saturated, in which
* case we don't want to reconsider it for now, so we'll simply
* skip it. Same if it's the server we try to avoid, in which
--
1.7.12.4.dirty

View File

@ -1,75 +0,0 @@
From 60f05a8eae1afda44d62066445a0c072659a1aa1 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 4 Oct 2012 08:01:43 +0200
Subject: BUG/MINOR: config: use a copy of the file name in proxy configurations
Each proxy contains a reference to the original config file and line
number where it was declared. The pointer used is just a reference to
the one passed to the function instead of being duplicated. The effect
is that it is not valid anymore at the end of the parsing and that all
proxies will be enumerated as coming from the same file on some late
configuration errors. This may happen for example when reporting SSL
certificate issues.
By copying using strdup(), we avoid this issue.
1.4 has the same issue, though no report of the proxy file name is done
out of the config section. Anyway a backport is recommended to ease
post-mortem analysis.
(cherry picked from commit 8113a5d78f2d2abe942f88a3a4df9f8bb5e535ba)
---
include/types/proxy.h | 2 +-
src/cfgparse.c | 4 ++--
src/haproxy.c | 1 +
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/include/types/proxy.h b/include/types/proxy.h
index 1773658..a06078a 100644
--- a/include/types/proxy.h
+++ b/include/types/proxy.h
@@ -320,7 +320,7 @@ struct proxy {
int no_options2; /* PR_O2_* */
struct {
- const char *file; /* file where the section appears */
+ char *file; /* file where the section appears */
int line; /* line where the section appears */
struct eb32_node id; /* place in the tree of used IDs */
struct eb_root used_listener_id;/* list of listener IDs in use */
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 643f065..90fdbff 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -1136,7 +1136,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
init_new_proxy(curproxy);
curproxy->next = proxy;
proxy = curproxy;
- curproxy->conf.file = file;
+ curproxy->conf.file = strdup(file);
curproxy->conf.line = linenum;
curproxy->last_change = now.tv_sec;
curproxy->id = strdup(args[1]);
@@ -3425,7 +3425,7 @@ stats_error_parsing:
newsrv->next = curproxy->srv;
curproxy->srv = newsrv;
newsrv->proxy = curproxy;
- newsrv->conf.file = file;
+ newsrv->conf.file = strdup(file);
newsrv->conf.line = linenum;
LIST_INIT(&newsrv->pendconns);
diff --git a/src/haproxy.c b/src/haproxy.c
index 6141a5b..2944462 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -732,6 +732,7 @@ void deinit(void)
int i;
while (p) {
+ free(p->conf.file);
free(p->id);
free(p->check_req);
free(p->cookie_name);
--
1.7.1

View File

@ -1,53 +0,0 @@
From 72ce4c8f4e0531c50b3a0914d77858440d16d914 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 4 Oct 2012 21:54:41 +0200
Subject: BUG/MINOR: epoll: correctly disable FD polling in fd_rem()
When calling fd_rem(), the polling was not correctly disabled because the
->prev state was set to zero instead of the previous value. fd_rem() is
very rarely used, only just before closing a socket.
The effect is that upon an error reported at the connection level, if the
task assigned to the connection was too slow to be woken up because of too
many other tasks in the run queue, the FD was still not disabled and caused
the connection handler to be called again with the same event until the task
was finally executed to close the fd.
This issue only affects the epoll poller, not the sepoll variant nor any of
the other ones.
It was already present in 1.4 and even 1.3 with the same almost unnoticeable
effects. The bug can in fact only be discovered during development where it
emphasizes other bugs.
It should be backported anyway.
(cherry picked from commit f8cfa447c676849e1d1b007353d4ea2f7231e4a0)
---
src/ev_epoll.c | 8 ++++++--
1 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
index b976868..0b22da6 100644
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -194,11 +194,15 @@ REGPRM2 static int __fd_clr(const int fd, int dir)
REGPRM1 static void __fd_rem(int fd)
{
uint32_t ofs = FD2OFS(fd);
+ uint32_t old_evt;
- if (unlikely(!((fd_evts[ofs] >> FD2BIT(fd)) & 3)))
+ old_evt = fd_evts[ofs] >> FD2BIT(fd);
+ old_evt &= 3;
+
+ if (unlikely(!old_evt))
return;
- alloc_chg_list(fd, 0);
+ alloc_chg_list(fd, old_evt);
fd_evts[ofs] &= ~FD2MSK(fd);
return;
}
--
1.7.1

View File

@ -1,191 +0,0 @@
From 69eeb17615cd032ec7b3a9496ddb811095e92cb4 Mon Sep 17 00:00:00 2001
From: Baptiste <bedis9@gmail.com>
Date: Sat, 8 Sep 2012 23:10:03 +0200
Subject: CONTRIB: halog: sort URLs by avg bytes_read or total bytes_read
The patch attached to this mail brings ability to sort URLs by
averaged bytes read and total bytes read in HALog tool.
In most cases, bytes read is also the object size.
The purpose of this patch is to know which URL consume the most
bandwith, in average or in total.
It may be interesting as well to know the standard deviation (écart-type
in French) for some counters (like bytes_read).
The results:
- Sorting by average bytes read per URL:
./halog -uba <~/tmp/haproxy.log | column -t | head
2246 lines in, 302 lines out, 194 parsing errors
18 0 5101 283 5101 283 126573 2278327 /lib/exe/js.php
1 0 1 1 1 1 106734 106734 /wp-admin/images/screenshots/theme-customizer.png
2 0 2 1 2 1 106511 213022 /wp-admin/css/wp-admin.css
1 0 1 1 1 1 96698 96698 /wp-admin/images/screenshots/captions-1.png
1 0 1 1 1 1 73165 73165 /wp-admin/images/screenshots/flex-header-1.png
4 0 0 0 0 0 64832 259328 /cuisine/wp-content/plugins/stats/open-flash-chart.swf
1 0 0 0 0 0 48647 48647 /wp-admin/images/screenshots/flex-header-3.png
1 0 0 0 0 0 44046 44046 /wp-admin/images/screenshots/captions-2.png
1 0 1 1 1 1 38830 38830 /wp-admin/images/screenshots/flex-header-2.png
- Sorting by total bytes read per URL:
./halog -ubt <~/tmp/haproxy.log | column -t | head
2246 lines in, 302 lines out, 194 parsing errors
18 0 5101 283 5101 283 126573 2278327 /lib/exe/js.php
60 0 14387 239 14387 239 10081 604865 /lib/exe/css.php
64 2 8820 137 8819 142 7742 495524 /doku.php
14 0 250 17 250 17 24045 336632 /wp-admin/load-scripts.php
71 0 6422 90 6422 90 4048 287419 /wp-admin/
4 0 0 0 0 0 64832 259328 /cuisine/wp-content/plugins/stats/open-flash-chart.swf
2 0 2 1 2 1 106511 213022 /wp-admin/css/wp-admin.css
31 3 5423 174 5040 180 6804 210931 /index
10 0 429 42 429 42 18009 180093 /cuisine/files/2011/10/tarte_figue_amande-e1318281546905-225x300.jpg
(cherry picked from commit 61aaad06e85ae7f46cf5589bce8bc7f9331e7962)
---
contrib/halog/halog.c | 39 ++++++++++++++++++++++++++++++++-------
1 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index f19be0b..274b7f3 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -29,6 +29,7 @@
#define SERVER_FIELD 8
#define TIME_FIELD 9
#define STATUS_FIELD 10
+#define BYTES_SENT_FIELD 11
#define TERM_CODES_FIELD 14
#define CONN_FIELD 15
#define QUEUE_LEN_FIELD 16
@@ -67,6 +68,7 @@ struct url_stat {
char *url;
unsigned long long total_time; /* sum(all reqs' times) */
unsigned long long total_time_ok; /* sum(all OK reqs' times) */
+ unsigned long long total_bytes_sent; /* sum(all bytes sent) */
unsigned int nb_err, nb_req;
};
@@ -94,8 +96,6 @@ struct url_stat {
#define FILT_COUNT_URL_TAVG 0x040000
#define FILT_COUNT_URL_TTOTO 0x080000
#define FILT_COUNT_URL_TAVGO 0x100000
-#define FILT_COUNT_URL_ANY (FILT_COUNT_URL_ONLY|FILT_COUNT_URL_COUNT|FILT_COUNT_URL_ERR| \
- FILT_COUNT_URL_TTOT|FILT_COUNT_URL_TAVG|FILT_COUNT_URL_TTOTO|FILT_COUNT_URL_TAVGO)
#define FILT_HTTP_ONLY 0x200000
#define FILT_TERM_CODE_NAME 0x400000
@@ -106,6 +106,13 @@ struct url_stat {
#define FILT_QUEUE_ONLY 0x4000000
#define FILT_QUEUE_SRV_ONLY 0x8000000
+#define FILT_COUNT_URL_BAVG 0x10000000
+#define FILT_COUNT_URL_BTOT 0x20000000
+
+#define FILT_COUNT_URL_ANY (FILT_COUNT_URL_ONLY|FILT_COUNT_URL_COUNT|FILT_COUNT_URL_ERR| \
+ FILT_COUNT_URL_TTOT|FILT_COUNT_URL_TAVG|FILT_COUNT_URL_TTOTO|FILT_COUNT_URL_TAVGO| \
+ FILT_COUNT_URL_BAVG|FILT_COUNT_URL_BTOT)
+
unsigned int filter = 0;
unsigned int filter_invert = 0;
const char *line;
@@ -128,9 +135,10 @@ void usage(FILE *output, const char *msg)
fprintf(output,
"%s"
"Usage: halog [-h|--help] for long help\n"
- " halog [-q] [-c] [-v] {-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto}\n"
+ " halog [-q] [-c]\n"
+ " {-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt}\n"
" [-s <skip>] [-e|-E] [-H] [-rt|-RT <time>] [-ad <delay>] [-ac <count>]\n"
- " [-Q|-QS] [-tcn|-TCN <termcode>] [ -hs|-HS [min][:[max]] ] < log\n"
+ " [-v] [-Q|-QS] [-tcn|-TCN <termcode>] [ -hs|-HS [min][:[max]] ] < log\n"
"\n",
msg ? msg : ""
);
@@ -171,6 +179,7 @@ void help()
" -u : by URL, -uc : request count, -ue : error count\n"
" -ua : average response time, -uto : average total time\n"
" -uao, -uto: average times computed on valid ('OK') requests\n"
+ " -uba, -ubt: average bytes returned, total bytes returned\n"
);
exit(0);
}
@@ -632,6 +641,10 @@ int main(int argc, char **argv)
filter |= FILT_COUNT_URL_TAVGO;
else if (strcmp(argv[0], "-uto") == 0)
filter |= FILT_COUNT_URL_TTOTO;
+ else if (strcmp(argv[0], "-uba") == 0)
+ filter |= FILT_COUNT_URL_BAVG;
+ else if (strcmp(argv[0], "-ubt") == 0)
+ filter |= FILT_COUNT_URL_BTOT;
else if (strcmp(argv[0], "-o") == 0) {
if (output_file)
die("Fatal: output file name already specified.\n");
@@ -1029,6 +1042,10 @@ int main(int argc, char **argv)
ustat->node.val.key = ustat->total_time_ok;
else if (filter & FILT_COUNT_URL_TAVGO)
ustat->node.val.key = (ustat->nb_req - ustat->nb_err) ? ustat->total_time_ok / (ustat->nb_req - ustat->nb_err) : 0;
+ else if (filter & FILT_COUNT_URL_BAVG)
+ ustat->node.val.key = ustat->nb_req ? ustat->total_bytes_sent / ustat->nb_req : 0;
+ else if (filter & FILT_COUNT_URL_BTOT)
+ ustat->node.val.key = ustat->total_bytes_sent;
else
ustat->node.val.key = 0;
@@ -1038,19 +1055,21 @@ int main(int argc, char **argv)
timers[0] = timers[1];
}
- printf("#req err ttot tavg oktot okavg url\n");
+ printf("#req err ttot tavg oktot okavg bavg btot url\n");
/* scan the tree in its reverse sorting order */
node = eb_last(&timers[0]);
while (node) {
ustat = container_of(node, struct url_stat, node.url.node);
- printf("%d %d %Ld %Ld %Ld %Ld %s\n",
+ printf("%d %d %Ld %Ld %Ld %Ld %Ld %Ld %s\n",
ustat->nb_req,
ustat->nb_err,
ustat->total_time,
ustat->nb_req ? ustat->total_time / ustat->nb_req : 0,
ustat->total_time_ok,
(ustat->nb_req - ustat->nb_err) ? ustat->total_time_ok / (ustat->nb_req - ustat->nb_err) : 0,
+ ustat->nb_req ? ustat->total_bytes_sent / ustat->nb_req : 0,
+ ustat->total_bytes_sent,
ustat->url);
node = eb_prev(node);
@@ -1233,6 +1252,7 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
struct ebpt_node *ebpt_old;
const char *b, *e;
int f, err, array[5];
+ int val;
/* let's collect the response time */
if (!time_field) {
@@ -1276,12 +1296,16 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
ustat->total_time = (array[3] >= 0) ? array[3] : array[4];
ustat->total_time_ok = (array[3] >= 0) ? array[3] : 0;
+ e = field_start(e, BYTES_SENT_FIELD - TIME_FIELD + 1);
+ val = str2ic(e);
+ ustat->total_bytes_sent = val;
+
/* the line may be truncated because of a bad request or anything like this,
* without a method. Also, if it does not begin with an quote, let's skip to
* the next field because it's a capture. Let's fall back to the "method" itself
* if there's nothing else.
*/
- e = field_start(e, METH_FIELD - TIME_FIELD + 1); // avg 100 ns per line
+ e = field_start(e, METH_FIELD - BYTES_SENT_FIELD + 1);
while (*e != '"' && *e) {
/* Note: some syslog servers escape quotes ! */
if (*e == '\\' && e[1] == '"')
@@ -1324,6 +1348,7 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
ustat_old->nb_err += ustat->nb_err;
ustat_old->total_time += ustat->total_time;
ustat_old->total_time_ok += ustat->total_time_ok;
+ ustat_old->total_bytes_sent += ustat->total_bytes_sent;
} else {
ustat->url = ustat->node.url.key = strdup(ustat->node.url.key);
ustat = NULL; /* node was used */
--
1.7.1

View File

@ -1,118 +0,0 @@
From b4d12bdacf6ff4cc96abaffb8c3e801f94a84de2 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Wed, 10 Oct 2012 10:26:22 +0200
Subject: MINOR: halog: sort output by cookie code
It's sometimes useful to have the output sorted by cookie code to see
the ratios of NI vs VN for example. This is now possible with -cc.
(cherry picked from commit 8a09b663a829f7012c50743adaf3d29cc81700c5)
---
contrib/halog/halog.c | 36 +++++++++++++++++++++++++++++++++---
1 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index 274b7f3..e4d62e9 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -1,7 +1,7 @@
/*
* haproxy log statistics reporter
*
- * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -113,6 +113,8 @@ struct url_stat {
FILT_COUNT_URL_TTOT|FILT_COUNT_URL_TAVG|FILT_COUNT_URL_TTOTO|FILT_COUNT_URL_TAVGO| \
FILT_COUNT_URL_BAVG|FILT_COUNT_URL_BTOT)
+#define FILT_COUNT_COOK_CODES 0x40000000
+
unsigned int filter = 0;
unsigned int filter_invert = 0;
const char *line;
@@ -124,6 +126,7 @@ const char *fgets2(FILE *stream);
void filter_count_url(const char *accept_field, const char *time_field, struct timer **tptr);
void filter_count_srv_status(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_count_cook_codes(const char *accept_field, const char *time_field, struct timer **tptr);
void filter_count_term_codes(const char *accept_field, const char *time_field, struct timer **tptr);
void filter_count_status(const char *accept_field, const char *time_field, struct timer **tptr);
void filter_graphs(const char *accept_field, const char *time_field, struct timer **tptr);
@@ -136,7 +139,7 @@ void usage(FILE *output, const char *msg)
"%s"
"Usage: halog [-h|--help] for long help\n"
" halog [-q] [-c]\n"
- " {-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt}\n"
+ " {-cc|-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt}\n"
" [-s <skip>] [-e|-E] [-H] [-rt|-RT <time>] [-ad <delay>] [-ac <count>]\n"
" [-v] [-Q|-QS] [-tcn|-TCN <termcode>] [ -hs|-HS [min][:[max]] ] < log\n"
"\n",
@@ -172,6 +175,7 @@ void help()
" -c only report the number of lines that would have been printed\n"
" -pct output connect and response times percentiles\n"
" -st output number of requests per HTTP status code\n"
+ " -cc output number of requests per cookie code (2 chars)\n"
" -tc output number of requests per termination code (2 chars)\n"
" -srv output statistics per server (time, requests, errors)\n"
" -u* output statistics per URL (time, requests, errors)\n"
@@ -595,6 +599,8 @@ int main(int argc, char **argv)
filter |= FILT_COUNT_STATUS;
else if (strcmp(argv[0], "-srv") == 0)
filter |= FILT_COUNT_SRV_STATUS;
+ else if (strcmp(argv[0], "-cc") == 0)
+ filter |= FILT_COUNT_COOK_CODES;
else if (strcmp(argv[0], "-tc") == 0)
filter |= FILT_COUNT_TERM_CODES;
else if (strcmp(argv[0], "-tcn") == 0) {
@@ -676,6 +682,8 @@ int main(int argc, char **argv)
line_filter = filter_graphs;
else if (filter & FILT_COUNT_STATUS)
line_filter = filter_count_status;
+ else if (filter & FILT_COUNT_COOK_CODES)
+ line_filter = filter_count_cook_codes;
else if (filter & FILT_COUNT_TERM_CODES)
line_filter = filter_count_term_codes;
else if (filter & FILT_COUNT_SRV_STATUS)
@@ -1005,7 +1013,7 @@ int main(int argc, char **argv)
lines_out++;
}
}
- else if (filter & FILT_COUNT_TERM_CODES) {
+ else if (filter & (FILT_COUNT_TERM_CODES|FILT_COUNT_COOK_CODES)) {
/* output all statuses in the form of <code> <occurrences> */
n = eb32_first(&timers[0]);
while (n) {
@@ -1129,6 +1137,28 @@ void filter_count_status(const char *accept_field, const char *time_field, struc
t2->count++;
}
+void filter_count_cook_codes(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct timer *t2;
+ const char *b;
+ int val;
+
+ if (time_field)
+ b = field_start(time_field, TERM_CODES_FIELD - TIME_FIELD + 1);
+ else
+ b = field_start(accept_field, TERM_CODES_FIELD - ACCEPT_FIELD + 1);
+
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ return;
+ }
+
+ val = 256 * b[2] + b[3];
+
+ t2 = insert_value(&timers[0], tptr, val);
+ t2->count++;
+}
+
void filter_count_term_codes(const char *accept_field, const char *time_field, struct timer **tptr)
{
struct timer *t2;
--
1.7.1

View File

@ -1,29 +0,0 @@
From 113e6626ca26de31e2523a5b197a5bac4ca73dcc Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Wed, 10 Oct 2012 13:41:52 +0200
Subject: BUG/MINOR: halog: -ad/-ac report the correct number of output lines
There was a lines_out++ left from earlier code, causing each input
line to be counted as an output line.
This fix also affects 1.4 and should be backported.
(cherry picked from commit 0a706880160167f872247723c6a041eb31a20c29)
---
contrib/halog/halog.c | 1 -
1 files changed, 0 insertions(+), 1 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index e4d62e9..8f7f04a 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -1111,7 +1111,6 @@ void filter_accept_holes(const char *accept_field, const char *time_field, struc
t2 = insert_value(&timers[0], tptr, val);
t2->count++;
- lines_out++;
return;
}
--
1.7.1

View File

@ -1,27 +0,0 @@
From 1fd5d70b52a1d3b3f12e577cc158d0988e103f0d Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Wed, 10 Oct 2012 14:57:35 +0200
Subject: BUG/MINOR: halog: fix help message for -ut/-uto
Erroneous copy-paste suggesting wrong option.
(cherry picked from commit 4201df77df34492be89e9c720397ff66bc5775d9)
---
contrib/halog/halog.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index 8f7f04a..4e3cfc4 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -181,7 +181,7 @@ void help()
" -u* output statistics per URL (time, requests, errors)\n"
" Additional characters indicate the output sorting key :\n"
" -u : by URL, -uc : request count, -ue : error count\n"
- " -ua : average response time, -uto : average total time\n"
+ " -ua : average response time, -ut : average total time\n"
" -uao, -uto: average times computed on valid ('OK') requests\n"
" -uba, -ubt: average bytes returned, total bytes returned\n"
);
--
1.7.1

View File

@ -1,75 +0,0 @@
From 7a883f8542dffb7299a903eb9a82fed3980337fa Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sat, 20 Oct 2012 10:38:09 +0200
Subject: BUG/MEDIUM: http: set DONTWAIT on data when switching to tunnel mode
Jaroslaw Bojar diagnosed an issue when haproxy switches to tunnel mode
after a transfer. The response data are sent with the MSG_MORE flag,
causing them to be needlessly queued in the kernel. In order to fix this,
we set the CF_NEVER_WAIT flag on the channels when switching to tunnel
mode.
One issue remained with client-side keep-alive : if the response is sent
before the end of the request, it suffers the same issue for the same
reason. This is easily addressed by setting the CF_SEND_DONTWAIT flag
on the channel when the response has been parsed and we're waiting for
the other side.
The same issue is present in 1.4 so the fix must be backported.
(cherry picked from commit fc47f91c9cc66e3652d98deab82d6e5fe3a59711)
---
src/proto_http.c | 10 ++++++++++
1 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index dc65cbd..2ba38f1 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -4178,6 +4178,7 @@ int http_sync_req_state(struct session *s)
/* if any side switches to tunnel mode, the other one does too */
buffer_auto_read(buf);
txn->req.msg_state = HTTP_MSG_TUNNEL;
+ buf->flags |= BF_NEVER_WAIT;
goto wait_other_side;
}
@@ -4211,6 +4212,7 @@ int http_sync_req_state(struct session *s)
*/
buffer_auto_read(buf);
txn->req.msg_state = HTTP_MSG_TUNNEL;
+ buf->flags |= BF_NEVER_WAIT;
}
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW)) {
@@ -4297,6 +4299,7 @@ int http_sync_res_state(struct session *s)
/* if any side switches to tunnel mode, the other one does too */
buffer_auto_read(buf);
txn->rsp.msg_state = HTTP_MSG_TUNNEL;
+ buf->flags |= BF_NEVER_WAIT;
goto wait_other_side;
}
@@ -4334,6 +4337,7 @@ int http_sync_res_state(struct session *s)
*/
buffer_auto_read(buf);
txn->rsp.msg_state = HTTP_MSG_TUNNEL;
+ buf->flags |= BF_NEVER_WAIT;
}
if (buf->flags & (BF_SHUTW|BF_SHUTW_NOW)) {
@@ -4379,6 +4383,12 @@ int http_sync_res_state(struct session *s)
wait_other_side:
http_silent_debug(__LINE__, s);
+
+ /* We force the response to leave immediately if we're waiting for the
+ * other side, since there is no pending shutdown to push it out.
+ */
+ if (!(buf->flags & BF_OUT_EMPTY))
+ buf->flags |= BF_SEND_DONTWAIT;
return txn->rsp.msg_state != old_state || buf->flags != old_flags;
}
--
1.7.1

View File

@ -1,42 +0,0 @@
From ce64f84c174fc65ea02f3f1cba2b3295c0b34198 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Fri, 26 Oct 2012 16:04:28 +0200
Subject: BUG/MEDIUM: command-line option -D must have precedence over "debug"
From the beginning it has been said that -D must always be used on the
command line from startup scripts so that haproxy does not accidentally
stay in foreground when loaded from init script... Except that this has
not been true for a long time now.
The fix is easy and must be backported to 1.4 too which is affected.
(cherry picked from commit 772f0dd545eb3837e2bc794f4c4863663be3742c)
---
src/haproxy.c | 11 +++++++++--
1 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/haproxy.c b/src/haproxy.c
index 2944462..ec481aa 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -649,9 +649,16 @@ void init(int argc, char **argv)
if (arg_mode & (MODE_DEBUG | MODE_FOREGROUND)) {
/* command line debug mode inhibits configuration mode */
global.mode &= ~(MODE_DAEMON | MODE_QUIET);
+ global.mode |= (arg_mode & (MODE_DEBUG | MODE_FOREGROUND));
}
- global.mode |= (arg_mode & (MODE_DAEMON | MODE_FOREGROUND | MODE_QUIET |
- MODE_VERBOSE | MODE_DEBUG ));
+
+ if (arg_mode & MODE_DAEMON) {
+ /* command line daemon mode inhibits foreground and debug modes mode */
+ global.mode &= ~(MODE_DEBUG | MODE_FOREGROUND);
+ global.mode |= (arg_mode & MODE_DAEMON);
+ }
+
+ global.mode |= (arg_mode & (MODE_QUIET | MODE_VERBOSE));
if ((global.mode & MODE_DEBUG) && (global.mode & (MODE_DAEMON | MODE_QUIET))) {
Warning("<debug> mode incompatible with <quiet> and <daemon>. Keeping <debug> only.\n");
--
1.7.1

View File

@ -1,49 +0,0 @@
From 5b4d0775f0afd10c80e63842dca85a69281c9433 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cyril=20Bont=C3=A9?= <cyril.bonte@free.fr>
Date: Thu, 1 Nov 2012 18:48:23 +0100
Subject: BUG: fix garbage data when http-send-name-header replaces an existing header
This patch is an attempt to prevent sending garbage data when
http-send-name-header replaced existing headers in the request.
http-send-name-header is applied late in the request processing. The buffer is
already ready to be sent to the backend server. When headers are removed, the
data length is not modified, resulting in sending more data than required. By
reducing the data length to send after removing them, this should fix the
issue.
This patch doesn't need to be ported to haproxy-1.5, which already readjust the
amount of data to be forwarded.
This issue was reported by Michael Seiferle at BaseX, who also tested and
confirmed the fix.
---
src/proto_http.c | 5 +++++
1 files changed, 5 insertions(+), 0 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index 2ba38f1..1ad838b 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -3981,6 +3981,7 @@ int http_send_name_header(struct http_txn *txn, struct http_msg *msg, struct buf
char *hdr_val;
+ int delta = txn->req.eoh;
while (http_find_header2(hdr_name, hdr_name_len, msg->sol, &txn->hdr_idx, &ctx)) {
/* remove any existing values from the header */
http_remove_header2(msg, buf, &txn->hdr_idx, &ctx);
@@ -3994,6 +3995,10 @@ int http_send_name_header(struct http_txn *txn, struct http_msg *msg, struct buf
*hdr_val++ = ' ';
hdr_val += strlcpy2(hdr_val, srv_name, trash + trashlen - hdr_val);
http_header_add_tail2(buf, msg, &txn->hdr_idx, trash, hdr_val - trash);
+ delta -= txn->req.eoh;
+
+ /* Adjust buffer data length to send */
+ buf->send_max -= delta;
return 0;
}
--
1.7.1

View File

@ -1,6 +1,6 @@
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -1339,6 +1339,7 @@ bind [<address>]:<port_range> [, ...] tr
@@ -1343,6 +1343,7 @@ bind [<address>]:<port_range> [, ...] tr
bind [<address>]:<port_range> [, ...] id <id>
bind [<address>]:<port_range> [, ...] name <name>
bind [<address>]:<port_range> [, ...] defer-accept
@ -8,7 +8,7 @@
Define one or several listening addresses and/or ports in a frontend.
May be used in sections : defaults | frontend | listen | backend
no | yes | yes | no
@@ -1419,6 +1420,19 @@ bind [<address>]:<port_range> [, ...] de
@@ -1423,6 +1424,19 @@ bind [<address>]:<port_range> [, ...] de
with front firewalls which would see an established
connection while the proxy will only see it in SYN_RECV.
@ -28,7 +28,7 @@
It is possible to specify a list of address:port combinations delimited by
commas. The frontend will then listen on all of these addresses. There is no
fixed limit to the number of addresses and ports which can be listened on in
@@ -1429,8 +1443,10 @@ bind [<address>]:<port_range> [, ...] de
@@ -1433,8 +1447,10 @@ bind [<address>]:<port_range> [, ...] de
listen http_proxy
bind :80,:443
bind 10.0.0.1:10080,10.0.0.1:10443
@ -40,7 +40,7 @@
bind-process [ all | odd | even | <number 1-32> ] ...
@@ -7231,7 +7247,9 @@ marked with a star ('*') after the field
@@ -7237,7 +7253,9 @@ marked with a star ('*') after the field
Detailed fields description :
- "client_ip" is the IP address of the client which initiated the TCP
@ -51,7 +51,7 @@
- "client_port" is the TCP port of the client which initiated the connection.
@@ -7404,7 +7422,9 @@ with a star ('*') after the field name b
@@ -7410,7 +7428,9 @@ with a star ('*') after the field name b
Detailed fields description :
- "client_ip" is the IP address of the client which initiated the TCP
@ -178,7 +178,7 @@
file, linenum, args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
@@ -5732,6 +5742,9 @@ out_uri_auth_compat:
@@ -5726,6 +5736,9 @@ out_uri_auth_compat:
listener->handler = process_session;
listener->analysers |= curproxy->fe_req_ana;
@ -392,7 +392,7 @@
*/
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -4127,7 +4127,8 @@ void http_end_txn_clean_session(struct s
@@ -4156,7 +4156,8 @@ void http_end_txn_clean_session(struct s
if (s->rep->lr >= s->rep->data + s->rep->size)
s->rep->lr -= s->req->size;
@ -402,7 +402,7 @@
s->rep->analysers = 0;
http_silent_debug(__LINE__, s);
@@ -7670,7 +7671,6 @@ void http_reset_txn(struct session *s)
@@ -7739,7 +7740,6 @@ void http_reset_txn(struct session *s)
http_init_txn(s);
s->be = s->fe;
@ -412,7 +412,7 @@
/* re-init store persistence */
--- a/src/session.c
+++ b/src/session.c
@@ -1055,6 +1055,12 @@ resync_stream_interface:
@@ -1071,6 +1071,12 @@ resync_stream_interface:
while (ana_list && max_loops--) {
/* Warning! ensure that analysers are always placed in ascending order! */

View File

@ -1,54 +0,0 @@
From c998e6347e6e6940263157fb341a8444d789aa5f Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Tue, 3 Jan 2012 09:23:03 +0100
Subject: OPTIM: halog: keep a fast path for the lines-count only
[ note: this backport is not absolutely required for 1.4 but it eases
further backports ]
Using "halog -c" is still something quite common to perform on logs,
but unfortunately since the recent added controls, it was sensibly
slowed down due to the parsing of the accept date field.
Now we use a specific loop for the case where nothing is needed from
the input, and this sped up the line counting by 2.5x. A 2.4 GHz Xeon
now counts lines at a rate of 2 GB of logs per second.
(cherry picked from commit e1a908c369ab988448c6672066ad1d09b6919d02)
---
contrib/halog/halog.c | 12 +++++++++++-
1 files changed, 11 insertions(+), 1 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index 4e3cfc4..3912807 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -702,6 +702,16 @@ int main(int argc, char **argv)
posix_fadvise(0, 0, 0, POSIX_FADV_SEQUENTIAL);
#endif
+ if (!line_filter &&
+ !(filter & (FILT_HTTP_ONLY|FILT_TIME_RESP|FILT_ERRORS_ONLY|FILT_HTTP_STATUS|FILT_QUEUE_ONLY|FILT_QUEUE_SRV_ONLY|FILT_TERM_CODE_NAME))) {
+ /* read the whole file at once first */
+ if (!filter_invert)
+ while (fgets2(stdin) != NULL)
+ lines_out++;
+
+ goto skip_filters;
+ }
+
while ((line = fgets2(stdin)) != NULL) {
linenum++;
time_field = NULL; accept_field = NULL;
@@ -859,7 +869,7 @@ int main(int argc, char **argv)
lines_out++; /* we're just counting lines */
}
-
+ skip_filters:
/*****************************************************
* Here we've finished reading all input. Depending on the
* filters, we may still have some analysis to run on the
--
1.7.1

View File

@ -1,134 +0,0 @@
From 9f03ab8e3ec603f2aed944a7887bcdf7be52009a Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Wed, 10 Oct 2012 16:49:28 +0200
Subject: MINOR: halog: add a parameter to limit output line count
Sometimes it's useful to limit the output to a number of lines, for
example when output is already sorted (eg: 10 slowest URLs, ...). Now
we can use -m for this.
(cherry picked from commit 667c905fe5153a8754bd8177c80dd9e6c245a0b0)
[ this patch introduces a but that is fixed by commit a1629a59 ]
---
contrib/halog/halog.c | 28 ++++++++++++++++++++++++----
1 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index 3912807..7e16cd5 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -121,6 +121,7 @@ const char *line;
int linenum = 0;
int parse_err = 0;
int lines_out = 0;
+int lines_max = -1;
const char *fgets2(FILE *stream);
@@ -138,7 +139,7 @@ void usage(FILE *output, const char *msg)
fprintf(output,
"%s"
"Usage: halog [-h|--help] for long help\n"
- " halog [-q] [-c]\n"
+ " halog [-q] [-c] [-m <lines>]\n"
" {-cc|-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt}\n"
" [-s <skip>] [-e|-E] [-H] [-rt|-RT <time>] [-ad <delay>] [-ac <count>]\n"
" [-v] [-Q|-QS] [-tcn|-TCN <termcode>] [ -hs|-HS [min][:[max]] ] < log\n"
@@ -170,7 +171,7 @@ void help()
"Modifiers\n"
" -v invert the input filtering condition\n"
" -q don't report errors/warnings\n"
- "\n"
+ " -m <lines> limit output to the first <lines> lines\n"
"Output filters - only one may be used at a time\n"
" -c only report the number of lines that would have been printed\n"
" -pct output connect and response times percentiles\n"
@@ -575,6 +576,11 @@ int main(int argc, char **argv)
argc--; argv++;
skip_fields = atol(*argv);
}
+ else if (strcmp(argv[0], "-m") == 0) {
+ if (argc < 2) die("missing option for -m");
+ argc--; argv++;
+ lines_max = atol(*argv);
+ }
else if (strcmp(argv[0], "-e") == 0)
filter |= FILT_ERRORS_ONLY;
else if (strcmp(argv[0], "-E") == 0)
@@ -702,7 +708,7 @@ int main(int argc, char **argv)
posix_fadvise(0, 0, 0, POSIX_FADV_SEQUENTIAL);
#endif
- if (!line_filter &&
+ if (!line_filter && lines_max >= 0 &&
!(filter & (FILT_HTTP_ONLY|FILT_TIME_RESP|FILT_ERRORS_ONLY|FILT_HTTP_STATUS|FILT_QUEUE_ONLY|FILT_QUEUE_SRV_ONLY|FILT_TERM_CODE_NAME))) {
/* read the whole file at once first */
if (!filter_invert)
@@ -867,6 +873,8 @@ int main(int argc, char **argv)
line_filter(accept_field, time_field, &t);
else
lines_out++; /* we're just counting lines */
+ if (lines_out >= lines_max)
+ break;
}
skip_filters:
@@ -904,8 +912,10 @@ int main(int argc, char **argv)
ms = h % 1000; h = h / 1000;
s = h % 60; h = h / 60;
m = h % 60; h = h / 60;
- lines_out++;
printf("%02d:%02d:%02d.%03d %d %d %d\n", h, m, s, ms, last, d, t->count);
+ lines_out++;
+ if (lines_out >= lines_max)
+ break;
}
n = eb32_next(n);
}
@@ -937,6 +947,8 @@ int main(int argc, char **argv)
if (d > 0.0) {
printf("%d %d %f\n", f, last, d+1.0);
lines_out++;
+ if (lines_out >= lines_max)
+ break;
}
n = eb32_next(n);
@@ -994,6 +1006,8 @@ int main(int argc, char **argv)
t = container_of(n, struct timer, node);
printf("%d %d\n", n->key, t->count);
lines_out++;
+ if (lines_out >= lines_max)
+ break;
n = eb32_next(n);
}
}
@@ -1021,6 +1035,8 @@ int main(int argc, char **argv)
(int)(srv->cum_ct / (srv->nb_ct?srv->nb_ct:1)), (int)(srv->cum_rt / (srv->nb_rt?srv->nb_rt:1)));
srv_node = ebmb_next(srv_node);
lines_out++;
+ if (lines_out >= lines_max)
+ break;
}
}
else if (filter & (FILT_COUNT_TERM_CODES|FILT_COUNT_COOK_CODES)) {
@@ -1030,6 +1046,8 @@ int main(int argc, char **argv)
t = container_of(n, struct timer, node);
printf("%c%c %d\n", (n->key >> 8), (n->key) & 255, t->count);
lines_out++;
+ if (lines_out >= lines_max)
+ break;
n = eb32_next(n);
}
}
@@ -1092,6 +1110,8 @@ int main(int argc, char **argv)
node = eb_prev(node);
lines_out++;
+ if (lines_out >= lines_max)
+ break;
}
}
--
1.7.1

View File

@ -1,110 +0,0 @@
From e1e14b5d2a0ff3097c93f4605f33b38df1e3e266 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Tue, 13 Nov 2012 20:48:15 +0100
Subject: BUG: halog: fix broken output limitation
Commit 667c905f introduced parameter -m to halog which limits the size
of the output. Unfortunately it is completely broken in that it doesn't
check that the limit was previously set or not, and also prevents a
simple counting operation from returning anything if a limit is not set.
Note that the -gt and -pct outputs behave differently in face of this
limit, since they count the valid output lines BEFORE actually producing
the data, so the limit really applies to valid input lines.
(cherry picked from commit a1629a59d17208897622d4e0d8ecddf157d55074)
---
contrib/halog/halog.c | 26 +++++++++++---------------
1 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index 7e16cd5..61034ec 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -708,11 +708,11 @@ int main(int argc, char **argv)
posix_fadvise(0, 0, 0, POSIX_FADV_SEQUENTIAL);
#endif
- if (!line_filter && lines_max >= 0 &&
+ if (!line_filter && /* FILT_COUNT_ONLY ( see above), and no input filter (see below) */
!(filter & (FILT_HTTP_ONLY|FILT_TIME_RESP|FILT_ERRORS_ONLY|FILT_HTTP_STATUS|FILT_QUEUE_ONLY|FILT_QUEUE_SRV_ONLY|FILT_TERM_CODE_NAME))) {
- /* read the whole file at once first */
+ /* read the whole file at once first, ignore it if inverted output */
if (!filter_invert)
- while (fgets2(stdin) != NULL)
+ while ((lines_max < 0 || lines_out < lines_max) && fgets2(stdin) != NULL)
lines_out++;
goto skip_filters;
@@ -872,8 +872,8 @@ int main(int argc, char **argv)
if (line_filter)
line_filter(accept_field, time_field, &t);
else
- lines_out++; /* we're just counting lines */
- if (lines_out >= lines_max)
+ lines_out++; /* FILT_COUNT_ONLY was used, so we're just counting lines */
+ if (lines_max >= 0 && lines_out >= lines_max)
break;
}
@@ -914,7 +914,7 @@ int main(int argc, char **argv)
m = h % 60; h = h / 60;
printf("%02d:%02d:%02d.%03d %d %d %d\n", h, m, s, ms, last, d, t->count);
lines_out++;
- if (lines_out >= lines_max)
+ if (lines_max >= 0 && lines_out >= lines_max)
break;
}
n = eb32_next(n);
@@ -944,12 +944,8 @@ int main(int argc, char **argv)
else
d = val;
- if (d > 0.0) {
+ if (d > 0.0)
printf("%d %d %f\n", f, last, d+1.0);
- lines_out++;
- if (lines_out >= lines_max)
- break;
- }
n = eb32_next(n);
}
@@ -1006,7 +1002,7 @@ int main(int argc, char **argv)
t = container_of(n, struct timer, node);
printf("%d %d\n", n->key, t->count);
lines_out++;
- if (lines_out >= lines_max)
+ if (lines_max >= 0 && lines_out >= lines_max)
break;
n = eb32_next(n);
}
@@ -1035,7 +1031,7 @@ int main(int argc, char **argv)
(int)(srv->cum_ct / (srv->nb_ct?srv->nb_ct:1)), (int)(srv->cum_rt / (srv->nb_rt?srv->nb_rt:1)));
srv_node = ebmb_next(srv_node);
lines_out++;
- if (lines_out >= lines_max)
+ if (lines_max >= 0 && lines_out >= lines_max)
break;
}
}
@@ -1046,7 +1042,7 @@ int main(int argc, char **argv)
t = container_of(n, struct timer, node);
printf("%c%c %d\n", (n->key >> 8), (n->key) & 255, t->count);
lines_out++;
- if (lines_out >= lines_max)
+ if (lines_max >= 0 && lines_out >= lines_max)
break;
n = eb32_next(n);
}
@@ -1110,7 +1106,7 @@ int main(int argc, char **argv)
node = eb_prev(node);
lines_out++;
- if (lines_out >= lines_max)
+ if (lines_max >= 0 && lines_out >= lines_max)
break;
}
}
--
1.7.1

View File

@ -1,91 +0,0 @@
From 2f61455a647d9539a49392d475e7f2aeef7dcfce Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sat, 29 Dec 2012 22:23:02 +0100
Subject: MEDIUM: checks: avoid accumulating TIME_WAITs during checks
Some checks which do not induce a close from the server accumulate
local TIME_WAIT sockets because they're cleanly shut down. Typically
TCP probes cause this. This is very problematic when there are many
servers, when the checks are fast or when local source ports are rare.
So now we'll disable lingering on the socket instead of sending a
shutdown. Before doing this we try to drain any possibly pending data.
That way we avoid sending an RST when the server has closed first.
This change means that some servers will see more RSTs, but this is
needed to avoid local source port starvation.
(cherry picked from commit fd29cc537b8511db6e256529ded625c8e7f856d0)
---
src/checks.c | 22 +++++++++++++++++-----
1 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/src/checks.c b/src/checks.c
index 0aa65c0..3d01282 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -833,6 +833,10 @@ static int event_srv_chk_w(int fd)
/* good TCP connection is enough */
set_server_check_status(s, HCHK_STATUS_L4OK, NULL);
+
+ /* avoid accumulating TIME_WAIT on connect-only checks */
+ setsockopt(fd, SOL_SOCKET, SO_LINGER,
+ (struct linger *) &nolinger, sizeof(struct linger));
goto out_wakeup;
}
}
@@ -877,7 +881,7 @@ static int event_srv_chk_r(int fd)
struct task *t = fdtab[fd].owner;
struct server *s = t->context;
char *desc;
- int done;
+ int done, shutr;
unsigned short msglen;
if (unlikely((s->result & SRV_CHK_ERROR) || (fdtab[fd].state == FD_STERROR))) {
@@ -898,7 +902,7 @@ static int event_srv_chk_r(int fd)
* with running the checks without attempting another socket read.
*/
- done = 0;
+ done = shutr = 0;
for (len = 0; s->check_data_len < global.tune.chksize; s->check_data_len += len) {
len = recv(fd, s->check_data + s->check_data_len, global.tune.chksize - s->check_data_len, 0);
if (len <= 0)
@@ -906,14 +910,14 @@ static int event_srv_chk_r(int fd)
}
if (len == 0)
- done = 1; /* connection hangup received */
+ done = shutr = 1; /* connection hangup received */
else if (len < 0 && errno != EAGAIN) {
/* Report network errors only if we got no other data. Otherwise
* we'll let the upper layers decide whether the response is OK
* or not. It is very common that an RST sent by the server is
* reported as an error just after the last data chunk.
*/
- done = 1;
+ done = shutr = 1;
if (!s->check_data_len) {
if (!(s->result & SRV_CHK_ERROR))
set_server_check_status(s, HCHK_STATUS_SOCKERR, NULL);
@@ -1162,7 +1166,15 @@ static int event_srv_chk_r(int fd)
*s->check_data = '\0';
s->check_data_len = 0;
- /* Close the connection... */
+ /* Close the connection... We absolutely want to perform a hard close
+ * and reset the connection if some data are pending, otherwise we end
+ * up with many TIME_WAITs and eat all the source port range quickly.
+ * To avoid sending RSTs all the time, we first try to drain pending
+ * data.
+ */
+ if (!shutr && recv(fd, trash, trashlen, MSG_NOSIGNAL|MSG_DONTWAIT) > 0)
+ setsockopt(fd, SOL_SOCKET, SO_LINGER,
+ (struct linger *) &nolinger, sizeof(struct linger));
shutdown(fd, SHUT_RDWR);
EV_FD_CLR(fd, DIR_RD);
task_wakeup(t, TASK_WOKEN_IO);
--
1.7.1

View File

@ -1,29 +0,0 @@
From 4ee19c30abdd962fbc10f65c45a5f1615cda3059 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Fri, 23 Nov 2012 17:35:59 +0100
Subject: MEDIUM: checks: prevent TIME_WAITs from appearing also on timeouts
We need to disable lingering before closing on timeout too, otherwise
we accumulate TIME_WAITs.
(cherry picked from commit cfd97c6f043fb64980529a0ce26b10fecd0e8fe2)
---
src/checks.c | 3 +++
1 files changed, 3 insertions(+), 0 deletions(-)
diff --git a/src/checks.c b/src/checks.c
index 3d01282..201900a 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -1527,6 +1527,9 @@ struct task *process_chk(struct task *t)
else
set_server_down(s);
s->curfd = -1;
+ /* avoid accumulating TIME_WAIT on timeouts */
+ setsockopt(fd, SOL_SOCKET, SO_LINGER,
+ (struct linger *) &nolinger, sizeof(struct linger));
fd_delete(fd);
rv = 0;
--
1.7.1

View File

@ -1,55 +0,0 @@
From 475b5ec3be8e022bd0f96331efc14c7b7b137d60 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Mon, 26 Nov 2012 02:22:40 +0100
Subject: BUG/MAJOR: cli: show sess <id> may randomly corrupt the back-ref list
show sess <id> puts a backref into the session it's dumping. If the output
is interrupted, the backref cannot always be removed because it's only done
in the I/O handler. This can randomly corrupt the backref list when the
session closes, because it passes the pointer to the next session which
itself might be watched.
The case is hard to reproduce (hundreds of attempts) but monitoring systems
might encounter it frequently.
Thus we have to add a release handler which does the cleanup even when the
I/O handler is not called.
This issue should also be present in 1.4 so the patch should be backported.
(cherry picked from commit 5f9a8779b3d4fb324dacc1daacfb478bd12963d1)
NOTE: In 1.4 there is no release function so we have to hard-code the
release in session.c when the condition is encountered.
---
src/session.c | 15 +++++++++++++++
1 files changed, 15 insertions(+), 0 deletions(-)
diff --git a/src/session.c b/src/session.c
index 0f6a1cf..239d4f5 100644
--- a/src/session.c
+++ b/src/session.c
@@ -989,6 +989,21 @@ resync_stream_interface:
if (may_dequeue_tasks(s->srv, s->be))
process_srv_queue(s->srv);
}
+
+ if (s->req->cons->iohandler == stats_io_handler &&
+ s->req->cons->st0 == STAT_CLI_O_SESS && s->data_state == DATA_ST_LIST) {
+ /* This is a fix for a design bug in the stats I/O handler :
+ * "show sess $sess" may corrupt the struct session if not
+ * properly detached. Unfortunately, in 1.4 there is no way
+ * to ensure we always cleanly unregister an I/O handler upon
+ * error. So we're doing the cleanup here if we can detect the
+ * situation.
+ */
+ if (!LIST_ISEMPTY(&s->data_ctx.sess.bref.users)) {
+ LIST_DEL(&s->data_ctx.sess.bref.users);
+ LIST_INIT(&s->data_ctx.sess.bref.users);
+ }
+ }
}
/*
--
1.7.1

View File

@ -1,86 +0,0 @@
From 46169a35f95f2128bbf185c06eb535d9acb7ee61 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Mon, 26 Nov 2012 13:35:37 +0100
Subject: BUG/MINOR: http: don't report client aborts as server errors
If a client aborts with an abortonclose flag, the close is forwarded
to the server and when server response is processed, the analyser thinks
it's the server who has closed first, and logs flags "SD" or "SH" and
counts a server error. In order to avoid this, we now first detect that
the client has closed and log a client abort instead.
This likely is the reason why many people have been observing a small rate
of SD/SH flags without being able to find what the error was.
This fix should probably be backported to 1.4.
(cherry picked from commit f003d375ec5190e7d99cfa14a9b09e7ca6c55daf)
---
src/proto_http.c | 37 +++++++++++++++++++++++++++++++++----
1 files changed, 33 insertions(+), 4 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index 1ad838b..ae37035 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -4919,6 +4919,28 @@ int http_wait_for_response(struct session *s, struct buffer *rep, int an_bit)
return 0;
}
+ /* client abort with an abortonclose */
+ else if ((rep->flags & BF_SHUTR) && ((s->req->flags & (BF_SHUTR|BF_SHUTW)) == (BF_SHUTR|BF_SHUTW))) {
+ s->fe->counters.cli_aborts++;
+ if (s->fe != s->be)
+ s->be->counters.cli_aborts++;
+ if (s->srv)
+ s->srv->counters.cli_aborts++;
+
+ buffer_auto_close(rep);
+ rep->analysers = 0;
+ txn->status = 400;
+ buffer_ignore(rep, rep->l - rep->send_max);
+ stream_int_retnclose(rep->cons, error_message(s, HTTP_ERR_400));
+
+ if (!(s->flags & SN_ERR_MASK))
+ s->flags |= SN_ERR_CLICL;
+ if (!(s->flags & SN_FINST_MASK))
+ s->flags |= SN_FINST_H;
+ /* process_session() will take care of the error */
+ return 0;
+ }
+
/* close from server, capture the response if the server has started to respond */
else if (rep->flags & BF_SHUTR) {
if (msg->msg_state >= HTTP_MSG_RPVER || msg->err_pos >= 0)
@@ -5666,8 +5688,18 @@ int http_response_forward_body(struct session *s, struct buffer *res, int an_bit
}
missing_data:
- /* stop waiting for data if the input is closed before the end */
+
+ if (res->flags & BF_SHUTW)
+ goto aborted_xfer;
+
+ /* stop waiting for data if the input is closed before the end. If the
+ * client side was already closed, it means that the client has aborted,
+ * so we don't want to count this as a server abort. Otherwise it's a
+ * server abort.
+ */
if (res->flags & BF_SHUTR) {
+ if ((res->flags & BF_SHUTW_NOW) || (s->req->flags & BF_SHUTR))
+ goto aborted_xfer;
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_SRVCL;
s->be->counters.srv_aborts++;
@@ -5676,9 +5708,6 @@ int http_response_forward_body(struct session *s, struct buffer *res, int an_bit
goto return_bad_res_stats_ok;
}
- if (res->flags & BF_SHUTW)
- goto aborted_xfer;
-
/* we need to obey the req analyser, so if it leaves, we must too */
if (!s->req->analysers)
goto return_bad_res;
--
1.7.1

View File

@ -1,31 +0,0 @@
From e856a82ac64d2fea6d2117627a9c857aca738bb0 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Tue, 4 Dec 2012 10:39:01 +0100
Subject: BUG/MINOR: http: don't log a 503 on client errors while waiting for requests
If a client aborts a request with an error (typically a TCP reset), we must
log a 400. Till now we did not set the status nor close the stream interface,
causing the request to attempt to be forwarded and logging a 503.
Should be backported to 1.4 which is affected as well.
(cherry picked from commit dc979f24929ad5352832730954c83ba47afe24cc)
---
src/proto_http.c | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index ae37035..7a8872b 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -2506,6 +2506,8 @@ int http_wait_for_request(struct session *s, struct buffer *req, int an_bit)
if (msg->err_pos >= 0)
http_capture_bad_message(&s->fe->invalid_req, s, req, msg, msg->msg_state, s->fe);
+ txn->status = 400;
+ stream_int_retnclose(req->prod, NULL);
msg->msg_state = HTTP_MSG_ERROR;
req->analysers = 0;
--
1.7.1

View File

@ -1,58 +0,0 @@
From 307227061df9938e9db5c0880254d71f2b9f5e83 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sat, 8 Dec 2012 23:03:28 +0100
Subject: BUG/MEDIUM: tcp: process could theorically crash on lack of source ports
When connect() fails with EAGAIN or EADDRINUSE, an error message is
sent to logs and uses srv->id to indicate the server name (this is
very old code). Since version 1.4, it is possible to have srv == NULL,
so the message could cause a crash when connect() returns EAGAIN or
EADDRINUSE. However in practice this does not happen because on lack
of source ports, EADDRNOTAVAIL is returned instead, so this code is
never called.
This fix consists in not displaying the server name anymore, and in
adding the test for EADDRNOTAVAIL.
Also, the log level was lowered from LOG_EMERG to LOG_ERR in order
not to spam all consoles when source ports are missing for a given
target.
This fix should be backported to 1.4.
(cherry picked from commit b1719517b754aa0a098ee0f9c59e8babaf8df384)
---
src/proto_tcp.c | 10 ++++------
1 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/src/proto_tcp.c b/src/proto_tcp.c
index 84fda20..e876d71 100644
--- a/src/proto_tcp.c
+++ b/src/proto_tcp.c
@@ -376,20 +376,18 @@ int tcpv4_connect_server(struct stream_interface *si,
if ((connect(fd, (struct sockaddr *)srv_addr, sizeof(struct sockaddr_in)) == -1) &&
(errno != EINPROGRESS) && (errno != EALREADY) && (errno != EISCONN)) {
- if (errno == EAGAIN || errno == EADDRINUSE) {
+ if (errno == EAGAIN || errno == EADDRINUSE || errno == EADDRNOTAVAIL) {
char *msg;
- if (errno == EAGAIN) /* no free ports left, try again later */
+ if (errno == EAGAIN || errno == EADDRNOTAVAIL)
msg = "no free ports";
else
msg = "local address already in use";
- qfprintf(stderr,"Cannot connect: %s.\n",msg);
+ qfprintf(stderr,"Connect() failed for backend %s: %s.\n", be->id, msg);
port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
fdinfo[fd].port_range = NULL;
close(fd);
- send_log(be, LOG_EMERG,
- "Connect() failed for server %s/%s: %s.\n",
- be->id, srv->id, msg);
+ send_log(be, LOG_ERR, "Connect() failed for backend %s: %s.\n", be->id, msg);
return SN_ERR_RESOURCE;
} else if (errno == ETIMEDOUT) {
//qfprintf(stderr,"Connect(): ETIMEDOUT");
--
1.7.1

View File

@ -1,45 +0,0 @@
From dc624fd282caae163b11c197b36851e0ecf373eb Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 20 Dec 2012 12:10:09 +0100
Subject: BUG/MINOR: http: don't abort client connection on premature responses
When a server responds prematurely to a POST request, haproxy used to
cause the transfer to be aborted before the end. This is problematic
because this causes the client to receive a TCP reset when it tries to
push more data, generally preventing it from receiving the response
which contain the reason for the premature reponse (eg: "entity too
large" or an authentication request).
From now on we take care of allowing the upload traffic to flow to the
server even when the response has been received, since the server is
supposed to drain it. That way the client receives the server response.
This bug has been present since 1.4 and the fix should probably be
backported there.
(cherry picked from commit 40f151aa79bcdf8b517c4e5666edbc7a47ea7fdc)
Note: 1.4 does not have SI_FL_NOHALF but this is not important.
---
src/proto_http.c | 5 +++--
1 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index 7a8872b..e4cec6d 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -4443,9 +4443,10 @@ int http_resync_states(struct session *s)
buffer_auto_close(s->rep);
buffer_auto_read(s->rep);
}
- else if (txn->rsp.msg_state == HTTP_MSG_CLOSED ||
+ else if ((txn->req.msg_state >= HTTP_MSG_DONE &&
+ (txn->rsp.msg_state == HTTP_MSG_CLOSED || (s->rep->flags & BF_SHUTW))) ||
txn->rsp.msg_state == HTTP_MSG_ERROR ||
- (s->rep->flags & BF_SHUTW)) {
+ txn->req.msg_state == HTTP_MSG_ERROR) {
s->rep->analysers = 0;
buffer_auto_close(s->rep);
buffer_auto_read(s->rep);
--
1.7.1

View File

@ -1,28 +0,0 @@
From 24a528b3f5f09cdd9aebeb9531d7e76623329070 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 20 Dec 2012 15:00:44 +0100
Subject: BUILD: no need to clean up when making git-tar
git-tar uses the repository, not the working dir, so it's useless to
run "make clean" first.
(cherry picked from commit 05ed29cf6ef1eb6268c95c5d38af849c487eeedd)
---
Makefile | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/Makefile b/Makefile
index 9350d54..7633588 100644
--- a/Makefile
+++ b/Makefile
@@ -584,7 +584,7 @@ tar: clean
-cf - haproxy-$(VERSION)/* | gzip -c9 >haproxy-$(VERSION).tar.gz
rm -f haproxy-$(VERSION)
-git-tar: clean
+git-tar:
git archive --format=tar --prefix="haproxy-$(VERSION)/" HEAD | gzip -9 > haproxy-$(VERSION)$(SUBVERS).tar.gz
version:
--
1.7.1

View File

@ -1,30 +0,0 @@
From 528595f5989ef3af64c3e480ae15363677812aaa Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sun, 30 Dec 2012 00:27:36 +0100
Subject: MINOR: http: always report PR-- flags for redirect rules
Mainline commit 71241abf fixed a minor issue by which keep-alive requests
causing a redirect did not have the PR-- flags while close requests did
have it. This patch merges this part of the change which fixes the flags.
---
src/proto_http.c | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index e4cec6d..ffa2224 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -3476,6 +3476,10 @@ int http_process_req_common(struct session *s, struct buffer *req, int an_bit, s
s->rep->analysers = AN_RES_HTTP_XFER_BODY;
txn->req.msg_state = HTTP_MSG_CLOSED;
txn->rsp.msg_state = HTTP_MSG_DONE;
+ if (!(s->flags & SN_ERR_MASK))
+ s->flags |= SN_ERR_PRXCOND;
+ if (!(s->flags & SN_FINST_MASK))
+ s->flags |= SN_FINST_R;
break;
} else {
/* keep-alive not possible */
--
1.7.1

View File

@ -1,124 +0,0 @@
From 44db294666f9dfb68667f998532781e898f14c4f Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sat, 29 Dec 2012 21:50:07 +0100
Subject: BUG/MINOR: time: frequency counters are not totally accurate
When a frontend is rate-limited to 1000 connections per second, the
effective rate measured from the client is 999/s, and connections
experience an average response time of 99.5 ms with a standard
deviation of 2 ms.
The reason for this inaccuracy is that when computing frequency
counters, we use one part of the previous value proportional to the
number of milliseconds remaining in the current second. But even the
last millisecond still uses a part of the past value, which is wrong :
since we have a 1ms resolution, the last millisecond must be dedicated
only to filling the current second.
So we slightly adjust the algorithm to use 999/1000 of the past value
during the first millisecond, and 0/1000 of the past value during the
last millisecond. We also slightly improve the computation by computing
the remaining time instead of the current time in tv_update_date(), so
that we don't have to negate the value in each frequency counter.
Now with the fix, the connection rate measured by both the client and
haproxy is a steady 1000/s, the average response time measured is 99.2ms
and more importantly, the standard deviation has been divided by 3 to
0.6 millisecond.
This fix should also be backported to 1.4 which has the same issue.
(cherry picked from commit eab777c32eff7ee55a6ea12dd2f15fa14d66f233)
---
include/common/time.h | 1 +
src/freq_ctr.c | 7 +++----
src/time.c | 16 +++++++++++++---
3 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/include/common/time.h b/include/common/time.h
index abc1ccf..c9e3641 100644
--- a/include/common/time.h
+++ b/include/common/time.h
@@ -55,6 +55,7 @@
#define SETNOW(a) (*a=now)
extern unsigned int curr_sec_ms; /* millisecond of current second (0..999) */
+extern unsigned int ms_left_scaled; /* milliseconds left for current second (0..2^32-1) */
extern unsigned int curr_sec_ms_scaled; /* millisecond of current second (0..2^32-1) */
extern unsigned int now_ms; /* internal date in milliseconds (may wrap) */
extern struct timeval now; /* internal date is a monotonic function of real clock */
diff --git a/src/freq_ctr.c b/src/freq_ctr.c
index 3df930f..8718b60 100644
--- a/src/freq_ctr.c
+++ b/src/freq_ctr.c
@@ -47,7 +47,7 @@ unsigned int read_freq_ctr(struct freq_ctr *ctr)
if (past <= 1 && !curr)
return past; /* very low rate, avoid flapping */
- return curr + mul32hi(past, ~curr_sec_ms_scaled);
+ return curr + mul32hi(past, ms_left_scaled);
}
/* returns the number of remaining events that can occur on this freq counter
@@ -59,7 +59,6 @@ unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned i
unsigned int curr, past;
unsigned int age;
- past = 0;
curr = 0;
age = now.tv_sec - ctr->curr_sec;
@@ -69,7 +68,7 @@ unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned i
curr = past;
past = ctr->prev_ctr;
}
- curr += mul32hi(past, ~curr_sec_ms_scaled);
+ curr += mul32hi(past, ms_left_scaled);
}
curr += pend;
@@ -99,7 +98,7 @@ unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned
curr = past;
past = ctr->prev_ctr;
}
- curr += mul32hi(past, ~curr_sec_ms_scaled);
+ curr += mul32hi(past, ms_left_scaled);
}
curr += pend;
diff --git a/src/time.c b/src/time.c
index 1b0f72c..342be9d 100644
--- a/src/time.c
+++ b/src/time.c
@@ -16,8 +16,8 @@
#include <common/standard.h>
#include <common/time.h>
-unsigned int curr_sec_ms; /* millisecond of current second (0..999) */
-unsigned int curr_sec_ms_scaled; /* millisecond of current second (0..2^32-1) */
+unsigned int curr_sec_ms; /* millisecond of current second (0..999) */
+unsigned int ms_left_scaled; /* milliseconds left for current second (0..2^32-1) */
unsigned int now_ms; /* internal date in milliseconds (may wrap) */
struct timeval now; /* internal date is a monotonic function of real clock */
struct timeval date; /* the real current date */
@@ -195,7 +195,17 @@ REGPRM2 void tv_update_date(int max_wait, int interrupted)
to_ms:
now = adjusted;
curr_sec_ms = now.tv_usec / 1000; /* ms of current second */
- curr_sec_ms_scaled = curr_sec_ms * 4294971; /* ms * 2^32 / 1000 */
+
+ /* For frequency counters, we'll need to know the ratio of the previous
+ * value to add to current value depending on the current millisecond.
+ * The principle is that during the first millisecond, we use 999/1000
+ * of the past value and that during the last millisecond we use 0/1000
+ * of the past value. In summary, we only use the past value during the
+ * first 999 ms of a second, and the last ms is used to complete the
+ * current measure. The value is scaled to (2^32-1) so that a simple
+ * multiply followed by a shift gives us the final value.
+ */
+ ms_left_scaled = (999U - curr_sec_ms) * 4294967U;
now_ms = now.tv_sec * 1000 + curr_sec_ms;
return;
}
--
1.7.1

View File

@ -1,28 +0,0 @@
From d978423607b6666ca8dd3257d860558ead1b94af Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sun, 30 Dec 2012 00:50:35 +0100
Subject: BUG/MINOR: http: don't process abortonclose when request was sent
option abortonclose may cause a valid connection to be aborted just
after the request has been sent. This is because we check for it
during the session establishment sequence before checking for write
activity. So if the abort and the connect complete at the same time,
the abort is still considered. Let's check for an explicity partial
write before aborting.
This fix should be backported to 1.4 too.
(cherry picked from commit a7a7ebc38280d7a04192bf95e6852222f4bd8140)
Index: haproxy-1.4.22/src/session.c
===================================================================
--- haproxy-1.4.22.orig/src/session.c
+++ haproxy-1.4.22/src/session.c
@@ -203,7 +203,8 @@ int sess_update_st_con_tcp(struct sessio
}
/* OK, maybe we want to abort */
- if (unlikely((rep->flags & BF_SHUTW) ||
+ if (!(req->flags & BF_WRITE_PARTIAL) &&
+ unlikely((rep->flags & BF_SHUTW) ||
((req->flags & BF_SHUTW_NOW) && /* FIXME: this should not prevent a connection from establishing */
(((req->flags & (BF_OUT_EMPTY|BF_WRITE_ACTIVITY)) == BF_OUT_EMPTY) ||
s->be->options & PR_O_ABRT_CLOSE)))) {

View File

@ -1,81 +0,0 @@
From e35cc9549c74f696823a13d24df39be06192bf0b Mon Sep 17 00:00:00 2001
From: Michael Scherer <misc@zarb.org>
Date: Sat, 12 Jan 2013 18:35:19 +0100
Subject: BUG/MEDIUM: remove supplementary groups when changing gid
Without it, haproxy will retain the group membership of root, which may
give more access than intended to the process. For example, haproxy would
still be in the wheel group on Fedora 18, as seen with :
# haproxy -f /etc/haproxy/haproxy.cfg
# ps a -o pid,user,group,command | grep hapr
3545 haproxy haproxy haproxy -f /etc/haproxy/haproxy.cfg
4356 root root grep --color=auto hapr
# grep Group /proc/3545/status
Groups: 0 1 2 3 4 6 10
# getent group wheel
wheel:x:10:root,misc
[WT: The issue has been investigated by independent security research team
and realized by itself not being able to allow security exploitation.
Additionally, dropping groups is not allowed to unprivileged users,
though this mode of deployment is quite common. Thus a warning is
emitted in this case to inform the user. The fix could be backported
into all supported versions as the issue has always been there. ]
(cherry picked from commit ab012dd394d596f022c0d16f3584d5f61ffcf10e)
---
doc/configuration.txt | 2 ++
src/haproxy.c | 15 +++++++++++----
2 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/doc/configuration.txt b/doc/configuration.txt
index e5d9af5..20b89c2 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -486,6 +486,8 @@ gid <number>
Changes the process' group ID to <number>. It is recommended that the group
ID is dedicated to HAProxy or to a small set of similar daemons. HAProxy must
be started with a user belonging to this group, or with superuser privileges.
+ Note that if haproxy is started from a user having supplementary groups, it
+ will only be able to drop these groups if started with superuser privileges.
See also "group" and "uid".
group <group name>
diff --git a/src/haproxy.c b/src/haproxy.c
index ec481aa..c302143 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -44,6 +44,7 @@
#include <sys/resource.h>
#include <time.h>
#include <syslog.h>
+#include <grp.h>
#ifdef DEBUG_FULL
#include <assert.h>
@@ -1186,10 +1187,16 @@ int main(int argc, char **argv)
*/
/* setgid / setuid */
- if (global.gid && setgid(global.gid) == -1) {
- Alert("[%s.main()] Cannot set gid %d.\n", argv[0], global.gid);
- protocol_unbind_all();
- exit(1);
+ if (global.gid) {
+ if (getgroups(0, NULL) > 0 && setgroups(0, NULL) == -1)
+ Warning("[%s.main()] Failed to drop supplementary groups. Using 'gid'/'group'"
+ " without 'uid'/'user' is generally useless.\n", argv[0]);
+
+ if (setgid(global.gid) == -1) {
+ Alert("[%s.main()] Cannot set gid %d.\n", argv[0], global.gid);
+ protocol_unbind_all();
+ exit(1);
+ }
}
if (global.uid && setuid(global.uid) == -1) {
--
1.7.1

View File

@ -1,50 +0,0 @@
From a587005ff413866d7346e7448438d4c5be5a1cd7 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Fri, 18 Jan 2013 15:22:41 +0100
Subject: BUG/MINOR: epoll: use a fix maxevents argument in epoll_wait()
epoll_wait() takes a number of returned events, not the number of
fds to consider. We must not pass it the number of the smallest fd,
as it leads to value zero being used, which is invalid in epoll_wait().
The effect may sometimes be observed with peers sections trying to
connect and causing 2-seconds CPU loops upon a soft reload because
epoll_wait() immediately returns -1 EINVAL instead of waiting for the
timeout to happen.
This fix should be backported to 1.4 too (into ev_epoll and ev_sepoll).
(cherry picked from commit cf181c9d404815f890da7cd2243a5528edd7b4f9)
---
src/ev_epoll.c | 3 +--
src/ev_sepoll.c | 1 -
2 files changed, 1 insertions(+), 3 deletions(-)
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
index 0b22da6..1d213d9 100644
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -249,8 +249,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
wait_time = MAX_DELAY_MS;
}
- fd = MIN(maxfd, global.tune.maxpollevents);
- status = epoll_wait(epoll_fd, epoll_events, fd, wait_time);
+ status = epoll_wait(epoll_fd, epoll_events, global.tune.maxpollevents, wait_time);
tv_update_date(wait_time, status);
for (count = 0; count < status; count++) {
diff --git a/src/ev_sepoll.c b/src/ev_sepoll.c
index 248f1f4..a3ef118 100644
--- a/src/ev_sepoll.c
+++ b/src/ev_sepoll.c
@@ -481,7 +481,6 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
*/
fd = MIN(absmaxevents, spec_processed);
fd = MAX(global.tune.maxpollevents, fd);
- fd = MIN(maxfd, fd);
/* we want to detect if an accept() will create new speculative FDs here */
fd_created = 0;
spec_processed = 0;
--
1.7.1

View File

@ -1,32 +0,0 @@
From 48ffce4546ce99d51ea8c3dfb575c307e9f96ea5 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 24 Jan 2013 00:25:39 +0100
Subject: BUG/MINOR: config: fix improper check for failed memory alloc in ACL parser
The wrong variable is checked after a calloc() so a memory shortage would
result in a segfault while loading the config instead of a clean error.
This fix may be backported to 1.4 and 1.3 which are both affected.
Reported-by: Dinko Korunic <dkorunic@reflected.net>
(cherry picked from commit f678b7f32253fa7b279f907dbda563e985c6478c)
---
src/acl.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/src/acl.c b/src/acl.c
index 35b6eb8..793e5ca 100644
--- a/src/acl.c
+++ b/src/acl.c
@@ -1230,7 +1230,7 @@ struct acl_cond *parse_acl_cond(const char **args, struct list *known_acl, int p
if (!cur_suite) {
cur_suite = (struct acl_term_suite *)calloc(1, sizeof(*cur_suite));
- if (cur_term == NULL)
+ if (cur_suite == NULL)
goto out_free_term;
LIST_INIT(&cur_suite->terms);
LIST_ADDQ(&cond->suites, &cur_suite->list);
--
1.7.1

View File

@ -1,35 +0,0 @@
From 9e98076edc9d3f25763473480e4aac6223bfd7d0 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 24 Jan 2013 00:37:39 +0100
Subject: BUG/MEDIUM: checks: ensure the health_status is always within bounds
health_adjust() checks for incorrect bounds for the status argument.
With current code, the argument is always a constant from the valid
enum so there is no impact and the check is basically a NOP. However
users running local patches (eg: new checks) might want to recheck
their code.
This fix should be backported to 1.4 which introduced the issue.
Reported-by: Dinko Korunic <dkorunic@reflected.net>
(cherry picked from commit bb95666bac94b6235eda431aba788644f7de7a3f)
---
src/checks.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/src/checks.c b/src/checks.c
index 201900a..13b5c64 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -584,7 +584,7 @@ void health_adjust(struct server *s, short status) {
if (s->observe >= HANA_OBS_SIZE)
return;
- if (status >= HCHK_STATUS_SIZE || !analyze_statuses[status].desc)
+ if (status >= HANA_STATUS_SIZE || !analyze_statuses[status].desc)
return;
switch (analyze_statuses[status].lr[s->observe - 1]) {
--
1.7.1

View File

@ -1,33 +0,0 @@
From 062501e5d3bea2989557fa84325fc894784da16b Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 24 Jan 2013 01:25:25 +0100
Subject: CLEANUP: http: remove a useless null check
srv cannot be null in perform_http_redirect(), as it's taken
from the stream interface's target which is always valid for a
server-based redirect, and it was already dereferenced above, so in
practice, gcc already removes the test anyway.
Reported-by: Dinko Korunic <dkorunic@reflected.net>
(cherry picked from commit 4521ba689c506ce66e4843c887fbc555d2ea2006)
---
src/proto_http.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index ffa2224..06b3743 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -798,8 +798,7 @@ void perform_http_redirect(struct session *s, struct stream_interface *si)
http_server_error(s, si, SN_ERR_PRXCOND, SN_FINST_C, 302, &rdr);
/* FIXME: we should increase a counter of redirects per server and per backend. */
- if (s->srv)
- srv_inc_sess_ctr(s->srv);
+ srv_inc_sess_ctr(s->srv);
}
/* Return the error message corresponding to si->err_type. It is assumed
--
1.7.1

View File

@ -1,43 +0,0 @@
From cf196abaa639f2ad940327145e59458b2cb7d9fe Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 24 Jan 2013 02:06:05 +0100
Subject: BUG/MEDIUM: signal: signal handler does not properly check for signal bounds
sig is checked for < 0 or > MAX_SIGNAL, but the signal array is
MAX_SIGNAL in size. At the moment, MAX_SIGNAL is 256. If a system supports
more than MAX_SIGNAL signals, then sending signal MAX_SIGNAL to the process
will corrupt one integer in its memory and might also crash the process.
This bug is also present in 1.4 and 1.3, and the fix must be backported.
Reported-by: Dinko Korunic <dkorunic@reflected.net>
(cherry picked from commit 1a53b5ef583988ca0405007f3ef47d2114da9546)
---
src/signal.c | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/signal.c b/src/signal.c
index 9825c0d..a3c6cd4 100644
--- a/src/signal.c
+++ b/src/signal.c
@@ -38,7 +38,7 @@ void signal_init()
void signal_handler(int sig)
{
- if (sig < 0 || sig > MAX_SIGNAL || !signal_state[sig].handler) {
+ if (sig < 0 || sig >= MAX_SIGNAL || !signal_state[sig].handler) {
/* unhandled signal */
qfprintf(stderr, "Received unhandled signal %d. Signal has been disabled.\n", sig);
signal(sig, SIG_IGN);
@@ -64,7 +64,7 @@ void signal_handler(int sig)
*/
void signal_register(int sig, void (*handler)(int))
{
- if (sig < 0 || sig > MAX_SIGNAL) {
+ if (sig < 0 || sig >= MAX_SIGNAL) {
qfprintf(stderr, "Failed to register signal %d : out of range [0..%d].\n", sig, MAX_SIGNAL);
return;
}
--
1.7.1

View File

@ -1,48 +0,0 @@
From 022ff7d0fd38505dbd87d7224ca20d1cdc729f01 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 24 Jan 2013 02:26:43 +0100
Subject: BUG/MEDIUM: uri_auth: missing NULL check and memory leak on memory shortage
A test is obviously wrong in uri_auth(). If strdup(pass) returns an error
while strdup(user) passes, the NULL pointer is still stored into the
structure. If the user returns the NULL instead, the allocated memory is
not released before returning the error.
The issue was present in 1.4 so the fix should be backported.
Reported-by: Dinko Korunic <dkorunic@reflected.net>
(cherry picked from commit 0b291bdef1b9b6b539f44aa896eb1211c57a67a5)
---
src/uri_auth.c | 13 ++++++++++---
1 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/src/uri_auth.c b/src/uri_auth.c
index fdbcef0..2344ac6 100644
--- a/src/uri_auth.c
+++ b/src/uri_auth.c
@@ -247,12 +247,19 @@ struct uri_auth *stats_add_auth(struct uri_auth **root, char *user)
return NULL;
newuser->user = strdup(user);
- newuser->pass = strdup(pass);
- newuser->flags |= AU_O_INSECURE;
+ if (!newuser->user) {
+ free(newuser);
+ return NULL;
+ }
- if (!newuser->user || !newuser->user)
+ newuser->pass = strdup(pass);
+ if (!newuser->pass) {
+ free(newuser->user);
+ free(newuser);
return NULL;
+ }
+ newuser->flags |= AU_O_INSECURE;
newuser->next = u->userlist->users;
u->userlist->users = newuser;
--
1.7.1

View File

@ -1,34 +0,0 @@
From d319dc8713c7db1eb54d0474c7c87aeaf1064b2f Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Thu, 24 Jan 2013 16:24:15 +0100
Subject: CLEANUP: config: slowstart is never negative
No need to check for a negative value in the "slowstart" argument, it's
an unsigned.
Reported-by: Dinko Korunic <dkorunic@reflected.net>
(cherry picked from commit 3a3bbcd6f1fd3b5629ca1759104b0e58ad637dc0)
---
src/cfgparse.c | 6 ------
1 files changed, 0 insertions(+), 6 deletions(-)
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 90fdbff..e55d30a 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -3669,12 +3669,6 @@ stats_error_parsing:
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
- if (val < 0) {
- Alert("parsing [%s:%d]: invalid value %d for argument '%s' of server %s.\n",
- file, linenum, val, args[cur_arg], newsrv->id);
- err_code |= ERR_ALERT | ERR_FATAL;
- goto out;
- }
newsrv->slowstart = (val + 999) / 1000;
cur_arg += 2;
}
--
1.7.1

View File

@ -1,44 +0,0 @@
From a2c277afc6f12858939ba26160ce0f91fdb160d0 Mon Sep 17 00:00:00 2001
From: Simon Horman <horms@verge.net.au>
Date: Wed, 13 Feb 2013 17:48:00 +0900
Subject: BUG/MINOR: Correct logic in cut_crlf()
This corrects what appears to be logic errors in cut_crlf().
I assume that the intention of this function is to truncate a
string at the first cr or lf. However, currently lf are ignored.
Also use '\0' instead of 0 as the null character, a cosmetic change.
Cc: Krzysztof Piotr Oledzki <ole@ans.pl>
Signed-off-by: Simon Horman <horms@verge.net.au>
[WT: this fix may be backported to 1.4 too]
(cherry picked from commit 5269cfb4585ebee9babc628e2fed672c00028743)
---
include/common/standard.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/common/standard.h b/include/common/standard.h
index cd1a609..064d216 100644
--- a/include/common/standard.h
+++ b/include/common/standard.h
@@ -282,14 +282,14 @@ unsigned int inetaddr_host_lim_ret(const char *text, char *stop, const char **re
static inline char *cut_crlf(char *s) {
- while (*s != '\r' || *s == '\n') {
+ while (*s != '\r' && *s != '\n') {
char *p = s++;
if (!*p)
return p;
}
- *s++ = 0;
+ *s++ = '\0';
return s;
}
--
1.7.1

View File

@ -1,88 +0,0 @@
From a72ae88b2d68c3e772f14aed88687c676771e1c3 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Wed, 13 Feb 2013 12:39:06 +0100
Subject: BUILD: improve the makefile's support for libpcre
Currently when cross-compiling, it's generally necessary to force
PCREDIR which the Makefile automatically appends /include and /lib to.
Unfortunately on most 64-bit linux distros, the lib path is instead
/lib64, which is really annoying to fix in the makefile.
So now we're computing PCRE_INC and PCRE_LIB from PCREDIR and using
these ones instead. If one wants to force paths individually, it is
possible to set them instead of setting PCREDIR. The old behaviour
of not passing anything to the compiler when PCREDIR is forced to blank
is conserved.
(cherry picked from commit 39793095d7e19031504ed75aebfbb500680baaf7)
---
Makefile | 40 ++++++++++++++++++++++------------------
1 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/Makefile b/Makefile
index 7633588..8d82543 100644
--- a/Makefile
+++ b/Makefile
@@ -55,6 +55,8 @@
# DLMALLOC_SRC : build with dlmalloc, indicate the location of dlmalloc.c.
# DLMALLOC_THRES : should match PAGE_SIZE on every platform (default: 4096).
# PCREDIR : force the path to libpcre.
+# PCRE_LIB : force the lib path to libpcre (defaults to $PCREDIR/lib).
+# PCRE_INC : force the include path to libpcre ($PCREDIR/inc)
# IGNOREGIT : ignore GIT commit versions if set.
# VERSION : force haproxy version reporting.
# SUBVERS : add a sub-version (eg: platform, model, ...).
@@ -436,30 +438,32 @@ DLMALLOC_THRES = 4096
OPTIONS_OBJS += src/dlmalloc.o
endif
-ifneq ($(USE_PCRE),)
-# PCREDIR is the directory hosting include/pcre.h and lib/libpcre.*. It is
-# automatically detected but can be forced if required. Forcing it to an empty
-# string will result in search only in the default paths.
-ifeq ($(PCREDIR),)
+ifneq ($(USE_PCRE)$(USE_STATIC_PCRE),)
+# PCREDIR is used to automatically construct the PCRE_INC and PCRE_LIB paths,
+# by appending /include and /lib respectively. If your system does not use the
+# same sub-directories, simply force these variables instead of PCREDIR. It is
+# automatically detected but can be forced if required (for cross-compiling).
+# Forcing PCREDIR to an empty string will let the compiler use the default
+# locations.
+
PCREDIR := $(shell pcre-config --prefix 2>/dev/null || echo /usr/local)
+ifneq ($(PCREDIR),)
+PCRE_INC := $(PCREDIR)/include
+PCRE_LIB := $(PCREDIR)/lib
endif
+
ifeq ($(USE_STATIC_PCRE),)
-OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCREDIR),-I$(PCREDIR)/include)
-OPTIONS_LDFLAGS += $(if $(PCREDIR),-L$(PCREDIR)/lib) -lpcreposix -lpcre
-endif
+# dynamic PCRE
+OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCRE_INC),-I$(PCRE_INC))
+OPTIONS_LDFLAGS += $(if $(PCRE_LIB),-L$(PCRE_LIB)) -lpcreposix -lpcre
BUILD_OPTIONS += $(call ignore_implicit,USE_PCRE)
-endif
-
-ifneq ($(USE_STATIC_PCRE),)
-# PCREDIR is the directory hosting include/pcre.h and lib/libpcre.*. It is
-# automatically detected but can be forced if required.
-ifeq ($(PCREDIR),)
-PCREDIR := $(shell pcre-config --prefix 2>/dev/null || echo /usr/local)
-endif
-OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCREDIR),-I$(PCREDIR)/include)
-OPTIONS_LDFLAGS += $(if $(PCREDIR),-L$(PCREDIR)/lib) -Wl,-Bstatic -lpcreposix -lpcre -Wl,-Bdynamic
+else
+# static PCRE
+OPTIONS_CFLAGS += -DUSE_PCRE $(if $(PCRE_INC),-I$(PCRE_INC))
+OPTIONS_LDFLAGS += $(if $(PCRE_LIB),-L$(PCRE_LIB)) -Wl,-Bstatic -lpcreposix -lpcre -Wl,-Bdynamic
BUILD_OPTIONS += $(call ignore_implicit,USE_STATIC_PCRE)
endif
+endif
# This one can be changed to look for ebtree files in an external directory
EBTREE_DIR := ebtree
--
1.7.1

View File

@ -1,30 +0,0 @@
From baa0b0fab303179d5195e347b1254b1da2c6ff33 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Tue, 19 Mar 2013 08:19:59 +0100
Subject: BUG/MINOR: checks: fix an warning introduced by commit 2f61455a
Commit 2f61455a fixed a TIME_WAIT issue but introduced a warning as in
case of an error, it relies on the shutr variable which is not initialized
to decide whether or not to disable lingering on the socket. This has no
impact obviously since the socket is already dead, but better fix this
anyway and avoid doing the setsockopt() in this case.
---
src/checks.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/src/checks.c b/src/checks.c
index 13b5c64..e586e4c 100644
--- a/src/checks.c
+++ b/src/checks.c
@@ -888,7 +888,7 @@ static int event_srv_chk_r(int fd)
/* in case of TCP only, this tells us if the connection failed */
if (!(s->result & SRV_CHK_ERROR))
set_server_check_status(s, HCHK_STATUS_SOCKERR, NULL);
-
+ shutr = 1;
goto out_wakeup;
}
--
1.7.1

View File

@ -1,231 +0,0 @@
From e30c9c92868af1d50b7d273056b6fbfeb82aaf73 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sat, 16 Feb 2013 23:49:04 +0100
Subject: MEDIUM: halog: add support for counting per source address (-ic)
This is the same as -uc except that instead of counting URLs, it
counts source addresses. The reported times are request times and
not response times.
The code becomes heavily ugly, the url struct is being abused to
store an address, and there are no more bit fields available. The
code needs a major revamp.
(cherry picked from commit 7cf479cc09d4e4d142e8862e2a2792385f695439)
---
contrib/halog/halog.c | 124 +++++++++++++++++++++++++++++++++++++++++++++---
1 files changed, 116 insertions(+), 8 deletions(-)
diff --git a/contrib/halog/halog.c b/contrib/halog/halog.c
index 61034ec..9552998 100644
--- a/contrib/halog/halog.c
+++ b/contrib/halog/halog.c
@@ -114,6 +114,7 @@ struct url_stat {
FILT_COUNT_URL_BAVG|FILT_COUNT_URL_BTOT)
#define FILT_COUNT_COOK_CODES 0x40000000
+#define FILT_COUNT_IP_COUNT 0x80000000
unsigned int filter = 0;
unsigned int filter_invert = 0;
@@ -126,6 +127,7 @@ int lines_max = -1;
const char *fgets2(FILE *stream);
void filter_count_url(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_count_ip(const char *source_field, const char *accept_field, const char *time_field, struct timer **tptr);
void filter_count_srv_status(const char *accept_field, const char *time_field, struct timer **tptr);
void filter_count_cook_codes(const char *accept_field, const char *time_field, struct timer **tptr);
void filter_count_term_codes(const char *accept_field, const char *time_field, struct timer **tptr);
@@ -140,7 +142,7 @@ void usage(FILE *output, const char *msg)
"%s"
"Usage: halog [-h|--help] for long help\n"
" halog [-q] [-c] [-m <lines>]\n"
- " {-cc|-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt}\n"
+ " {-cc|-gt|-pct|-st|-tc|-srv|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt|-ic}\n"
" [-s <skip>] [-e|-E] [-H] [-rt|-RT <time>] [-ad <delay>] [-ac <count>]\n"
" [-v] [-Q|-QS] [-tcn|-TCN <termcode>] [ -hs|-HS [min][:[max]] ] < log\n"
"\n",
@@ -527,7 +529,7 @@ void truncated_line(int linenum, const char *line)
int main(int argc, char **argv)
{
- const char *b, *e, *p, *time_field, *accept_field;
+ const char *b, *e, *p, *time_field, *accept_field, *source_field;
const char *filter_term_code_name = NULL;
const char *output_file = NULL;
int f, last, err;
@@ -657,6 +659,8 @@ int main(int argc, char **argv)
filter |= FILT_COUNT_URL_BAVG;
else if (strcmp(argv[0], "-ubt") == 0)
filter |= FILT_COUNT_URL_BTOT;
+ else if (strcmp(argv[0], "-ic") == 0)
+ filter |= FILT_COUNT_IP_COUNT;
else if (strcmp(argv[0], "-o") == 0) {
if (output_file)
die("Fatal: output file name already specified.\n");
@@ -721,13 +725,21 @@ int main(int argc, char **argv)
while ((line = fgets2(stdin)) != NULL) {
linenum++;
time_field = NULL; accept_field = NULL;
+ source_field = NULL;
test = 1;
/* for any line we process, we first ensure that there is a field
* looking like the accept date field (beginning with a '[').
*/
- accept_field = field_start(line, ACCEPT_FIELD + skip_fields);
+ if (filter & FILT_COUNT_IP_COUNT) {
+ /* we need the IP first */
+ source_field = field_start(line, SOURCE_FIELD + skip_fields);
+ accept_field = field_start(source_field, ACCEPT_FIELD - SOURCE_FIELD + 1);
+ }
+ else
+ accept_field = field_start(line, ACCEPT_FIELD + skip_fields);
+
if (unlikely(*accept_field != '[')) {
parse_err++;
continue;
@@ -869,8 +881,12 @@ int main(int argc, char **argv)
/************** here we process inputs *******************/
- if (line_filter)
- line_filter(accept_field, time_field, &t);
+ if (line_filter) {
+ if (filter & FILT_COUNT_IP_COUNT)
+ filter_count_ip(source_field, accept_field, time_field, &t);
+ else
+ line_filter(accept_field, time_field, &t);
+ }
else
lines_out++; /* FILT_COUNT_ONLY was used, so we're just counting lines */
if (lines_max >= 0 && lines_out >= lines_max)
@@ -1047,7 +1063,7 @@ int main(int argc, char **argv)
n = eb32_next(n);
}
}
- else if (filter & FILT_COUNT_URL_ANY) {
+ else if (filter & (FILT_COUNT_URL_ANY|FILT_COUNT_IP_COUNT)) {
struct eb_node *node, *next;
if (!(filter & FILT_COUNT_URL_ONLY)) {
@@ -1062,7 +1078,7 @@ int main(int argc, char **argv)
ustat = container_of(node, struct url_stat, node.url.node);
- if (filter & FILT_COUNT_URL_COUNT)
+ if (filter & (FILT_COUNT_URL_COUNT|FILT_COUNT_IP_COUNT))
ustat->node.val.key = ustat->nb_req;
else if (filter & FILT_COUNT_URL_ERR)
ustat->node.val.key = ustat->nb_err;
@@ -1087,7 +1103,10 @@ int main(int argc, char **argv)
timers[0] = timers[1];
}
- printf("#req err ttot tavg oktot okavg bavg btot url\n");
+ if (FILT_COUNT_IP_COUNT)
+ printf("#req err ttot tavg oktot okavg bavg btot src\n");
+ else
+ printf("#req err ttot tavg oktot okavg bavg btot url\n");
/* scan the tree in its reverse sorting order */
node = eb_last(&timers[0]);
@@ -1410,6 +1429,95 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
}
}
+void filter_count_ip(const char *source_field, const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct url_stat *ustat = NULL;
+ struct ebpt_node *ebpt_old;
+ const char *b, *e;
+ int f, err, array[5];
+ int val;
+
+ /* let's collect the response time */
+ if (!time_field) {
+ time_field = field_start(accept_field, TIME_FIELD - ACCEPT_FIELD + 1); // avg 115 ns per line
+ if (unlikely(!*time_field)) {
+ truncated_line(linenum, line);
+ return;
+ }
+ }
+
+ /* we have the field TIME_FIELD starting at <time_field>. We'll
+ * parse the 5 timers to detect errors, it takes avg 55 ns per line.
+ */
+ e = time_field; err = 0; f = 0;
+ while (!SEP(*e)) {
+ if (f == 0 || f == 4) {
+ array[f] = str2ic(e);
+ if (array[f] < 0) {
+ array[f] = -1;
+ err = 1;
+ }
+ }
+ if (++f == 5)
+ break;
+ SKIP_CHAR(e, '/');
+ }
+ if (f < 5) {
+ parse_err++;
+ return;
+ }
+
+ /* OK we have our timers in array[0], and err is >0 if at
+ * least one -1 was seen. <e> points to the first char of
+ * the last timer. Let's prepare a new node with that.
+ */
+ if (unlikely(!ustat))
+ ustat = calloc(1, sizeof(*ustat));
+
+ ustat->nb_err = err;
+ ustat->nb_req = 1;
+
+ /* use array[4] = total time in case of error */
+ ustat->total_time = (array[0] >= 0) ? array[0] : array[4];
+ ustat->total_time_ok = (array[0] >= 0) ? array[0] : 0;
+
+ e = field_start(e, BYTES_SENT_FIELD - TIME_FIELD + 1);
+ val = str2ic(e);
+ ustat->total_bytes_sent = val;
+
+ /* the source might be IPv4 or IPv6, so we always strip the port by
+ * removing the last colon.
+ */
+ b = source_field;
+ e = field_stop(b + 1);
+ while (e > b && e[-1] != ':')
+ e--;
+ *(char *)(e - 1) = '\0';
+
+ /* now instead of copying the src for a simple lookup, we'll link
+ * to it from the node we're trying to insert. If it returns a
+ * different value, it was already there. Otherwise we just have
+ * to dynamically realloc an entry using strdup(). We're using the
+ * <url> field of the node to store the source address.
+ */
+ ustat->node.url.key = (char *)b;
+ ebpt_old = ebis_insert(&timers[0], &ustat->node.url);
+
+ if (ebpt_old != &ustat->node.url) {
+ struct url_stat *ustat_old;
+ /* node was already there, let's update previous one */
+ ustat_old = container_of(ebpt_old, struct url_stat, node.url);
+ ustat_old->nb_req ++;
+ ustat_old->nb_err += ustat->nb_err;
+ ustat_old->total_time += ustat->total_time;
+ ustat_old->total_time_ok += ustat->total_time_ok;
+ ustat_old->total_bytes_sent += ustat->total_bytes_sent;
+ } else {
+ ustat->url = ustat->node.url.key = strdup(ustat->node.url.key);
+ ustat = NULL; /* node was used */
+ }
+}
+
void filter_graphs(const char *accept_field, const char *time_field, struct timer **tptr)
{
struct timer *t2;
--
1.7.1

View File

@ -1,71 +0,0 @@
From 8a92e409e706e503ba76f8863c87409192e9f082 Mon Sep 17 00:00:00 2001
From: Yves Lafon <ylafon@w3.org>
Date: Mon, 11 Mar 2013 11:06:05 -0400
Subject: [PATCH 36/42] MEDIUM: http: implement redirect 307 and 308
I needed to emit a 307 and noticed it was not available so I did it,
as well as 308.
(cherry picked from commit 3e8d1ae2d25d3fae659fc560506af2ae9b20da12)
---
src/cfgparse.c | 6 +++---
src/proto_http.c | 20 ++++++++++++++++++++
2 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/src/cfgparse.c b/src/cfgparse.c
index e55d30a..345b415 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -2215,9 +2215,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
}
cur_arg++;
code = atol(args[cur_arg]);
- if (code < 301 || code > 303) {
- Alert("parsing [%s:%d] : '%s': unsupported HTTP code '%d'.\n",
- file, linenum, args[0], code);
+ if (code < 301 || code > 308 || (code > 303 && code < 307)) {
+ Alert("parsing [%s:%d] : '%s': unsupported HTTP code '%s' (must be one of 301, 302, 303, 307 or 308).\n",
+ file, linenum, args[0], args[cur_arg]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
diff --git a/src/proto_http.c b/src/proto_http.c
index 06b3743..8d9d8e8 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -90,6 +90,20 @@ const char *HTTP_303 =
"Content-length: 0\r\n"
"Location: "; /* not terminated since it will be concatenated with the URL */
+
+/* same as 302 except that the browser MUST retry with the same method */
+const char *HTTP_307 =
+ "HTTP/1.1 307 Temporary Redirect\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-length: 0\r\n"
+ "Location: "; /* not terminated since it will be concatenated with the URL */
+
+/* same as 301 except that the browser MUST retry with the same method */
+const char *HTTP_308 =
+ "HTTP/1.1 308 Permanent Redirect\r\n"
+ "Content-length: 0\r\n"
+ "Location: "; /* not terminated since it will be concatenated with the URL */
+
/* Warning: this one is an sprintf() fmt string, with <realm> as its only argument */
const char *HTTP_401_fmt =
"HTTP/1.0 401 Unauthorized\r\n"
@@ -3355,6 +3369,12 @@ int http_process_req_common(struct session *s, struct buffer *req, int an_bit, s
/* build redirect message */
switch(rule->code) {
+ case 308:
+ msg_fmt = HTTP_308;
+ break;
+ case 307:
+ msg_fmt = HTTP_307;
+ break;
case 303:
msg_fmt = HTTP_303;
break;
--
1.8.1.5

View File

@ -1,28 +0,0 @@
From 6c9ba3562cf57dc033a52c0973962c642b3aaf18 Mon Sep 17 00:00:00 2001
From: Yves Lafon <ylafon@w3.org>
Date: Mon, 11 Mar 2013 11:06:05 -0400
Subject: [PATCH 37/42] MINOR: http: status 301 should not be marked
non-cacheable
Also, browsers behaviour is inconsistent regarding the Cache-Control
header field on a 301.
(cherry picked from commit e267421e93eb35272a104c9c8fa6878880f42be8)
---
src/proto_http.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/proto_http.c b/src/proto_http.c
index 8d9d8e8..a52c038 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -73,7 +73,6 @@ const struct chunk http_100_chunk = {
/* Warning: no "connection" header is provided with the 3xx messages below */
const char *HTTP_301 =
"HTTP/1.1 301 Moved Permanently\r\n"
- "Cache-Control: no-cache\r\n"
"Content-length: 0\r\n"
"Location: "; /* not terminated since it will be concatenated with the URL */
--
1.8.1.5

View File

@ -1,47 +0,0 @@
From 89c12b19f9e42976c9c205cd1a42bb2e148b3235 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Fri, 29 Mar 2013 19:28:11 +0100
Subject: [PATCH 38/42] DOC: mention the new HTTP 307 and 308 redirect statues
(cherry picked from commit b67fdc4cd8bde202f2805d98683ddab929469a05)
---
doc/configuration.txt | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/doc/configuration.txt b/doc/configuration.txt
index 20b89c2..9a99267 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -309,6 +309,8 @@ Haproxy may emit the following status codes by itself :
301 when performing a redirection, depending on the configured code
302 when performing a redirection, depending on the configured code
303 when performing a redirection, depending on the configured code
+ 307 when performing a redirection, depending on the configured code
+ 308 when performing a redirection, depending on the configured code
400 for an invalid or too large request
401 when an authentication is required to perform the action (when
accessing the stats page)
@@ -4057,12 +4059,14 @@ redirect prefix <to> [code <code>] <option> [{if | unless} <condition>]
URL.
<code> The code is optional. It indicates which type of HTTP redirection
- is desired. Only codes 301, 302 and 303 are supported, and 302 is
- used if no code is specified. 301 means "Moved permanently", and
- a browser may cache the Location. 302 means "Moved permanently"
- and means that the browser should not cache the redirection. 303
- is equivalent to 302 except that the browser will fetch the
- location with a GET method.
+ is desired. Only codes 301, 302, 303, 307 and 308 are supported,
+ with 302 used by default if no code is specified. 301 means
+ "Moved permanently", and a browser may cache the Location. 302
+ means "Moved permanently" and means that the browser should not
+ cache the redirection. 303 is equivalent to 302 except that the
+ browser will fetch the location with a GET method. 307 is just
+ like 302 but makes it clear that the same method must be reused.
+ Likewise, 308 replaces 301 if the same method must be used.
<option> There are several options which can be specified to adjust the
expected behaviour of a redirection :
--
1.8.1.5

View File

@ -1,193 +0,0 @@
From d9185dbab66e8ea3bafd1d43660ae44311da7a81 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sun, 31 Mar 2013 14:06:57 +0200
Subject: [PATCH 39/42] MEDIUM: poll: do not use FD_* macros anymore
Some recent glibc updates have added controls on FD_SET/FD_CLR/FD_ISSET
that crash the program if it tries to use a file descriptor larger than
FD_SETSIZE.
Do not rely on FD_* macros anymore and replace them with bit fields.
(cherry picked from commit 80da05a4cfb881663dc1f38a94d613f37e54552a)
---
src/ev_poll.c | 87 +++++++++++++++++++++++++++++------------------------------
1 file changed, 43 insertions(+), 44 deletions(-)
diff --git a/src/ev_poll.c b/src/ev_poll.c
index f5d011e..02e89ad 100644
--- a/src/ev_poll.c
+++ b/src/ev_poll.c
@@ -27,12 +27,25 @@
#include <proto/task.h>
-static fd_set *fd_evts[2];
+static unsigned int *fd_evts[2];
/* private data */
static struct pollfd *poll_events = NULL;
+static inline unsigned int hap_fd_isset(int fd, unsigned int *evts)
+{
+ return evts[fd / (8*sizeof(*evts))] & (1U << (fd & (8*sizeof(*evts) - 1)));
+}
+
+static inline void hap_fd_set(int fd, unsigned int *evts)
+{
+ evts[fd / (8*sizeof(*evts))] |= 1U << (fd & (8*sizeof(*evts) - 1));
+}
+static inline void hap_fd_clr(int fd, unsigned int *evts)
+{
+ evts[fd / (8*sizeof(*evts))] &= ~(1U << (fd & (8*sizeof(*evts) - 1)));
+}
/*
* Benchmarks performed on a Pentium-M notebook show that using functions
* instead of the usual macros improve the FD_* performance by about 80%,
@@ -40,43 +53,43 @@ static struct pollfd *poll_events = NULL;
*/
REGPRM2 static int __fd_is_set(const int fd, int dir)
{
- return FD_ISSET(fd, fd_evts[dir]);
+ return hap_fd_isset(fd, fd_evts[dir]);
}
REGPRM2 static int __fd_set(const int fd, int dir)
{
- FD_SET(fd, fd_evts[dir]);
+ hap_fd_set(fd, fd_evts[dir]);
return 0;
}
REGPRM2 static int __fd_clr(const int fd, int dir)
{
- FD_CLR(fd, fd_evts[dir]);
+ hap_fd_clr(fd, fd_evts[dir]);
return 0;
}
REGPRM2 static int __fd_cond_s(const int fd, int dir)
{
int ret;
- ret = !FD_ISSET(fd, fd_evts[dir]);
+ ret = !hap_fd_isset(fd, fd_evts[dir]);
if (ret)
- FD_SET(fd, fd_evts[dir]);
+ hap_fd_set(fd, fd_evts[dir]);
return ret;
}
REGPRM2 static int __fd_cond_c(const int fd, int dir)
{
int ret;
- ret = FD_ISSET(fd, fd_evts[dir]);
+ ret = hap_fd_isset(fd, fd_evts[dir]);
if (ret)
- FD_CLR(fd, fd_evts[dir]);
+ hap_fd_clr(fd, fd_evts[dir]);
return ret;
}
REGPRM1 static void __fd_rem(const int fd)
{
- FD_CLR(fd, fd_evts[DIR_RD]);
- FD_CLR(fd, fd_evts[DIR_WR]);
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
}
/*
@@ -93,33 +106,20 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
unsigned rn, wn; /* read new, write new */
nbfd = 0;
- for (fds = 0; (fds * BITS_PER_INT) < maxfd; fds++) {
-
- rn = ((int*)fd_evts[DIR_RD])[fds];
- wn = ((int*)fd_evts[DIR_WR])[fds];
+ for (fds = 0; (fds * 8*sizeof(**fd_evts)) < maxfd; fds++) {
+ rn = fd_evts[DIR_RD][fds];
+ wn = fd_evts[DIR_WR][fds];
- if ((rn|wn)) {
- for (count = 0, fd = fds * BITS_PER_INT; count < BITS_PER_INT && fd < maxfd; count++, fd++) {
-#define FDSETS_ARE_INT_ALIGNED
-#ifdef FDSETS_ARE_INT_ALIGNED
-
-#define WE_REALLY_KNOW_THAT_FDSETS_ARE_INTS
-#ifdef WE_REALLY_KNOW_THAT_FDSETS_ARE_INTS
- sr = (rn >> count) & 1;
- sw = (wn >> count) & 1;
-#else
- sr = FD_ISSET(fd&(BITS_PER_INT-1), (typeof(fd_set*))&rn);
- sw = FD_ISSET(fd&(BITS_PER_INT-1), (typeof(fd_set*))&wn);
-#endif
-#else
- sr = FD_ISSET(fd, fd_evts[DIR_RD]);
- sw = FD_ISSET(fd, fd_evts[DIR_WR]);
-#endif
- if ((sr|sw)) {
- poll_events[nbfd].fd = fd;
- poll_events[nbfd].events = (sr ? POLLIN : 0) | (sw ? POLLOUT : 0);
- nbfd++;
- }
+ if (!(rn|wn))
+ continue;
+
+ for (count = 0, fd = fds * 8*sizeof(**fd_evts); count < 8*sizeof(**fd_evts) && fd < maxfd; count++, fd++) {
+ sr = (rn >> count) & 1;
+ sw = (wn >> count) & 1;
+ if ((sr|sw)) {
+ poll_events[nbfd].fd = fd;
+ poll_events[nbfd].events = (sr ? POLLIN : 0) | (sw ? POLLOUT : 0);
+ nbfd++;
}
}
}
@@ -149,14 +149,14 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
/* ok, we found one active fd */
status--;
- if (FD_ISSET(fd, fd_evts[DIR_RD])) {
+ if (hap_fd_isset(fd, fd_evts[DIR_RD])) {
if (fdtab[fd].state == FD_STCLOSE)
continue;
if (poll_events[count].revents & ( POLLIN | POLLERR | POLLHUP ))
fdtab[fd].cb[DIR_RD].f(fd);
}
- if (FD_ISSET(fd, fd_evts[DIR_WR])) {
+ if (hap_fd_isset(fd, fd_evts[DIR_WR])) {
if (fdtab[fd].state == FD_STCLOSE)
continue;
if (poll_events[count].revents & ( POLLOUT | POLLERR | POLLHUP ))
@@ -174,21 +174,20 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
REGPRM1 static int _do_init(struct poller *p)
{
__label__ fail_swevt, fail_srevt, fail_pe;
- int fd_set_bytes;
+ int fd_evts_bytes;
p->private = NULL;
- fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;
+ fd_evts_bytes = (global.maxsock + sizeof(**fd_evts) - 1) / sizeof(**fd_evts) * sizeof(**fd_evts);
- poll_events = (struct pollfd*)
- calloc(1, sizeof(struct pollfd) * global.maxsock);
+ poll_events = calloc(1, sizeof(struct pollfd) * global.maxsock);
if (poll_events == NULL)
goto fail_pe;
- if ((fd_evts[DIR_RD] = (fd_set *)calloc(1, fd_set_bytes)) == NULL)
+ if ((fd_evts[DIR_RD] = calloc(1, fd_evts_bytes)) == NULL)
goto fail_srevt;
- if ((fd_evts[DIR_WR] = (fd_set *)calloc(1, fd_set_bytes)) == NULL)
+ if ((fd_evts[DIR_WR] = calloc(1, fd_evts_bytes)) == NULL)
goto fail_swevt;
return 1;
--
1.8.1.5

View File

@ -1,77 +0,0 @@
From f4096052b9397e29c3638651e7487c047081c00c Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Sun, 31 Mar 2013 14:41:15 +0200
Subject: [PATCH 40/42] BUG/MAJOR: ev_select: disable the select() poller if
maxsock > FD_SETSIZE
Some recent glibc updates have added controls on FD_SET/FD_CLR/FD_ISSET
that crash the program if it tries to use a file descriptor larger than
FD_SETSIZE.
For this reason, we now control the compatibility between global.maxsock
and FD_SETSIZE, and refuse to use select() if there too many FDs are
expected to be used. Note that on Solaris, FD_SETSIZE is already forced
to 65536, and that FreeBSD and OpenBSD allow it to be redefined, though
this is not needed thanks to kqueue which is much more efficient.
In practice, since poll() is enabled on all targets, it should not cause
any problem, unless it is explicitly disabled.
This change must be backported to 1.4 because the crashes caused by glibc
have already been reported on this version.
(cherry picked from commit 3fa87b1db95bc4d6640999462bdae620bff147c6)
---
src/ev_select.c | 7 +++++++
src/haproxy.c | 11 ++++++++++-
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/src/ev_select.c b/src/ev_select.c
index 5a87282..1f35b54 100644
--- a/src/ev_select.c
+++ b/src/ev_select.c
@@ -170,6 +170,10 @@ REGPRM1 static int _do_init(struct poller *p)
int fd_set_bytes;
p->private = NULL;
+
+ if (global.maxsock > FD_SETSIZE)
+ goto fail_revt;
+
fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;
if ((tmp_evts[DIR_RD] = (fd_set *)calloc(1, fd_set_bytes)) == NULL)
@@ -217,6 +221,9 @@ REGPRM1 static void _do_term(struct poller *p)
*/
REGPRM1 static int _do_test(struct poller *p)
{
+ if (global.maxsock > FD_SETSIZE)
+ return 0;
+
return 1;
}
diff --git a/src/haproxy.c b/src/haproxy.c
index c302143..1d588e1 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -711,7 +711,16 @@ void init(int argc, char **argv)
list_pollers(stderr);
if (!init_pollers()) {
- Alert("No polling mechanism available.\n");
+ Alert("No polling mechanism available.\n"
+ " It is likely that haproxy was built with TARGET=generic and that FD_SETSIZE\n"
+ " is too low on this platform to support maxconn and the number of listeners\n"
+ " and servers. You should rebuild haproxy specifying your system using TARGET=\n"
+ " in order to support other polling systems (poll, epoll, kqueue) or reduce the\n"
+ " global maxconn setting to accomodate the system's limitation. For reference,\n"
+ " FD_SETSIZE=%d on this system, global.maxconn=%d resulting in a maximum of\n"
+ " %d file descriptors. You should thus reduce global.maxconn by %d. Also,\n"
+ " check build settings using 'haproxy -vv'.\n\n",
+ FD_SETSIZE, global.maxconn, global.maxsock, (global.maxsock + 1 - FD_SETSIZE) / 2);
exit(1);
}
if (global.mode & (MODE_VERBOSE|MODE_DEBUG)) {
--
1.8.1.5

View File

@ -1,60 +0,0 @@
From 4c5a09bb3c62070f139159378a8d68897b7ec8d6 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Tue, 2 Apr 2013 08:14:29 +0200
Subject: [PATCH 41/42] BUILD: enable poll() by default in the makefile
This allows to build haproxy for unknown targets and still have poll().
If for any reason a target does not support it, just passing USE_POLL=""
disables it.
(cherry picked from commit 32e65ef62570cbace7368ebe7262b48c28b31bb7)
---
Makefile | 5 ++++-
README | 9 ++++++++-
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index 8d82543..f4dbccd 100644
--- a/Makefile
+++ b/Makefile
@@ -172,7 +172,10 @@ LDFLAGS = $(ARCH_FLAGS) -g
# Depending on the target platform, some options are set, as well as some
# CFLAGS and LDFLAGS. The USE_* values are set to "implicit" so that they are
# not reported in the build options string. You should not have to change
-# anything there.
+# anything there. poll() is always supported, unless explicitly disabled by
+# passing USE_POLL="" on the make command line.
+USE_POLL = default
+
ifeq ($(TARGET),generic)
# generic system target has nothing specific
USE_POLL = implicit
diff --git a/README b/README
index aca83f9..c99897e 100644
--- a/README
+++ b/README
@@ -12,7 +12,7 @@
To build haproxy, you will need :
- GNU make. Neither Solaris nor OpenBSD's make work with the GNU Makefile.
However, specific Makefiles for BSD and OSX are provided.
- - GCC between 2.91 and 4.5.0. Others may work, but not tested.
+ - GCC between 2.91 and 4.7. Others may work, but not tested.
- GNU ld
Also, you might want to build with libpcre support, which will provide a very
@@ -100,6 +100,13 @@ otherwise __fd_select() will be used while not being present in the libc.
If you get build errors because of strange symbols or section mismatches,
simply remove -g from DEBUG_CFLAGS.
+You can easily define your own target with the GNU Makefile. Unknown targets
+are processed with no default option except USE_POLL=default. So you can very
+well use that property to define your own set of options. USE_POLL can even be
+disabled by setting USE_POLL="". For example :
+
+ $ gmake TARGET=tiny USE_POLL="" TARGET_CFLAGS=-fomit-frame-pointer
+
2) How to install it
--------------------
--
1.8.1.5

View File

@ -1,57 +0,0 @@
From 2d7b45e7321437c1f9c4d0f2c46a793ef9d059c0 Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Tue, 2 Apr 2013 08:17:43 +0200
Subject: [PATCH 42/42] BUILD: add explicit support for Mac OS/X
The "osx" target may now be passed in the TARGET variable. It supports
the same features as FreeBSD and allows its users to use the GNU makefile
instead of the platform-specific makefile which lacks some features.
(cherry picked from commit 8624cab29c52db9052bf022683cfd3d11369cc0d)
---
Makefile | 8 ++++++++
README | 3 ++-
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index f4dbccd..57692fe 100644
--- a/Makefile
+++ b/Makefile
@@ -245,6 +245,13 @@ ifeq ($(TARGET),freebsd)
USE_TPROXY = implicit
USE_LIBCRYPT = implicit
else
+ifeq ($(TARGET),osx)
+ # This is for Mac OS/X
+ USE_POLL = implicit
+ USE_KQUEUE = implicit
+ USE_TPROXY = implicit
+ USE_LIBCRYPT = implicit
+else
ifeq ($(TARGET),openbsd)
# This is for OpenBSD >= 3.0
USE_POLL = implicit
@@ -267,6 +274,7 @@ ifeq ($(TARGET),cygwin)
endif # cygwin
endif # aix52
endif # openbsd
+endif # osx
endif # freebsd
endif # solaris
endif # linux2628
diff --git a/README b/README
index c99897e..7897cb3 100644
--- a/README
+++ b/README
@@ -28,7 +28,8 @@ and assign it to the TARGET variable :
- linux2628 for Linux 2.6.28 and above (enables splice and tproxy)
- solaris for Solaris 8 or 10 (others untested)
- freebsd for FreeBSD 5 to 8.0 (others untested)
- - openbsd for OpenBSD 3.1 to 4.6 (others untested)
+ - osx for Mac OS/X
+ - openbsd for OpenBSD 3.1 to 5.2 (others untested)
- aix52 for AIX 5.2
- cygwin for Cygwin
- generic for any other OS.
--
1.8.1.5

View File

@ -1,102 +0,0 @@
From: Willy Tarreau <w@1wt.eu>
Date: Fri, 29 Mar 2013 11:31:49 +0000 (+0100)
Subject: BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
X-Git-Tag: v1.4.23~1
X-Git-Url: http://git.1wt.eu:81/web?p=haproxy-1.4.git;a=commitdiff_plain;h=dc80672211
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
During normal HTTP request processing, request buffers are realigned if
there are less than global.maxrewrite bytes available after them, in
order to leave enough room for rewriting headers after the request. This
is done in http_wait_for_request().
However, if some HTTP inspection happens during a "tcp-request content"
rule, this realignment is not performed. In theory this is not a problem
because empty buffers are always aligned and TCP inspection happens at
the beginning of a connection. But with HTTP keep-alive, it also happens
at the beginning of each subsequent request. So if a second request was
pipelined by the client before the first one had a chance to be forwarded,
the second request will not be realigned. Then, http_wait_for_request()
will not perform such a realignment either because the request was
already parsed and marked as such. The consequence of this, is that the
rewrite of a sufficient number of such pipelined, unaligned requests may
leave less room past the request been processed than the configured
reserve, which can lead to a buffer overflow if request processing appends
some data past the end of the buffer.
A number of conditions are required for the bug to be triggered :
- HTTP keep-alive must be enabled ;
- HTTP inspection in TCP rules must be used ;
- some request appending rules are needed (reqadd, x-forwarded-for)
- since empty buffers are always realigned, the client must pipeline
enough requests so that the buffer always contains something till
the point where there is no more room for rewriting.
While such a configuration is quite unlikely to be met (which is
confirmed by the bug's lifetime), a few people do use these features
together for very specific usages. And more importantly, writing such
a configuration and the request to attack it is trivial.
A quick workaround consists in forcing keep-alive off by adding
"option httpclose" or "option forceclose" in the frontend. Alternatively,
disabling HTTP-based TCP inspection rules is enough if the application
supports it.
At first glance, this bug does not look like it could lead to remote code
execution, as the overflowing part is controlled by the configuration and
not by the user. But some deeper analysis should be performed to confirm
this. And anyway, corrupting the process' memory and crashing it is quite
trivial.
Special thanks go to Yves Lafon from the W3C who reported this bug and
deployed significant efforts to collect the relevant data needed to
understand it in less than one week.
CVE-2013-1912 was assigned to this issue.
Note that 1.4 is also affected so the fix must be backported.
(cherry picked from commit aae75e3279c6c9bd136413a72dafdcd4986bb89a)
---
diff --git a/src/proto_http.c b/src/proto_http.c
index a52c038..a768eb5 100644
--- a/src/proto_http.c
+++ b/src/proto_http.c
@@ -8347,6 +8347,14 @@ acl_fetch_proto_http(struct proxy *px, struct session *s, void *l7, int dir,
return 1;
}
+ /* If the buffer does not leave enough free space at the end,
+ * we must first realign it.
+ */
+ if (unlikely(req->lr > req->data &&
+ (req->r < req->lr || req->r > req->data + req->size - global.tune.maxrewrite)) &&
+ (req->l <= req->size - global.tune.maxrewrite))
+ http_buffer_heavy_realign(req, msg);
+
/* Try to decode HTTP request */
if (likely(req->lr < req->r))
http_msg_analyzer(req, msg, &txn->hdr_idx);
@@ -8364,6 +8372,20 @@ acl_fetch_proto_http(struct proxy *px, struct session *s, void *l7, int dir,
/* OK we got a valid HTTP request. We have some minor preparation to
* perform so that further checks can rely on HTTP tests.
*/
+
+ /* If the request was parsed but was too large, we must absolutely
+ * return an error so that it is not processed. At the moment this
+ * cannot happen, but if the parsers are to change in the future,
+ * we want this check to be maintained.
+ */
+ if (unlikely(req->lr > req->data &&
+ (req->r < req->lr || req->l > req->size - global.tune.maxrewrite ||
+ req->r > req->data + req->size - global.tune.maxrewrite))) {
+ msg->msg_state = HTTP_MSG_ERROR;
+ test->flags |= ACL_TEST_F_SET_RES_PASS;
+ return 1;
+ }
+
txn->meth = find_http_meth(msg->sol, msg->sl.rq.m_l);
if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
s->flags |= SN_REDIRECTABLE;