http-backend: spool ref negotiation requests to buffer

When http-backend spawns "upload-pack" to do ref
negotiation, it streams the http request body to
upload-pack, who then streams the http response back to the
client as it reads. In theory, git can go full-duplex; the
client can consume our response while it is still sending
the request.  In practice, however, HTTP is a half-duplex
protocol. Even if our client is ready to read and write
simultaneously, we may have other HTTP infrastructure in the
way, including the webserver that spawns our CGI, or any
intermediate proxies.

In at least one documented case[1], this leads to deadlock
when trying a fetch over http. What happens is basically:

  1. Apache proxies the request to the CGI, http-backend.

  2. http-backend gzip-inflates the data and sends
     the result to upload-pack.

  3. upload-pack acts on the data and generates output over
     the pipe back to Apache. Apache isn't reading because
     it's busy writing (step 1).

This works fine most of the time, because the upload-pack
output ends up in a system pipe buffer, and Apache reads
it as soon as it finishes writing. But if both the request
and the response exceed the system pipe buffer size, then we
deadlock (Apache blocks writing to http-backend,
http-backend blocks writing to upload-pack, and upload-pack
blocks writing to Apache).
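
To see the wedge in miniature, here is a standalone
illustration (this program is not part of the patch): two
processes connected by pipes, each of which insists on
finishing its write before doing any read. Once both writes
exceed the pipe capacity, both block in write() forever:

    #include <sys/types.h>
    #include <unistd.h>

    #define BIG (1024 * 1024)   /* far beyond a typical 64KB pipe buffer */

    static char buf[BIG];

    int main(void)
    {
        int to_child[2], to_parent[2];
        pid_t pid;

        if (pipe(to_child) < 0 || pipe(to_parent) < 0)
            return 1;
        pid = fork();
        if (pid < 0)
            return 1;

        if (!pid) {
            /* the "upload-pack" side: write the whole response, then read */
            if (write(to_parent[1], buf, sizeof(buf)) < 0)
                return 1;
            return read(to_child[0], buf, sizeof(buf)) < 0;
        }

        /* the "Apache" side: write the whole request, then read the response */
        if (write(to_child[1], buf, sizeof(buf)) < 0)
            return 1;
        return read(to_parent[0], buf, sizeof(buf)) < 0;
    }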

We need to break the deadlock by spooling either the input
or the output. In this case, it's ideal to spool the input,
because Apache does not start reading either stdout _or_
stderr until we have consumed all of the input. So until we
do so, we cannot even get an error message out to the
client.

The solution is fairly straightforward: we read the request
body into an in-memory buffer in http-backend, freeing up
Apache, and then feed the data ourselves to upload-pack. But
there are a few important things to note (a standalone sketch
of the spooling loop follows this list):

  1. We limit the in-memory buffer to prevent an obvious
     denial-of-service attack. This is a new hard limit on
     requests, but it's unlikely to come into play. The
     default value is 10MB, which covers even the ridiculous
     100,000-ref negotiation in the included test (that
     actually caps out just over 5MB). But it's configurable
     on the off chance that you don't mind spending some
     extra memory to make even ridiculous requests work.

  2. We must take care only to buffer when we have to. For
     pushes, the incoming packfile may be of arbitrary
     size, and we should connect the input directly to
     receive-pack. There's no deadlock problem here, though,
     because we do not produce any output until the whole
     packfile has been read.

     For upload-pack's initial ref advertisement, we
     similarly do not need to buffer. Even though we may
     generate a lot of output, there is no request body at
     all (i.e., it is a GET, not a POST).
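
To make the shape of the fix concrete, here is the spooling
loop in isolation: a standalone sketch assuming plain POSIX,
with a hypothetical spool_fd() and MAX_SPOOL standing in for
the patch's read_request() and max_request_buffer (which are
built on git's xmalloc(), read_in_full(), alloc_nr(), and
REALLOC_ARRAY() instead):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define MAX_SPOOL (10 * 1024 * 1024)    /* mirrors the 10MB default cap */

    /*
     * Read fd to EOF into one malloc'd buffer, growing geometrically
     * but refusing to pass MAX_SPOOL; returns the length, or -1 on
     * error or an oversized request.
     */
    static ssize_t spool_fd(int fd, unsigned char **out)
    {
        size_t len = 0, alloc = 8192;
        unsigned char *buf = malloc(alloc);

        if (!buf)
            return -1;
        while (1) {
            ssize_t cnt = read(fd, buf + len, alloc - len);

            if (cnt < 0) {
                free(buf);
                return -1;
            }
            if (!cnt) {     /* EOF; the sender is now free to read us */
                *out = buf;
                return len;
            }
            len += cnt;
            if (len == alloc) {     /* buffer full; grow if the cap allows */
                unsigned char *bigger;

                if (alloc == MAX_SPOOL) {
                    free(buf);
                    return -1;      /* reject oversized requests */
                }
                alloc *= 2;
                if (alloc > MAX_SPOOL)
                    alloc = MAX_SPOOL;
                bigger = realloc(buf, alloc);
                if (!bigger) {
                    free(buf);
                    return -1;
                }
                buf = bigger;
            }
        }
    }

    int main(void)
    {
        unsigned char *body;
        ssize_t n = spool_fd(0, &body);

        if (n < 0) {
            fprintf(stderr, "read error or request too large\n");
            return 1;
        }
        fprintf(stderr, "spooled %ld bytes\n", (long)n);
        free(body);
        return 0;
    }

Note that when the body is gzipped, the patch spools it in its
compressed form and inflates from memory, so the limit applies
to the on-the-wire size of the request.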


Test-adapted-from: Dennis Kaarsemaker <>
Signed-off-by: Jeff King <>
Signed-off-by: Junio C Hamano <>
---

diff --git a/Documentation/git-http-backend.txt b/Documentation/git-http-backend.txt
@@ -255,6 +255,15 @@ The GIT_HTTP_EXPORT_ALL environmental variable may be passed to
 'git-http-backend' to bypass the check for the "git-daemon-export-ok"
 file in each repository before allowing export of that repository.
 
+The `GIT_HTTP_MAX_REQUEST_BUFFER` environment variable (or the
+`http.maxRequestBuffer` config variable) may be set to change the
+largest ref negotiation request that git will handle during a fetch; any
+fetch requiring a larger buffer will not succeed. This value should not
+normally need to be changed, but may be helpful if you are fetching from
+a repository with an extremely large number of refs. The value can be
+specified with a unit (e.g., `100M` for 100 megabytes). The default is
+10 megabytes.
+
 The backend process sets GIT_COMMITTER_NAME to '$REMOTE_USER' and
 GIT_COMMITTER_EMAIL to '${REMOTE_USER}@http.${REMOTE_ADDR}',
 ensuring that any reflogs created by 'git-receive-pack' contain some
diff --git a/http-backend.c b/http-backend.c
@@ -13,18 +13,20 @@ static const char content_type[] = "Content-Type";
 static const char content_length[] = "Content-Length";
 static const char last_modified[] = "Last-Modified";
 static int getanyfile = 1;
+static unsigned long max_request_buffer = 10 * 1024 * 1024;
 
 static struct string_list *query_params;
 
 struct rpc_service {
 	const char *name;
 	const char *config_name;
+	unsigned buffer_input : 1;
 	signed enabled : 2;
 };
 
 static struct rpc_service rpc_service[] = {
-	{ "upload-pack", "uploadpack", 1 },
-	{ "receive-pack", "receivepack", -1 },
+	{ "upload-pack", "uploadpack", 1, 1 },
+	{ "receive-pack", "receivepack", 0, -1 },
 };
 
 static struct string_list *get_parameters(void)
@@ -225,6 +227,7 @@ static void http_config(void)
 	struct strbuf var = STRBUF_INIT;
 
 	git_config_get_bool("http.getanyfile", &getanyfile);
+	git_config_get_ulong("http.maxrequestbuffer", &max_request_buffer);
 
 	for (i = 0; i < ARRAY_SIZE(rpc_service); i++) {
 		struct rpc_service *svc = &rpc_service[i];
@@ -266,9 +269,52 @@ static struct rpc_service *select_service(const char *name)
 	return svc;
 }
 
-static void inflate_request(const char *prog_name, int out)
+/*
+ * This is basically strbuf_read(), except that if we
+ * hit max_request_buffer we die (we'd rather reject a
+ * maliciously large request than chew up infinite memory).
+ */
+static ssize_t read_request(int fd, unsigned char **out)
+{
+	size_t len = 0, alloc = 8192;
+	unsigned char *buf = xmalloc(alloc);
+
+	if (max_request_buffer < alloc)
+		max_request_buffer = alloc;
+
+	while (1) {
+		ssize_t cnt;
+
+		cnt = read_in_full(fd, buf + len, alloc - len);
+		if (cnt < 0) {
+			free(buf);
+			return -1;
+		}
+
+		/* partial read from read_in_full means we hit EOF */
+		len += cnt;
+		if (len < alloc) {
+			*out = buf;
+			return len;
+		}
+
+		/* otherwise, grow and try again (if we can) */
+		if (alloc == max_request_buffer)
+			die("request was larger than our maximum size (%lu);"
+			    " try setting GIT_HTTP_MAX_REQUEST_BUFFER",
+			    max_request_buffer);
+
+		alloc = alloc_nr(alloc);
+		if (alloc > max_request_buffer)
+			alloc = max_request_buffer;
+		REALLOC_ARRAY(buf, alloc);
+	}
+}
+
+static void inflate_request(const char *prog_name, int out, int buffer_input)
 {
 	git_zstream stream;
+	unsigned char *full_request = NULL;
 	unsigned char in_buf[8192];
 	unsigned char out_buf[8192];
 	unsigned long cnt = 0;
@@ -277,11 +323,21 @@ static void inflate_request(const char *prog_name, int out)
 	git_inflate_init_gzip(&stream);
 
 	while (1) {
-		ssize_t n = xread(0, in_buf, sizeof(in_buf));
+		ssize_t n;
+
+		if (buffer_input) {
+			if (full_request)
+				n = 0; /* nothing left to read */
+			else
+				n = read_request(0, &full_request);
+			stream.next_in = full_request;
+		} else {
+			n = xread(0, in_buf, sizeof(in_buf));
+			stream.next_in = in_buf;
+		}
+
 		if (n <= 0)
 			die("request ended in the middle of the gzip stream");
 
-		stream.next_in = in_buf;
 		stream.avail_in = n;
 
 		while (0 < stream.avail_in) {
@@ -307,9 +363,22 @@ static void inflate_request(const char *prog_name, int out)
 done:
 	git_inflate_end(&stream);
 	close(out);
+	free(full_request);
 }
 
-static void run_service(const char **argv)
+static void copy_request(const char *prog_name, int out)
+{
+	unsigned char *buf;
+	ssize_t n = read_request(0, &buf);
+	if (n < 0)
+		die_errno("error reading request body");
+	if (write_in_full(out, buf, n) != n)
+		die("%s aborted reading request", prog_name);
+	close(out);
+	free(buf);
+}
+
+static void run_service(const char **argv, int buffer_input)
 {
 	const char *encoding = getenv("HTTP_CONTENT_ENCODING");
 	const char *user = getenv("REMOTE_USER");
@@ -334,7 +403,7 @@ static void run_service(const char **argv)
 			 "GIT_COMMITTER_EMAIL=%s@http.%s", user, host);
 
 	cld.argv = argv;
-	if (gzipped_request)
+	if (buffer_input || gzipped_request)
 		cld.in = -1;
 	cld.git_cmd = 1;
 	if (start_command(&cld))
@@ -342,7 +411,9 @@ static void run_service(const char **argv)
 
 	close(1);
 	if (gzipped_request)
-		inflate_request(argv[0], cld.in);
+		inflate_request(argv[0], cld.in, buffer_input);
+	else if (buffer_input)
+		copy_request(argv[0], cld.in);
 	else
 		close(0);
@@ -392,7 +463,7 @@ static void get_info_refs(char *arg)
 
 		argv[0] = svc->name;
-		run_service(argv);
+		run_service(argv, 0);
 	} else {
@@ -496,7 +567,7 @@ static void service_rpc(char *service_name)
 
 	argv[0] = svc->name;
-	run_service(argv);
+	run_service(argv, svc->buffer_input);
 }
@@ -623,6 +694,9 @@ int main(int argc, char **argv)
 		    access("git-daemon-export-ok", F_OK) )
 			not_found("Repository not exported: '%s'", dir);
 
+	max_request_buffer = git_env_ulong("GIT_HTTP_MAX_REQUEST_BUFFER",
+					   max_request_buffer);
+
 	http_config();
 	cmd->imp(cmd_arg);
 	return 0;

diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh
@@ -253,5 +253,16 @@ test_expect_success EXPENSIVE 'clone the 50,000 tag repo to check OS command lin
 	test_line_count = 50000 tags
 	)
 '
 
+test_expect_success EXPENSIVE 'http can handle enormous ref negotiation' '
+	git -C too-many-refs fetch -q --tags &&
+	(
+		cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
+		create_tags 50001 100000
+	) &&
+	git -C too-many-refs fetch -q --tags &&
+	git -C too-many-refs for-each-ref refs/tags >tags &&
+	test_line_count = 100000 tags
+'
+
 stop_httpd