| | | |
|---|---|---|
| author | robot-contrib <[email protected]> | 2022-04-28 16:36:59 +0300 |
| committer | robot-contrib <[email protected]> | 2022-04-28 16:36:59 +0300 |
| commit | 1d80f65d6a77d0e4c1b3a18a6a970715934ecd75 (patch) | |
| tree | e0363932ca34036e90ac4cd461092c763acb3a4d /contrib/libs/curl/lib/transfer.c | |
| parent | 505c75794e448a38ffa0b066ccef3299aec10239 (diff) | |
Update contrib/libs/curl to 7.83.0
ref:72dd794f7af62e3844c14f0a9bcee77e66f30a36
Diffstat (limited to 'contrib/libs/curl/lib/transfer.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | contrib/libs/curl/lib/transfer.c | 80 |

1 file changed, 65 insertions, 15 deletions
```diff
diff --git a/contrib/libs/curl/lib/transfer.c b/contrib/libs/curl/lib/transfer.c
index 1f8019b3d0c..315da876c4a 100644
--- a/contrib/libs/curl/lib/transfer.c
+++ b/contrib/libs/curl/lib/transfer.c
@@ -79,6 +79,7 @@
 #include "urlapi-int.h"
 #include "hsts.h"
 #include "setopt.h"
+#include "headers.h"
 
 /* The last 3 #include files should be in this order */
 #include "curl_printf.h"
@@ -245,7 +246,7 @@ CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
       /* protocols that work without network cannot be paused. This is
          actually only FILE:// just now, and it can't pause since the transfer
          isn't done using the "normal" procedure. */
-      failf(data, "Read callback asked for PAUSE when not supported!");
+      failf(data, "Read callback asked for PAUSE when not supported");
       return CURLE_READ_ERROR;
     }
 
@@ -459,7 +460,7 @@ static int data_pending(const struct Curl_easy *data)
   /* in the case of libssh2, we can never be really sure that we have emptied
      its internal buffers so we MUST always try until we get EAGAIN back */
   return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
-#if defined(USE_NGHTTP2)
+#ifdef USE_NGHTTP2
     /* For HTTP/2, we may read up everything including response body
        with header fields in Curl_http_readwrite_headers. If no
        content-length is provided, curl waits for the connection
@@ -542,15 +543,14 @@ static CURLcode readwrite_data(struct Curl_easy *data,
 
     if(
 #ifdef USE_NGHTTP2
-       /* For HTTP/2, read data without caring about the content
-          length. This is safe because body in HTTP/2 is always
-          segmented thanks to its framing layer. Meanwhile, we have to
-          call Curl_read to ensure that http2_handle_stream_close is
-          called when we read all incoming bytes for a particular
-          stream. */
-       !is_http2 &&
+       /* For HTTP/2, read data without caring about the content length. This
+          is safe because body in HTTP/2 is always segmented thanks to its
+          framing layer. Meanwhile, we have to call Curl_read to ensure that
+          http2_handle_stream_close is called when we read all incoming bytes
+          for a particular stream. */
+       !is_http2 &&
 #endif
-       k->size != -1 && !k->header) {
+       k->size != -1 && !k->header) {
       /* make sure we don't read too much */
       curl_off_t totalleft = k->size - k->bytecount;
       if(totalleft < (curl_off_t)bytestoread)
@@ -571,7 +571,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
     else {
       /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
-      DEBUGF(infof(data, "readwrite_data: we're done!"));
+      DEBUGF(infof(data, "readwrite_data: we're done"));
       nread = 0;
     }
 
@@ -995,7 +995,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
       if(!data->state.scratch) {
         data->state.scratch = malloc(2 * data->set.upload_buffer_size);
         if(!data->state.scratch) {
-          failf(data, "Failed to alloc scratch buffer!");
+          failf(data, "Failed to alloc scratch buffer");
 
           return CURLE_OUT_OF_MEMORY;
         }
@@ -1360,7 +1360,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
 
   if(!data->state.url && !data->set.uh) {
     /* we can't do anything without URL */
-    failf(data, "No URL set!");
+    failf(data, "No URL set");
     return CURLE_URL_MALFORMAT;
   }
 
@@ -1377,7 +1377,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
     uc = curl_url_get(data->set.uh, CURLUPART_URL,
                       &data->set.str[STRING_SET_URL], 0);
     if(uc) {
-      failf(data, "No URL set!");
+      failf(data, "No URL set");
       return CURLE_URL_MALFORMAT;
     }
   }
@@ -1489,6 +1489,7 @@ CURLcode Curl_pretransfer(struct Curl_easy *data)
                                data->set.str[STRING_PROXYPASSWORD]);
 
   data->req.headerbytecount = 0;
+  Curl_headers_cleanup(data);
   return result;
 }
 
@@ -1533,6 +1534,8 @@ CURLcode Curl_follow(struct Curl_easy *data,
 
   DEBUGASSERT(type != FOLLOW_NONE);
 
+  if(type != FOLLOW_FAKE)
+    data->state.requests++; /* count all real follows */
   if(type == FOLLOW_REDIR) {
     if((data->set.maxredirs != -1) &&
        (data->state.followlocation >= data->set.maxredirs)) {
@@ -1608,10 +1611,57 @@ CURLcode Curl_follow(struct Curl_easy *data,
       return CURLE_OUT_OF_MEMORY;
   }
   else {
-
     uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
     if(uc)
       return Curl_uc_to_curlcode(uc);
+
+    /* Clear auth if this redirects to a different port number or protocol,
+       unless permitted */
+    if(!data->set.allow_auth_to_other_hosts && (type != FOLLOW_FAKE)) {
+      char *portnum;
+      int port;
+      bool clear = FALSE;
+
+      if(data->set.use_port && data->state.allow_port)
+        /* a custom port is used */
+        port = (int)data->set.use_port;
+      else {
+        uc = curl_url_get(data->state.uh, CURLUPART_PORT, &portnum,
+                          CURLU_DEFAULT_PORT);
+        if(uc) {
+          free(newurl);
+          return Curl_uc_to_curlcode(uc);
+        }
+        port = atoi(portnum);
+        free(portnum);
+      }
+      if(port != data->info.conn_remote_port) {
+        infof(data, "Clear auth, redirects to port from %u to %u",
+              data->info.conn_remote_port, port);
+        clear = TRUE;
+      }
+      else {
+        char *scheme;
+        const struct Curl_handler *p;
+        uc = curl_url_get(data->state.uh, CURLUPART_SCHEME, &scheme, 0);
+        if(uc) {
+          free(newurl);
+          return Curl_uc_to_curlcode(uc);
+        }
+
+        p = Curl_builtin_scheme(scheme);
+        if(p && (p->protocol != data->info.conn_protocol)) {
+          infof(data, "Clear auth, redirects scheme from %s to %s",
+                data->info.conn_scheme, scheme);
+          clear = TRUE;
+        }
+        free(scheme);
+      }
+      if(clear) {
+        Curl_safefree(data->state.aptr.user);
+        Curl_safefree(data->state.aptr.passwd);
+      }
+    }
   }
 
   if(type == FOLLOW_FAKE) {
```
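The largest new hunk in Curl_follow() gates its credential clearing on data->set.allow_auth_to_other_hosts, which appears to correspond to the public CURLOPT_UNRESTRICTED_AUTH option: unless an application enables it, the stored user/password is now dropped whenever a redirect moves to a different port or scheme. Below is a minimal application-side sketch of that opt-out, assuming a libcurl built from this tree; the URL and credentials are made-up placeholders, not part of the change itself.

```c
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(!curl)
    return 1;

  /* Hypothetical target and credentials, for illustration only */
  curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/start");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "user:secret");
  curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

  /* With the behavior added in this hunk, a redirect to another port or
     scheme clears the credentials by default. Enabling
     CURLOPT_UNRESTRICTED_AUTH (presumably the setting behind
     data->set.allow_auth_to_other_hosts) keeps sending them anyway. */
  curl_easy_setopt(curl, CURLOPT_UNRESTRICTED_AUTH, 1L);

  CURLcode res = curl_easy_perform(curl);
  if(res != CURLE_OK)
    fprintf(stderr, "curl_easy_perform() failed: %s\n",
            curl_easy_strerror(res));

  curl_easy_cleanup(curl);
  return 0;
}
```

Leaving CURLOPT_UNRESTRICTED_AUTH at its default keeps the stricter behavior introduced here, where a redirected request only carries the credentials if the target matches the original port and scheme.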
