author    hiddenpath <hiddenpath@yandex-team.com>  2024-04-02 23:50:23 +0300
committer hiddenpath <hiddenpath@yandex-team.com>  2024-04-03 00:02:31 +0300
commit    8923c6d2c438e0aeed2e06b8b0275e1864eeee33 (patch)
tree      6b5e476699fc0be5091cb650654ef5f602c8afff /contrib/go/_std_1.22/src/net/http
parent    d18afd09df2a08cd023012593b46109b77713a6c (diff)
download  ydb-8923c6d2c438e0aeed2e06b8b0275e1864eeee33.tar.gz
Update golang to 1.22.1
2967d19c907adf59101a1f47b4208bd0b04a6186
Diffstat (limited to 'contrib/go/_std_1.22/src/net/http')
-rw-r--r--  contrib/go/_std_1.22/src/net/http/client.go  1038
-rw-r--r--  contrib/go/_std_1.22/src/net/http/clone.go  74
-rw-r--r--  contrib/go/_std_1.22/src/net/http/cookie.go  468
-rw-r--r--  contrib/go/_std_1.22/src/net/http/doc.go  110
-rw-r--r--  contrib/go/_std_1.22/src/net/http/filetransport.go  142
-rw-r--r--  contrib/go/_std_1.22/src/net/http/fs.go  1057
-rw-r--r--  contrib/go/_std_1.22/src/net/http/h2_bundle.go  11488
-rw-r--r--  contrib/go/_std_1.22/src/net/http/h2_error.go  37
-rw-r--r--  contrib/go/_std_1.22/src/net/http/header.go  280
-rw-r--r--  contrib/go/_std_1.22/src/net/http/http.go  165
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httptest/httptest.go  90
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httptest/recorder.go  255
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httptest/server.go  385
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httptest/ya.make  9
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httptrace/trace.go  255
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httptrace/ya.make  7
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httputil/dump.go  337
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httputil/httputil.go  41
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httputil/persist.go  431
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httputil/reverseproxy.go  831
-rw-r--r--  contrib/go/_std_1.22/src/net/http/httputil/ya.make  10
-rw-r--r--  contrib/go/_std_1.22/src/net/http/internal/ascii/print.go  61
-rw-r--r--  contrib/go/_std_1.22/src/net/http/internal/ascii/ya.make  7
-rw-r--r--  contrib/go/_std_1.22/src/net/http/internal/chunked.go  287
-rw-r--r--  contrib/go/_std_1.22/src/net/http/internal/testcert/testcert.go  65
-rw-r--r--  contrib/go/_std_1.22/src/net/http/internal/testcert/ya.make  7
-rw-r--r--  contrib/go/_std_1.22/src/net/http/internal/ya.make  7
-rw-r--r--  contrib/go/_std_1.22/src/net/http/jar.go  27
-rw-r--r--  contrib/go/_std_1.22/src/net/http/mapping.go  78
-rw-r--r--  contrib/go/_std_1.22/src/net/http/method.go  20
-rw-r--r--  contrib/go/_std_1.22/src/net/http/pattern.go  529
-rw-r--r--  contrib/go/_std_1.22/src/net/http/pprof/pprof.go  464
-rw-r--r--  contrib/go/_std_1.22/src/net/http/pprof/ya.make  7
-rw-r--r--  contrib/go/_std_1.22/src/net/http/request.go  1554
-rw-r--r--  contrib/go/_std_1.22/src/net/http/response.go  371
-rw-r--r--  contrib/go/_std_1.22/src/net/http/responsecontroller.go  147
-rw-r--r--  contrib/go/_std_1.22/src/net/http/roundtrip.go  18
-rw-r--r--  contrib/go/_std_1.22/src/net/http/routing_index.go  124
-rw-r--r--  contrib/go/_std_1.22/src/net/http/routing_tree.go  240
-rw-r--r--  contrib/go/_std_1.22/src/net/http/servemux121.go  211
-rw-r--r--  contrib/go/_std_1.22/src/net/http/server.go  3843
-rw-r--r--  contrib/go/_std_1.22/src/net/http/sniff.go  304
-rw-r--r--  contrib/go/_std_1.22/src/net/http/socks_bundle.go  473
-rw-r--r--  contrib/go/_std_1.22/src/net/http/status.go  210
-rw-r--r--  contrib/go/_std_1.22/src/net/http/transfer.go  1137
-rw-r--r--  contrib/go/_std_1.22/src/net/http/transport.go  2965
-rw-r--r--  contrib/go/_std_1.22/src/net/http/transport_default_other.go  16
-rw-r--r--  contrib/go/_std_1.22/src/net/http/ya.make  34
48 files changed, 30716 insertions, 0 deletions
diff --git a/contrib/go/_std_1.22/src/net/http/client.go b/contrib/go/_std_1.22/src/net/http/client.go
new file mode 100644
index 0000000000..8fc348fe5d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/client.go
@@ -0,0 +1,1038 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP client. See RFC 7230 through 7235.
+//
+// This is the high-level Client interface.
+// The low-level implementation is in transport.go.
+
+package http
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http/internal/ascii"
+ "net/url"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// A Client is an HTTP client. Its zero value ([DefaultClient]) is a
+// usable client that uses [DefaultTransport].
+//
+// The [Client.Transport] typically has internal state (cached TCP
+// connections), so Clients should be reused instead of created as
+// needed. Clients are safe for concurrent use by multiple goroutines.
+//
+// A Client is higher-level than a [RoundTripper] (such as [Transport])
+// and additionally handles HTTP details such as cookies and
+// redirects.
+//
+// When following redirects, the Client will forward all headers set on the
+// initial [Request] except:
+//
+// - when forwarding sensitive headers like "Authorization",
+// "WWW-Authenticate", and "Cookie" to untrusted targets.
+// These headers will be ignored when following a redirect to a domain
+// that is not a subdomain match or exact match of the initial domain.
+// For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com"
+// will forward the sensitive headers, but a redirect to "bar.com" will not.
+// - when forwarding the "Cookie" header with a non-nil cookie Jar.
+// Since each redirect may mutate the state of the cookie jar,
+// a redirect may possibly alter a cookie set in the initial request.
+// When forwarding the "Cookie" header, any mutated cookies will be omitted,
+// with the expectation that the Jar will insert those mutated cookies
+// with the updated values (assuming the origin matches).
+// If Jar is nil, the initial cookies are forwarded without change.
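+//
+// A short illustrative sketch (the timeout and redirect limit here are
+// arbitrary choices, not defaults):
+//
+//	client := &http.Client{
+//		Timeout: 10 * time.Second,
+//		CheckRedirect: func(req *http.Request, via []*http.Request) error {
+//			if len(via) >= 3 {
+//				return http.ErrUseLastResponse // stop; return the last response unclosed
+//			}
+//			return nil
+//		},
+//	}
+//	resp, err := client.Get("http://example.com")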
+type Client struct {
+ // Transport specifies the mechanism by which individual
+ // HTTP requests are made.
+ // If nil, DefaultTransport is used.
+ Transport RoundTripper
+
+ // CheckRedirect specifies the policy for handling redirects.
+ // If CheckRedirect is not nil, the client calls it before
+ // following an HTTP redirect. The arguments req and via are
+ // the upcoming request and the requests made already, oldest
+ // first. If CheckRedirect returns an error, the Client's Get
+ // method returns both the previous Response (with its Body
+ // closed) and CheckRedirect's error (wrapped in a url.Error)
+ // instead of issuing the Request req.
+ // As a special case, if CheckRedirect returns ErrUseLastResponse,
+ // then the most recent response is returned with its body
+ // unclosed, along with a nil error.
+ //
+ // If CheckRedirect is nil, the Client uses its default policy,
+ // which is to stop after 10 consecutive requests.
+ CheckRedirect func(req *Request, via []*Request) error
+
+ // Jar specifies the cookie jar.
+ //
+ // The Jar is used to insert relevant cookies into every
+ // outbound Request and is updated with the cookie values
+ // of every inbound Response. The Jar is consulted for every
+ // redirect that the Client follows.
+ //
+ // If Jar is nil, cookies are only sent if they are explicitly
+ // set on the Request.
+ Jar CookieJar
+
+ // Timeout specifies a time limit for requests made by this
+ // Client. The timeout includes connection time, any
+ // redirects, and reading the response body. The timer remains
+ // running after Get, Head, Post, or Do return and will
+ // interrupt reading of the Response.Body.
+ //
+ // A Timeout of zero means no timeout.
+ //
+ // The Client cancels requests to the underlying Transport
+ // as if the Request's Context ended.
+ //
+ // For compatibility, the Client will also use the deprecated
+ // CancelRequest method on Transport if found. New
+ // RoundTripper implementations should use the Request's Context
+ // for cancellation instead of implementing CancelRequest.
+ Timeout time.Duration
+}
+
+// DefaultClient is the default [Client] and is used by [Get], [Head], and [Post].
+var DefaultClient = &Client{}
+
+// RoundTripper is an interface representing the ability to execute a
+// single HTTP transaction, obtaining the [Response] for a given [Request].
+//
+// A RoundTripper must be safe for concurrent use by multiple
+// goroutines.
+type RoundTripper interface {
+ // RoundTrip executes a single HTTP transaction, returning
+ // a Response for the provided Request.
+ //
+ // RoundTrip should not attempt to interpret the response. In
+ // particular, RoundTrip must return err == nil if it obtained
+ // a response, regardless of the response's HTTP status code.
+ // A non-nil err should be reserved for failure to obtain a
+ // response. Similarly, RoundTrip should not attempt to
+ // handle higher-level protocol details such as redirects,
+ // authentication, or cookies.
+ //
+ // RoundTrip should not modify the request, except for
+ // consuming and closing the Request's Body. RoundTrip may
+ // read fields of the request in a separate goroutine. Callers
+ // should not mutate or reuse the request until the Response's
+ // Body has been closed.
+ //
+ // RoundTrip must always close the body, including on errors,
+ // but depending on the implementation may do so in a separate
+ // goroutine even after RoundTrip returns. This means that
+ // callers wanting to reuse the body for subsequent requests
+ // must arrange to wait for the Close call before doing so.
+ //
+ // The Request's URL and Header fields must be initialized.
+ RoundTrip(*Request) (*Response, error)
+}
+
+// refererForURL returns a referer without any authentication info or
+// an empty string if lastReq scheme is https and newReq scheme is http.
+// If the referer was explicitly set, then it will continue to be used.
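+//
+// For example (URLs illustrative), a previous request URL of
+// "https://user:pass@example.com/x" produces the referer
+// "https://example.com/x", with the userinfo stripped.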
+func refererForURL(lastReq, newReq *url.URL, explicitRef string) string {
+ // https://tools.ietf.org/html/rfc7231#section-5.5.2
+ // "Clients SHOULD NOT include a Referer header field in a
+ // (non-secure) HTTP request if the referring page was
+ // transferred with a secure protocol."
+ if lastReq.Scheme == "https" && newReq.Scheme == "http" {
+ return ""
+ }
+ if explicitRef != "" {
+ return explicitRef
+ }
+
+ referer := lastReq.String()
+ if lastReq.User != nil {
+ // This is not very efficient, but is the best we can
+ // do without:
+ // - introducing a new method on URL
+ // - creating a race condition
+ // - copying the URL struct manually, which would cause
+ // maintenance problems down the line
+ auth := lastReq.User.String() + "@"
+ referer = strings.Replace(referer, auth, "", 1)
+ }
+ return referer
+}
+
+// didTimeout is non-nil only if err != nil.
+func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) {
+ if c.Jar != nil {
+ for _, cookie := range c.Jar.Cookies(req.URL) {
+ req.AddCookie(cookie)
+ }
+ }
+ resp, didTimeout, err = send(req, c.transport(), deadline)
+ if err != nil {
+ return nil, didTimeout, err
+ }
+ if c.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ c.Jar.SetCookies(req.URL, rc)
+ }
+ }
+ return resp, nil, nil
+}
+
+func (c *Client) deadline() time.Time {
+ if c.Timeout > 0 {
+ return time.Now().Add(c.Timeout)
+ }
+ return time.Time{}
+}
+
+func (c *Client) transport() RoundTripper {
+ if c.Transport != nil {
+ return c.Transport
+ }
+ return DefaultTransport
+}
+
+// ErrSchemeMismatch is returned when a server returns an HTTP response to an HTTPS client.
+var ErrSchemeMismatch = errors.New("http: server gave HTTP response to HTTPS client")
+
+// send issues an HTTP request.
+// Caller should close resp.Body when done reading from it.
+func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, didTimeout func() bool, err error) {
+ req := ireq // req is either the original request, or a modified fork
+
+ if rt == nil {
+ req.closeBody()
+ return nil, alwaysFalse, errors.New("http: no Client.Transport or DefaultTransport")
+ }
+
+ if req.URL == nil {
+ req.closeBody()
+ return nil, alwaysFalse, errors.New("http: nil Request.URL")
+ }
+
+ if req.RequestURI != "" {
+ req.closeBody()
+ return nil, alwaysFalse, errors.New("http: Request.RequestURI can't be set in client requests")
+ }
+
+ // forkReq forks req into a shallow clone of ireq the first
+ // time it's called.
+ forkReq := func() {
+ if ireq == req {
+ req = new(Request)
+ *req = *ireq // shallow clone
+ }
+ }
+
+	// Most of the callers of send (Get, Post, et al.) don't need
+ // Headers, leaving it uninitialized. We guarantee to the
+ // Transport that this has been initialized, though.
+ if req.Header == nil {
+ forkReq()
+ req.Header = make(Header)
+ }
+
+ if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" {
+ username := u.Username()
+ password, _ := u.Password()
+ forkReq()
+ req.Header = cloneOrMakeHeader(ireq.Header)
+ req.Header.Set("Authorization", "Basic "+basicAuth(username, password))
+ }
+
+ if !deadline.IsZero() {
+ forkReq()
+ }
+ stopTimer, didTimeout := setRequestCancel(req, rt, deadline)
+
+ resp, err = rt.RoundTrip(req)
+ if err != nil {
+ stopTimer()
+ if resp != nil {
+ log.Printf("RoundTripper returned a response & error; ignoring response")
+ }
+ if tlsErr, ok := err.(tls.RecordHeaderError); ok {
+ // If we get a bad TLS record header, check to see if the
+ // response looks like HTTP and give a more helpful error.
+ // See golang.org/issue/11111.
+ if string(tlsErr.RecordHeader[:]) == "HTTP/" {
+ err = ErrSchemeMismatch
+ }
+ }
+ return nil, didTimeout, err
+ }
+ if resp == nil {
+ return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a nil *Response with a nil error", rt)
+ }
+ if resp.Body == nil {
+ // The documentation on the Body field says “The http Client and Transport
+ // guarantee that Body is always non-nil, even on responses without a body
+ // or responses with a zero-length body.” Unfortunately, we didn't document
+ // that same constraint for arbitrary RoundTripper implementations, and
+ // RoundTripper implementations in the wild (mostly in tests) assume that
+ // they can use a nil Body to mean an empty one (similar to Request.Body).
+ // (See https://golang.org/issue/38095.)
+ //
+ // If the ContentLength allows the Body to be empty, fill in an empty one
+ // here to ensure that it is non-nil.
+ if resp.ContentLength > 0 && req.Method != "HEAD" {
+ return nil, didTimeout, fmt.Errorf("http: RoundTripper implementation (%T) returned a *Response with content length %d but a nil Body", rt, resp.ContentLength)
+ }
+ resp.Body = io.NopCloser(strings.NewReader(""))
+ }
+ if !deadline.IsZero() {
+ resp.Body = &cancelTimerBody{
+ stop: stopTimer,
+ rc: resp.Body,
+ reqDidTimeout: didTimeout,
+ }
+ }
+ return resp, nil, nil
+}
+
+// timeBeforeContextDeadline reports whether the non-zero Time t is
+// before ctx's deadline, if any. If ctx does not have a deadline, it
+// always reports true (the deadline is considered infinite).
+func timeBeforeContextDeadline(t time.Time, ctx context.Context) bool {
+ d, ok := ctx.Deadline()
+ if !ok {
+ return true
+ }
+ return t.Before(d)
+}
+
+// knownRoundTripperImpl reports whether rt is a RoundTripper that's
+// maintained by the Go team and known to implement the latest
+// optional semantics (notably contexts). The Request is used
+// to check whether this particular request is using an alternate protocol,
+// in which case we need to check the RoundTripper for that protocol.
+func knownRoundTripperImpl(rt RoundTripper, req *Request) bool {
+ switch t := rt.(type) {
+ case *Transport:
+ if altRT := t.alternateRoundTripper(req); altRT != nil {
+ return knownRoundTripperImpl(altRT, req)
+ }
+ return true
+ case *http2Transport, http2noDialH2RoundTripper:
+ return true
+ }
+ // There's a very minor chance of a false positive with this.
+ // Instead of detecting our golang.org/x/net/http2.Transport,
+ // it might detect a Transport type in a different http2
+ // package. But I know of none, and the only problem would be
+ // some temporarily leaked goroutines if the transport didn't
+ // support contexts. So this is a good enough heuristic:
+ if reflect.TypeOf(rt).String() == "*http2.Transport" {
+ return true
+ }
+ return false
+}
+
+// setRequestCancel sets req.Cancel and adds a deadline context to req
+// if deadline is non-zero. The RoundTripper's type is used to
+// determine whether the legacy CancelRequest behavior should be used.
+//
+// As background, there are three ways to cancel a request:
+// First was Transport.CancelRequest. (deprecated)
+// Second was Request.Cancel.
+// Third was Request.Context.
+// This function populates the second and third, and uses the first if it really needs to.
+func setRequestCancel(req *Request, rt RoundTripper, deadline time.Time) (stopTimer func(), didTimeout func() bool) {
+ if deadline.IsZero() {
+ return nop, alwaysFalse
+ }
+ knownTransport := knownRoundTripperImpl(rt, req)
+ oldCtx := req.Context()
+
+ if req.Cancel == nil && knownTransport {
+ // If they already had a Request.Context that's
+ // expiring sooner, do nothing:
+ if !timeBeforeContextDeadline(deadline, oldCtx) {
+ return nop, alwaysFalse
+ }
+
+ var cancelCtx func()
+ req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline)
+ return cancelCtx, func() bool { return time.Now().After(deadline) }
+ }
+ initialReqCancel := req.Cancel // the user's original Request.Cancel, if any
+
+ var cancelCtx func()
+ if timeBeforeContextDeadline(deadline, oldCtx) {
+ req.ctx, cancelCtx = context.WithDeadline(oldCtx, deadline)
+ }
+
+ cancel := make(chan struct{})
+ req.Cancel = cancel
+
+ doCancel := func() {
+ // The second way in the func comment above:
+ close(cancel)
+ // The first way, used only for RoundTripper
+ // implementations written before Go 1.5 or Go 1.6.
+ type canceler interface{ CancelRequest(*Request) }
+ if v, ok := rt.(canceler); ok {
+ v.CancelRequest(req)
+ }
+ }
+
+ stopTimerCh := make(chan struct{})
+ var once sync.Once
+ stopTimer = func() {
+ once.Do(func() {
+ close(stopTimerCh)
+ if cancelCtx != nil {
+ cancelCtx()
+ }
+ })
+ }
+
+ timer := time.NewTimer(time.Until(deadline))
+ var timedOut atomic.Bool
+
+ go func() {
+ select {
+ case <-initialReqCancel:
+ doCancel()
+ timer.Stop()
+ case <-timer.C:
+ timedOut.Store(true)
+ doCancel()
+ case <-stopTimerCh:
+ timer.Stop()
+ }
+ }()
+
+ return stopTimer, timedOut.Load
+}
+
+// See 2 (end of page 4) of https://www.ietf.org/rfc/rfc2617.txt
+// "To receive authorization, the client sends the userid and password,
+// separated by a single colon (":") character, within a base64
+// encoded string in the credentials."
+// It is not meant to be urlencoded.
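+//
+// For example, basicAuth("user", "pass") returns "dXNlcjpwYXNz",
+// the base64 encoding of "user:pass".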
+func basicAuth(username, password string) string {
+ auth := username + ":" + password
+ return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+// Get issues a GET to the specified URL. If the response is one of
+// the following redirect codes, Get follows the redirect, up to a
+// maximum of 10 redirects:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// An error is returned if there were too many redirects or if there
+// was an HTTP protocol error. A non-2xx response doesn't cause an
+// error. Any returned error will be of type [*url.Error]. The url.Error
+// value's Timeout method will report true if the request timed out.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
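+// A minimal use, echoing the package example in doc.go:
+//
+//	resp, err := http.Get("http://example.com/")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer resp.Body.Close()
+//	body, err := io.ReadAll(resp.Body)
+//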
+// Get is a wrapper around DefaultClient.Get.
+//
+// To make a request with custom headers, use [NewRequest] and
+// DefaultClient.Do.
+//
+// To make a request with a specified context.Context, use [NewRequestWithContext]
+// and DefaultClient.Do.
+func Get(url string) (resp *Response, err error) {
+ return DefaultClient.Get(url)
+}
+
+// Get issues a GET to the specified URL. If the response is one of the
+// following redirect codes, Get follows the redirect after calling the
+// [Client.CheckRedirect] function:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// An error is returned if the [Client.CheckRedirect] function fails
+// or if there was an HTTP protocol error. A non-2xx response doesn't
+// cause an error. Any returned error will be of type [*url.Error]. The
+// url.Error value's Timeout method will report true if the request
+// timed out.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
+// To make a request with custom headers, use [NewRequest] and [Client.Do].
+//
+// To make a request with a specified context.Context, use [NewRequestWithContext]
+// and Client.Do.
+func (c *Client) Get(url string) (resp *Response, err error) {
+ req, err := NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
+
+func alwaysFalse() bool { return false }
+
+// ErrUseLastResponse can be returned by Client.CheckRedirect hooks to
+// control how redirects are processed. If returned, the next request
+// is not sent and the most recent response is returned with its body
+// unclosed.
+var ErrUseLastResponse = errors.New("net/http: use last response")
+
+// checkRedirect calls either the user's configured CheckRedirect
+// function, or the default.
+func (c *Client) checkRedirect(req *Request, via []*Request) error {
+ fn := c.CheckRedirect
+ if fn == nil {
+ fn = defaultCheckRedirect
+ }
+ return fn(req, via)
+}
+
+// redirectBehavior describes what should happen when the
+// client encounters a 3xx status code from the server.
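+//
+// For example (illustrative): a POST answered with a 303 yields
+// ("GET", true, false), while a POST answered with a 307 yields
+// ("POST", true, true) as long as GetBody is defined.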
+func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect, includeBody bool) {
+ switch resp.StatusCode {
+ case 301, 302, 303:
+ redirectMethod = reqMethod
+ shouldRedirect = true
+ includeBody = false
+
+ // RFC 2616 allowed automatic redirection only with GET and
+ // HEAD requests. RFC 7231 lifts this restriction, but we still
+ // restrict other methods to GET to maintain compatibility.
+ // See Issue 18570.
+ if reqMethod != "GET" && reqMethod != "HEAD" {
+ redirectMethod = "GET"
+ }
+ case 307, 308:
+ redirectMethod = reqMethod
+ shouldRedirect = true
+ includeBody = true
+
+ if ireq.GetBody == nil && ireq.outgoingLength() != 0 {
+ // We had a request body, and 307/308 require
+ // re-sending it, but GetBody is not defined. So just
+ // return this response to the user instead of an
+ // error, like we did in Go 1.7 and earlier.
+ shouldRedirect = false
+ }
+ }
+ return redirectMethod, shouldRedirect, includeBody
+}
+
+// urlErrorOp returns the (*url.Error).Op value to use for the
+// provided (*Request).Method value.
+func urlErrorOp(method string) string {
+ if method == "" {
+ return "Get"
+ }
+ if lowerMethod, ok := ascii.ToLower(method); ok {
+ return method[:1] + lowerMethod[1:]
+ }
+ return method
+}
+
+// Do sends an HTTP request and returns an HTTP response, following
+// policy (such as redirects, cookies, auth) as configured on the
+// client.
+//
+// An error is returned if caused by client policy (such as
+// CheckRedirect), or failure to speak HTTP (such as a network
+// connectivity problem). A non-2xx status code doesn't cause an
+// error.
+//
+// If the returned error is nil, the [Response] will contain a non-nil
+// Body which the user is expected to close. If the Body is not both
+// read to EOF and closed, the [Client]'s underlying [RoundTripper]
+// (typically [Transport]) may not be able to re-use a persistent TCP
+// connection to the server for a subsequent "keep-alive" request.
+//
+// The request Body, if non-nil, will be closed by the underlying
+// Transport, even on errors. The Body may be closed asynchronously after
+// Do returns.
+//
+// On error, any Response can be ignored. A non-nil Response with a
+// non-nil error only occurs when CheckRedirect fails, and even then
+// the returned [Response.Body] is already closed.
+//
+// Generally [Get], [Post], or [PostForm] will be used instead of Do.
+//
+// If the server replies with a redirect, the Client first uses the
+// CheckRedirect function to determine whether the redirect should be
+// followed. If permitted, a 301, 302, or 303 redirect causes
+// subsequent requests to use HTTP method GET
+// (or HEAD if the original request was HEAD), with no body.
+// A 307 or 308 redirect preserves the original HTTP method and body,
+// provided that the [Request.GetBody] function is defined.
+// The [NewRequest] function automatically sets GetBody for common
+// standard library body types.
+//
+// Any returned error will be of type [*url.Error]. The url.Error
+// value's Timeout method will report true if the request timed out.
+func (c *Client) Do(req *Request) (*Response, error) {
+ return c.do(req)
+}
+
+var testHookClientDoResult func(retres *Response, reterr error)
+
+func (c *Client) do(req *Request) (retres *Response, reterr error) {
+ if testHookClientDoResult != nil {
+ defer func() { testHookClientDoResult(retres, reterr) }()
+ }
+ if req.URL == nil {
+ req.closeBody()
+ return nil, &url.Error{
+ Op: urlErrorOp(req.Method),
+ Err: errors.New("http: nil Request.URL"),
+ }
+ }
+
+ var (
+ deadline = c.deadline()
+ reqs []*Request
+ resp *Response
+ copyHeaders = c.makeHeadersCopier(req)
+ reqBodyClosed = false // have we closed the current req.Body?
+
+ // Redirect behavior:
+ redirectMethod string
+ includeBody bool
+ )
+ uerr := func(err error) error {
+ // the body may have been closed already by c.send()
+ if !reqBodyClosed {
+ req.closeBody()
+ }
+ var urlStr string
+ if resp != nil && resp.Request != nil {
+ urlStr = stripPassword(resp.Request.URL)
+ } else {
+ urlStr = stripPassword(req.URL)
+ }
+ return &url.Error{
+ Op: urlErrorOp(reqs[0].Method),
+ URL: urlStr,
+ Err: err,
+ }
+ }
+ for {
+ // For all but the first request, create the next
+ // request hop and replace req.
+ if len(reqs) > 0 {
+ loc := resp.Header.Get("Location")
+ if loc == "" {
+ // While most 3xx responses include a Location, it is not
+ // required and 3xx responses without a Location have been
+ // observed in the wild. See issues #17773 and #49281.
+ return resp, nil
+ }
+ u, err := req.URL.Parse(loc)
+ if err != nil {
+ resp.closeBody()
+ return nil, uerr(fmt.Errorf("failed to parse Location header %q: %v", loc, err))
+ }
+ host := ""
+ if req.Host != "" && req.Host != req.URL.Host {
+ // If the caller specified a custom Host header and the
+ // redirect location is relative, preserve the Host header
+ // through the redirect. See issue #22233.
+ if u, _ := url.Parse(loc); u != nil && !u.IsAbs() {
+ host = req.Host
+ }
+ }
+ ireq := reqs[0]
+ req = &Request{
+ Method: redirectMethod,
+ Response: resp,
+ URL: u,
+ Header: make(Header),
+ Host: host,
+ Cancel: ireq.Cancel,
+ ctx: ireq.ctx,
+ }
+ if includeBody && ireq.GetBody != nil {
+ req.Body, err = ireq.GetBody()
+ if err != nil {
+ resp.closeBody()
+ return nil, uerr(err)
+ }
+ req.ContentLength = ireq.ContentLength
+ }
+
+ // Copy original headers before setting the Referer,
+ // in case the user set Referer on their first request.
+ // If they really want to override, they can do it in
+ // their CheckRedirect func.
+ copyHeaders(req)
+
+ // Add the Referer header from the most recent
+ // request URL to the new one, if it's not https->http:
+ if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL, req.Header.Get("Referer")); ref != "" {
+ req.Header.Set("Referer", ref)
+ }
+ err = c.checkRedirect(req, reqs)
+
+ // Sentinel error to let users select the
+ // previous response, without closing its
+ // body. See Issue 10069.
+ if err == ErrUseLastResponse {
+ return resp, nil
+ }
+
+ // Close the previous response's body. But
+ // read at least some of the body so if it's
+ // small the underlying TCP connection will be
+ // re-used. No need to check for errors: if it
+ // fails, the Transport won't reuse it anyway.
+ const maxBodySlurpSize = 2 << 10
+ if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
+ io.CopyN(io.Discard, resp.Body, maxBodySlurpSize)
+ }
+ resp.Body.Close()
+
+ if err != nil {
+ // Special case for Go 1 compatibility: return both the response
+ // and an error if the CheckRedirect function failed.
+ // See https://golang.org/issue/3795
+ // The resp.Body has already been closed.
+ ue := uerr(err)
+ ue.(*url.Error).URL = loc
+ return resp, ue
+ }
+ }
+
+ reqs = append(reqs, req)
+ var err error
+ var didTimeout func() bool
+ if resp, didTimeout, err = c.send(req, deadline); err != nil {
+ // c.send() always closes req.Body
+ reqBodyClosed = true
+ if !deadline.IsZero() && didTimeout() {
+ err = &httpError{
+ err: err.Error() + " (Client.Timeout exceeded while awaiting headers)",
+ timeout: true,
+ }
+ }
+ return nil, uerr(err)
+ }
+
+ var shouldRedirect bool
+ redirectMethod, shouldRedirect, includeBody = redirectBehavior(req.Method, resp, reqs[0])
+ if !shouldRedirect {
+ return resp, nil
+ }
+
+ req.closeBody()
+ }
+}
+
+// makeHeadersCopier makes a function that copies headers from the
+// initial Request, ireq. For every redirect, this function must be called
+// so that it can copy headers into the upcoming Request.
+func (c *Client) makeHeadersCopier(ireq *Request) func(*Request) {
+ // The headers to copy are from the very initial request.
+	// We use a closure to keep a reference to these original headers.
+ var (
+ ireqhdr = cloneOrMakeHeader(ireq.Header)
+ icookies map[string][]*Cookie
+ )
+ if c.Jar != nil && ireq.Header.Get("Cookie") != "" {
+ icookies = make(map[string][]*Cookie)
+ for _, c := range ireq.Cookies() {
+ icookies[c.Name] = append(icookies[c.Name], c)
+ }
+ }
+
+ preq := ireq // The previous request
+ return func(req *Request) {
+ // If Jar is present and there was some initial cookies provided
+ // via the request header, then we may need to alter the initial
+ // cookies as we follow redirects since each redirect may end up
+ // modifying a pre-existing cookie.
+ //
+ // Since cookies already set in the request header do not contain
+ // information about the original domain and path, the logic below
+ // assumes any new set cookies override the original cookie
+ // regardless of domain or path.
+ //
+ // See https://golang.org/issue/17494
+ if c.Jar != nil && icookies != nil {
+ var changed bool
+ resp := req.Response // The response that caused the upcoming redirect
+ for _, c := range resp.Cookies() {
+ if _, ok := icookies[c.Name]; ok {
+ delete(icookies, c.Name)
+ changed = true
+ }
+ }
+ if changed {
+ ireqhdr.Del("Cookie")
+ var ss []string
+ for _, cs := range icookies {
+ for _, c := range cs {
+ ss = append(ss, c.Name+"="+c.Value)
+ }
+ }
+ sort.Strings(ss) // Ensure deterministic headers
+ ireqhdr.Set("Cookie", strings.Join(ss, "; "))
+ }
+ }
+
+ // Copy the initial request's Header values
+ // (at least the safe ones).
+ for k, vv := range ireqhdr {
+ if shouldCopyHeaderOnRedirect(k, preq.URL, req.URL) {
+ req.Header[k] = vv
+ }
+ }
+
+ preq = req // Update previous Request with the current request
+ }
+}
+
+func defaultCheckRedirect(req *Request, via []*Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+ return nil
+}
+
+// Post issues a POST to the specified URL.
+//
+// Caller should close resp.Body when done reading from it.
+//
+// If the provided body is an [io.Closer], it is closed after the
+// request.
+//
+// Post is a wrapper around DefaultClient.Post.
+//
+// To set custom headers, use [NewRequest] and DefaultClient.Do.
+//
+// See the [Client.Do] method documentation for details on how redirects
+// are handled.
+//
+// To make a request with a specified context.Context, use [NewRequestWithContext]
+// and DefaultClient.Do.
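+//
+// For example (mirroring the package documentation):
+//
+//	resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)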
+func Post(url, contentType string, body io.Reader) (resp *Response, err error) {
+ return DefaultClient.Post(url, contentType, body)
+}
+
+// Post issues a POST to the specified URL.
+//
+// Caller should close resp.Body when done reading from it.
+//
+// If the provided body is an [io.Closer], it is closed after the
+// request.
+//
+// To set custom headers, use [NewRequest] and [Client.Do].
+//
+// To make a request with a specified context.Context, use [NewRequestWithContext]
+// and [Client.Do].
+//
+// See the Client.Do method documentation for details on how redirects
+// are handled.
+func (c *Client) Post(url, contentType string, body io.Reader) (resp *Response, err error) {
+ req, err := NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", contentType)
+ return c.Do(req)
+}
+
+// PostForm issues a POST to the specified URL, with data's keys and
+// values URL-encoded as the request body.
+//
+// The Content-Type header is set to application/x-www-form-urlencoded.
+// To set other headers, use [NewRequest] and DefaultClient.Do.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
+// PostForm is a wrapper around DefaultClient.PostForm.
+//
+// See the [Client.Do] method documentation for details on how redirects
+// are handled.
+//
+// To make a request with a specified [context.Context], use [NewRequestWithContext]
+// and DefaultClient.Do.
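+//
+// For example (as in the package documentation):
+//
+//	resp, err := http.PostForm("http://example.com/form",
+//		url.Values{"key": {"Value"}, "id": {"123"}})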
+func PostForm(url string, data url.Values) (resp *Response, err error) {
+ return DefaultClient.PostForm(url, data)
+}
+
+// PostForm issues a POST to the specified URL,
+// with data's keys and values URL-encoded as the request body.
+//
+// The Content-Type header is set to application/x-www-form-urlencoded.
+// To set other headers, use [NewRequest] and [Client.Do].
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+//
+// See the Client.Do method documentation for details on how redirects
+// are handled.
+//
+// To make a request with a specified context.Context, use [NewRequestWithContext]
+// and Client.Do.
+func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) {
+ return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// Head issues a HEAD to the specified URL. If the response is one of
+// the following redirect codes, Head follows the redirect, up to a
+// maximum of 10 redirects:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// Head is a wrapper around DefaultClient.Head.
+//
+// To make a request with a specified [context.Context], use [NewRequestWithContext]
+// and DefaultClient.Do.
+func Head(url string) (resp *Response, err error) {
+ return DefaultClient.Head(url)
+}
+
+// Head issues a HEAD to the specified URL. If the response is one of the
+// following redirect codes, Head follows the redirect after calling the
+// [Client.CheckRedirect] function:
+//
+// 301 (Moved Permanently)
+// 302 (Found)
+// 303 (See Other)
+// 307 (Temporary Redirect)
+// 308 (Permanent Redirect)
+//
+// To make a request with a specified [context.Context], use [NewRequestWithContext]
+// and [Client.Do].
+func (c *Client) Head(url string) (resp *Response, err error) {
+ req, err := NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
+
+// CloseIdleConnections closes any connections on its [Transport] which
+// were previously connected from previous requests but are now
+// sitting idle in a "keep-alive" state. It does not interrupt any
+// connections currently in use.
+//
+// If [Client.Transport] does not have a [Client.CloseIdleConnections] method
+// then this method does nothing.
+func (c *Client) CloseIdleConnections() {
+ type closeIdler interface {
+ CloseIdleConnections()
+ }
+ if tr, ok := c.transport().(closeIdler); ok {
+ tr.CloseIdleConnections()
+ }
+}
+
+// cancelTimerBody is an io.ReadCloser that wraps rc with two features:
+// 1. On Read error or close, the stop func is called.
+// 2. On Read failure, if reqDidTimeout is true, the error is wrapped and
+//    marked as a net.Error that hit its timeout.
+type cancelTimerBody struct {
+ stop func() // stops the time.Timer waiting to cancel the request
+ rc io.ReadCloser
+ reqDidTimeout func() bool
+}
+
+func (b *cancelTimerBody) Read(p []byte) (n int, err error) {
+ n, err = b.rc.Read(p)
+ if err == nil {
+ return n, nil
+ }
+ if err == io.EOF {
+ return n, err
+ }
+ if b.reqDidTimeout() {
+ err = &httpError{
+ err: err.Error() + " (Client.Timeout or context cancellation while reading body)",
+ timeout: true,
+ }
+ }
+ return n, err
+}
+
+func (b *cancelTimerBody) Close() error {
+ err := b.rc.Close()
+ b.stop()
+ return err
+}
+
+func shouldCopyHeaderOnRedirect(headerKey string, initial, dest *url.URL) bool {
+ switch CanonicalHeaderKey(headerKey) {
+ case "Authorization", "Www-Authenticate", "Cookie", "Cookie2":
+ // Permit sending auth/cookie headers from "foo.com"
+ // to "sub.foo.com".
+
+ // Note that we don't send all cookies to subdomains
+ // automatically. This function is only used for
+ // Cookies set explicitly on the initial outgoing
+ // client request. Cookies automatically added via the
+ // CookieJar mechanism continue to follow each
+ // cookie's scope as set by Set-Cookie. But for
+ // outgoing requests with the Cookie header set
+ // directly, we don't know their scope, so we assume
+ // it's for *.domain.com.
+
+ ihost := idnaASCIIFromURL(initial)
+ dhost := idnaASCIIFromURL(dest)
+ return isDomainOrSubdomain(dhost, ihost)
+ }
+ // All other headers are copied:
+ return true
+}
+
+// isDomainOrSubdomain reports whether sub is a subdomain (or exact
+// match) of the parent domain.
+//
+// Both domains must already be in canonical form.
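+//
+// For example, "sub.foo.com" is a subdomain of "foo.com", but
+// "bar.com" and "evilfoo.com" are not.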
+func isDomainOrSubdomain(sub, parent string) bool {
+ if sub == parent {
+ return true
+ }
+ // If sub contains a :, it's probably an IPv6 address (and is definitely not a hostname).
+	// Don't check the suffix in this case, to avoid matching the contents of an IPv6 zone.
+ // For example, "::1%.www.example.com" is not a subdomain of "www.example.com".
+ if strings.ContainsAny(sub, ":%") {
+ return false
+ }
+ // If sub is "foo.example.com" and parent is "example.com",
+ // that means sub must end in "."+parent.
+ // Do it without allocating.
+ if !strings.HasSuffix(sub, parent) {
+ return false
+ }
+ return sub[len(sub)-len(parent)-1] == '.'
+}
+
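+// stripPassword returns u.String() with any password replaced by "***";
+// e.g. "https://user:secret@host/" becomes "https://user:***@host/"
+// (example values illustrative).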
+func stripPassword(u *url.URL) string {
+ _, passSet := u.User.Password()
+ if passSet {
+ return strings.Replace(u.String(), u.User.String()+"@", u.User.Username()+":***@", 1)
+ }
+ return u.String()
+}
diff --git a/contrib/go/_std_1.22/src/net/http/clone.go b/contrib/go/_std_1.22/src/net/http/clone.go
new file mode 100644
index 0000000000..3a3375bff7
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/clone.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "mime/multipart"
+ "net/textproto"
+ "net/url"
+)
+
+func cloneURLValues(v url.Values) url.Values {
+ if v == nil {
+ return nil
+ }
+ // http.Header and url.Values have the same representation, so temporarily
+ // treat it like http.Header, which does have a clone:
+ return url.Values(Header(v).Clone())
+}
+
+func cloneURL(u *url.URL) *url.URL {
+ if u == nil {
+ return nil
+ }
+ u2 := new(url.URL)
+ *u2 = *u
+ if u.User != nil {
+ u2.User = new(url.Userinfo)
+ *u2.User = *u.User
+ }
+ return u2
+}
+
+func cloneMultipartForm(f *multipart.Form) *multipart.Form {
+ if f == nil {
+ return nil
+ }
+ f2 := &multipart.Form{
+ Value: (map[string][]string)(Header(f.Value).Clone()),
+ }
+ if f.File != nil {
+ m := make(map[string][]*multipart.FileHeader)
+ for k, vv := range f.File {
+ vv2 := make([]*multipart.FileHeader, len(vv))
+ for i, v := range vv {
+ vv2[i] = cloneMultipartFileHeader(v)
+ }
+ m[k] = vv2
+ }
+ f2.File = m
+ }
+ return f2
+}
+
+func cloneMultipartFileHeader(fh *multipart.FileHeader) *multipart.FileHeader {
+ if fh == nil {
+ return nil
+ }
+ fh2 := new(multipart.FileHeader)
+ *fh2 = *fh
+ fh2.Header = textproto.MIMEHeader(Header(fh.Header).Clone())
+ return fh2
+}
+
+// cloneOrMakeHeader invokes Header.Clone but if the
+// result is nil, it'll instead make and return a non-nil Header.
+func cloneOrMakeHeader(hdr Header) Header {
+ clone := hdr.Clone()
+ if clone == nil {
+ clone = make(Header)
+ }
+ return clone
+}
diff --git a/contrib/go/_std_1.22/src/net/http/cookie.go b/contrib/go/_std_1.22/src/net/http/cookie.go
new file mode 100644
index 0000000000..c22897f3f9
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/cookie.go
@@ -0,0 +1,468 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
+// HTTP response or the Cookie header of an HTTP request.
+//
+// See https://tools.ietf.org/html/rfc6265 for details.
+type Cookie struct {
+ Name string
+ Value string
+
+ Path string // optional
+ Domain string // optional
+ Expires time.Time // optional
+ RawExpires string // for reading cookies only
+
+ // MaxAge=0 means no 'Max-Age' attribute specified.
+ // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
+ // MaxAge>0 means Max-Age attribute present and given in seconds
+ MaxAge int
+ Secure bool
+ HttpOnly bool
+ SameSite SameSite
+ Raw string
+ Unparsed []string // Raw text of unparsed attribute-value pairs
+}
+
+// SameSite allows a server to define a cookie attribute making it impossible for
+// the browser to send this cookie along with cross-site requests. The main
+// goal is to mitigate the risk of cross-origin information leakage, and provide
+// some protection against cross-site request forgery attacks.
+//
+// See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details.
+type SameSite int
+
+const (
+ SameSiteDefaultMode SameSite = iota + 1
+ SameSiteLaxMode
+ SameSiteStrictMode
+ SameSiteNoneMode
+)
+
+// readSetCookies parses all "Set-Cookie" values from
+// the header h and returns the successfully parsed Cookies.
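+//
+// For example (illustrative), the header line
+// "id=a3fWa; Max-Age=60; Secure" parses to a Cookie with
+// Name "id", Value "a3fWa", MaxAge 60, and Secure set.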
+func readSetCookies(h Header) []*Cookie {
+ cookieCount := len(h["Set-Cookie"])
+ if cookieCount == 0 {
+ return []*Cookie{}
+ }
+ cookies := make([]*Cookie, 0, cookieCount)
+ for _, line := range h["Set-Cookie"] {
+ parts := strings.Split(textproto.TrimString(line), ";")
+ if len(parts) == 1 && parts[0] == "" {
+ continue
+ }
+ parts[0] = textproto.TrimString(parts[0])
+ name, value, ok := strings.Cut(parts[0], "=")
+ if !ok {
+ continue
+ }
+ name = textproto.TrimString(name)
+ if !isCookieNameValid(name) {
+ continue
+ }
+ value, ok = parseCookieValue(value, true)
+ if !ok {
+ continue
+ }
+ c := &Cookie{
+ Name: name,
+ Value: value,
+ Raw: line,
+ }
+ for i := 1; i < len(parts); i++ {
+ parts[i] = textproto.TrimString(parts[i])
+ if len(parts[i]) == 0 {
+ continue
+ }
+
+ attr, val, _ := strings.Cut(parts[i], "=")
+ lowerAttr, isASCII := ascii.ToLower(attr)
+ if !isASCII {
+ continue
+ }
+ val, ok = parseCookieValue(val, false)
+ if !ok {
+ c.Unparsed = append(c.Unparsed, parts[i])
+ continue
+ }
+
+ switch lowerAttr {
+ case "samesite":
+ lowerVal, ascii := ascii.ToLower(val)
+ if !ascii {
+ c.SameSite = SameSiteDefaultMode
+ continue
+ }
+ switch lowerVal {
+ case "lax":
+ c.SameSite = SameSiteLaxMode
+ case "strict":
+ c.SameSite = SameSiteStrictMode
+ case "none":
+ c.SameSite = SameSiteNoneMode
+ default:
+ c.SameSite = SameSiteDefaultMode
+ }
+ continue
+ case "secure":
+ c.Secure = true
+ continue
+ case "httponly":
+ c.HttpOnly = true
+ continue
+ case "domain":
+ c.Domain = val
+ continue
+ case "max-age":
+ secs, err := strconv.Atoi(val)
+ if err != nil || secs != 0 && val[0] == '0' {
+ break
+ }
+ if secs <= 0 {
+ secs = -1
+ }
+ c.MaxAge = secs
+ continue
+ case "expires":
+ c.RawExpires = val
+ exptime, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val)
+ if err != nil {
+ c.Expires = time.Time{}
+ break
+ }
+ }
+ c.Expires = exptime.UTC()
+ continue
+ case "path":
+ c.Path = val
+ continue
+ }
+ c.Unparsed = append(c.Unparsed, parts[i])
+ }
+ cookies = append(cookies, c)
+ }
+ return cookies
+}
+
+// SetCookie adds a Set-Cookie header to the provided [ResponseWriter]'s headers.
+// The provided cookie must have a valid Name. Invalid cookies may be
+// silently dropped.
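+//
+// A typical use in a handler (names and values illustrative):
+//
+//	http.SetCookie(w, &http.Cookie{
+//		Name:     "session",
+//		Value:    "abc123",
+//		Path:     "/",
+//		HttpOnly: true,
+//	})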
+func SetCookie(w ResponseWriter, cookie *Cookie) {
+ if v := cookie.String(); v != "" {
+ w.Header().Add("Set-Cookie", v)
+ }
+}
+
+// String returns the serialization of the cookie for use in a [Cookie]
+// header (if only Name and Value are set) or a Set-Cookie response
+// header (if other fields are set).
+// If c is nil or c.Name is invalid, the empty string is returned.
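+//
+// For example:
+//
+//	(&http.Cookie{Name: "a", Value: "b", Path: "/"}).String() // "a=b; Path=/"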
+func (c *Cookie) String() string {
+ if c == nil || !isCookieNameValid(c.Name) {
+ return ""
+ }
+ // extraCookieLength derived from typical length of cookie attributes
+ // see RFC 6265 Sec 4.1.
+ const extraCookieLength = 110
+ var b strings.Builder
+ b.Grow(len(c.Name) + len(c.Value) + len(c.Domain) + len(c.Path) + extraCookieLength)
+ b.WriteString(c.Name)
+ b.WriteRune('=')
+ b.WriteString(sanitizeCookieValue(c.Value))
+
+ if len(c.Path) > 0 {
+ b.WriteString("; Path=")
+ b.WriteString(sanitizeCookiePath(c.Path))
+ }
+ if len(c.Domain) > 0 {
+ if validCookieDomain(c.Domain) {
+ // A c.Domain containing illegal characters is not
+ // sanitized but simply dropped which turns the cookie
+ // into a host-only cookie. A leading dot is okay
+ // but won't be sent.
+ d := c.Domain
+ if d[0] == '.' {
+ d = d[1:]
+ }
+ b.WriteString("; Domain=")
+ b.WriteString(d)
+ } else {
+ log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute", c.Domain)
+ }
+ }
+ var buf [len(TimeFormat)]byte
+ if validCookieExpires(c.Expires) {
+ b.WriteString("; Expires=")
+ b.Write(c.Expires.UTC().AppendFormat(buf[:0], TimeFormat))
+ }
+ if c.MaxAge > 0 {
+ b.WriteString("; Max-Age=")
+ b.Write(strconv.AppendInt(buf[:0], int64(c.MaxAge), 10))
+ } else if c.MaxAge < 0 {
+ b.WriteString("; Max-Age=0")
+ }
+ if c.HttpOnly {
+ b.WriteString("; HttpOnly")
+ }
+ if c.Secure {
+ b.WriteString("; Secure")
+ }
+ switch c.SameSite {
+ case SameSiteDefaultMode:
+ // Skip, default mode is obtained by not emitting the attribute.
+ case SameSiteNoneMode:
+ b.WriteString("; SameSite=None")
+ case SameSiteLaxMode:
+ b.WriteString("; SameSite=Lax")
+ case SameSiteStrictMode:
+ b.WriteString("; SameSite=Strict")
+ }
+ return b.String()
+}
+
+// Valid reports whether the cookie is valid.
+func (c *Cookie) Valid() error {
+ if c == nil {
+ return errors.New("http: nil Cookie")
+ }
+ if !isCookieNameValid(c.Name) {
+ return errors.New("http: invalid Cookie.Name")
+ }
+ if !c.Expires.IsZero() && !validCookieExpires(c.Expires) {
+ return errors.New("http: invalid Cookie.Expires")
+ }
+ for i := 0; i < len(c.Value); i++ {
+ if !validCookieValueByte(c.Value[i]) {
+ return fmt.Errorf("http: invalid byte %q in Cookie.Value", c.Value[i])
+ }
+ }
+ if len(c.Path) > 0 {
+ for i := 0; i < len(c.Path); i++ {
+ if !validCookiePathByte(c.Path[i]) {
+ return fmt.Errorf("http: invalid byte %q in Cookie.Path", c.Path[i])
+ }
+ }
+ }
+ if len(c.Domain) > 0 {
+ if !validCookieDomain(c.Domain) {
+ return errors.New("http: invalid Cookie.Domain")
+ }
+ }
+ return nil
+}
+
+// readCookies parses all "Cookie" values from the header h and
+// returns the successfully parsed Cookies.
+//
+// If filter isn't empty, only cookies of that name are returned.
+func readCookies(h Header, filter string) []*Cookie {
+ lines := h["Cookie"]
+ if len(lines) == 0 {
+ return []*Cookie{}
+ }
+
+ cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";"))
+ for _, line := range lines {
+ line = textproto.TrimString(line)
+
+ var part string
+ for len(line) > 0 { // continue since we have rest
+ part, line, _ = strings.Cut(line, ";")
+ part = textproto.TrimString(part)
+ if part == "" {
+ continue
+ }
+ name, val, _ := strings.Cut(part, "=")
+ name = textproto.TrimString(name)
+ if !isCookieNameValid(name) {
+ continue
+ }
+ if filter != "" && filter != name {
+ continue
+ }
+ val, ok := parseCookieValue(val, true)
+ if !ok {
+ continue
+ }
+ cookies = append(cookies, &Cookie{Name: name, Value: val})
+ }
+ }
+ return cookies
+}
+
+// validCookieDomain reports whether v is a valid cookie domain-value.
+func validCookieDomain(v string) bool {
+ if isCookieDomainName(v) {
+ return true
+ }
+ if net.ParseIP(v) != nil && !strings.Contains(v, ":") {
+ return true
+ }
+ return false
+}
+
+// validCookieExpires reports whether v is a valid cookie expires-value.
+func validCookieExpires(t time.Time) bool {
+ // IETF RFC 6265 Section 5.1.1.5, the year must not be less than 1601
+ return t.Year() >= 1601
+}
+
+// isCookieDomainName reports whether s is a valid domain name or a valid
+// domain name with a leading dot '.'. It is almost a direct copy of
+// package net's isDomainName.
+func isCookieDomainName(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ if len(s) > 255 {
+ return false
+ }
+
+ if s[0] == '.' {
+		// A cookie's domain attribute may start with a leading dot.
+ s = s[1:]
+ }
+ last := byte('.')
+ ok := false // Ok once we've seen a letter.
+ partlen := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch {
+ default:
+ return false
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ // No '_' allowed here (in contrast to package net).
+ ok = true
+ partlen++
+ case '0' <= c && c <= '9':
+ // fine
+ partlen++
+ case c == '-':
+ // Byte before dash cannot be dot.
+ if last == '.' {
+ return false
+ }
+ partlen++
+ case c == '.':
+ // Byte before dot cannot be dot, dash.
+ if last == '.' || last == '-' {
+ return false
+ }
+ if partlen > 63 || partlen == 0 {
+ return false
+ }
+ partlen = 0
+ }
+ last = c
+ }
+ if last == '-' || partlen > 63 {
+ return false
+ }
+
+ return ok
+}
+
+var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
+
+func sanitizeCookieName(n string) string {
+ return cookieNameSanitizer.Replace(n)
+}
+
+// sanitizeCookieValue produces a suitable cookie-value from v.
+// https://tools.ietf.org/html/rfc6265#section-4.1.1
+//
+// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
+// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+// ; US-ASCII characters excluding CTLs,
+// ; whitespace DQUOTE, comma, semicolon,
+// ; and backslash
+//
+// We loosen this as spaces and commas are common in cookie values
+// but we produce a quoted cookie-value if and only if v contains
+// commas or spaces.
+// See https://golang.org/issue/7243 for the discussion.
+func sanitizeCookieValue(v string) string {
+ v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
+ if len(v) == 0 {
+ return v
+ }
+ if strings.ContainsAny(v, " ,") {
+ return `"` + v + `"`
+ }
+ return v
+}
+
+func validCookieValueByte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != '"' && b != ';' && b != '\\'
+}
+
+// path-av = "Path=" path-value
+// path-value = <any CHAR except CTLs or ";">
+func sanitizeCookiePath(v string) string {
+ return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
+}
+
+func validCookiePathByte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != ';'
+}
+
+func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
+ ok := true
+ for i := 0; i < len(v); i++ {
+ if valid(v[i]) {
+ continue
+ }
+ log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName)
+ ok = false
+ break
+ }
+ if ok {
+ return v
+ }
+ buf := make([]byte, 0, len(v))
+ for i := 0; i < len(v); i++ {
+ if b := v[i]; valid(b) {
+ buf = append(buf, b)
+ }
+ }
+ return string(buf)
+}
+
+func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) {
+ // Strip the quotes, if present.
+ if allowDoubleQuote && len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' {
+ raw = raw[1 : len(raw)-1]
+ }
+ for i := 0; i < len(raw); i++ {
+ if !validCookieValueByte(raw[i]) {
+ return "", false
+ }
+ }
+ return raw, true
+}
+
+func isCookieNameValid(raw string) bool {
+ if raw == "" {
+ return false
+ }
+ return strings.IndexFunc(raw, isNotToken) < 0
+}
diff --git a/contrib/go/_std_1.22/src/net/http/doc.go b/contrib/go/_std_1.22/src/net/http/doc.go
new file mode 100644
index 0000000000..f7ad3ae762
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/doc.go
@@ -0,0 +1,110 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package http provides HTTP client and server implementations.
+
+[Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests:
+
+ resp, err := http.Get("http://example.com/")
+ ...
+ resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf)
+ ...
+ resp, err := http.PostForm("http://example.com/form",
+ url.Values{"key": {"Value"}, "id": {"123"}})
+
+The caller must close the response body when finished with it:
+
+ resp, err := http.Get("http://example.com/")
+ if err != nil {
+ // handle error
+ }
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ // ...
+
+# Clients and Transports
+
+For control over HTTP client headers, redirect policy, and other
+settings, create a [Client]:
+
+ client := &http.Client{
+ CheckRedirect: redirectPolicyFunc,
+ }
+
+ resp, err := client.Get("http://example.com")
+ // ...
+
+ req, err := http.NewRequest("GET", "http://example.com", nil)
+ // ...
+ req.Header.Add("If-None-Match", `W/"wyzzy"`)
+ resp, err := client.Do(req)
+ // ...
+
+For control over proxies, TLS configuration, keep-alives,
+compression, and other settings, create a [Transport]:
+
+ tr := &http.Transport{
+ MaxIdleConns: 10,
+ IdleConnTimeout: 30 * time.Second,
+ DisableCompression: true,
+ }
+ client := &http.Client{Transport: tr}
+ resp, err := client.Get("https://example.com")
+
+Clients and Transports are safe for concurrent use by multiple
+goroutines and for efficiency should only be created once and re-used.
+
+# Servers
+
+ListenAndServe starts an HTTP server with a given address and handler.
+The handler is usually nil, which means to use [DefaultServeMux].
+[Handle] and [HandleFunc] add handlers to [DefaultServeMux]:
+
+ http.Handle("/foo", fooHandler)
+
+ http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
+ })
+
+ log.Fatal(http.ListenAndServe(":8080", nil))
+
+More control over the server's behavior is available by creating a
+custom Server:
+
+ s := &http.Server{
+ Addr: ":8080",
+ Handler: myHandler,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ MaxHeaderBytes: 1 << 20,
+ }
+ log.Fatal(s.ListenAndServe())
+
+# HTTP/2
+
+Starting with Go 1.6, the http package has transparent support for the
+HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2
+can do so by setting [Transport.TLSNextProto] (for clients) or
+[Server.TLSNextProto] (for servers) to a non-nil, empty
+map. Alternatively, the following GODEBUG settings are
+currently supported:
+
+ GODEBUG=http2client=0 # disable HTTP/2 client support
+ GODEBUG=http2server=0 # disable HTTP/2 server support
+ GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs
+ GODEBUG=http2debug=2 # ... even more verbose, with frame dumps
+
+Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug
+
+The http package's [Transport] and [Server] both automatically enable
+HTTP/2 support for simple configurations. To enable HTTP/2 for more
+complex configurations, to use lower-level HTTP/2 features, or to use
+a newer version of Go's http2 package, import "golang.org/x/net/http2"
+directly and use its ConfigureTransport and/or ConfigureServer
+functions. Manually configuring HTTP/2 via the golang.org/x/net/http2
+package takes precedence over the net/http package's built-in HTTP/2
+support.
+*/
+package http
diff --git a/contrib/go/_std_1.22/src/net/http/filetransport.go b/contrib/go/_std_1.22/src/net/http/filetransport.go
new file mode 100644
index 0000000000..7384b22fbe
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/filetransport.go
@@ -0,0 +1,142 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+)
+
+// fileTransport implements RoundTripper for the 'file' protocol.
+type fileTransport struct {
+ fh fileHandler
+}
+
+// NewFileTransport returns a new [RoundTripper], serving the provided
+// [FileSystem]. The returned RoundTripper ignores the URL host in its
+// incoming requests, as well as most other properties of the
+// request.
+//
+// The typical use case for NewFileTransport is to register the "file"
+// protocol with a [Transport], as in:
+//
+// t := &http.Transport{}
+// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
+// c := &http.Client{Transport: t}
+// res, err := c.Get("file:///etc/passwd")
+// ...
+func NewFileTransport(fs FileSystem) RoundTripper {
+ return fileTransport{fileHandler{fs}}
+}
+
+// NewFileTransportFS returns a new [RoundTripper], serving the provided
+// file system fsys. The returned RoundTripper ignores the URL host in its
+// incoming requests, as well as most other properties of the
+// request.
+//
+// The typical use case for NewFileTransportFS is to register the "file"
+// protocol with a [Transport], as in:
+//
+// fsys := os.DirFS("/")
+// t := &http.Transport{}
+// t.RegisterProtocol("file", http.NewFileTransportFS(fsys))
+// c := &http.Client{Transport: t}
+// res, err := c.Get("file:///etc/passwd")
+// ...
+func NewFileTransportFS(fsys fs.FS) RoundTripper {
+ return NewFileTransport(FS(fsys))
+}
+
+func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
+ // We start ServeHTTP in a goroutine, which may take a long
+ // time if the file is large. The newPopulateResponseWriter
+	// call returns a channel on which either ServeHTTP or finish()
+	// sends our *Response once the *Response itself has been
+	// populated (even if the body is still being written to
+	// res.Body, a pipe).
+ rw, resc := newPopulateResponseWriter()
+ go func() {
+ t.fh.ServeHTTP(rw, req)
+ rw.finish()
+ }()
+ return <-resc, nil
+}
+
+func newPopulateResponseWriter() (*populateResponse, <-chan *Response) {
+ pr, pw := io.Pipe()
+ rw := &populateResponse{
+ ch: make(chan *Response),
+ pw: pw,
+ res: &Response{
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ Header: make(Header),
+ Close: true,
+ Body: pr,
+ },
+ }
+ return rw, rw.ch
+}
+
+// populateResponse is a ResponseWriter that populates the *Response
+// in res, and writes its body to a pipe connected to the response
+// body. Once writes begin or finish() is called, the response is sent
+// on ch.
+type populateResponse struct {
+ res *Response
+ ch chan *Response
+ wroteHeader bool
+ hasContent bool
+ sentResponse bool
+ pw *io.PipeWriter
+}
+
+func (pr *populateResponse) finish() {
+ if !pr.wroteHeader {
+ pr.WriteHeader(500)
+ }
+ if !pr.sentResponse {
+ pr.sendResponse()
+ }
+ pr.pw.Close()
+}
+
+func (pr *populateResponse) sendResponse() {
+ if pr.sentResponse {
+ return
+ }
+ pr.sentResponse = true
+
+ if pr.hasContent {
+ pr.res.ContentLength = -1
+ }
+ pr.ch <- pr.res
+}
+
+func (pr *populateResponse) Header() Header {
+ return pr.res.Header
+}
+
+func (pr *populateResponse) WriteHeader(code int) {
+ if pr.wroteHeader {
+ return
+ }
+ pr.wroteHeader = true
+
+ pr.res.StatusCode = code
+ pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
+}
+
+func (pr *populateResponse) Write(p []byte) (n int, err error) {
+ if !pr.wroteHeader {
+ pr.WriteHeader(StatusOK)
+ }
+ pr.hasContent = true
+ if !pr.sentResponse {
+ pr.sendResponse()
+ }
+ return pr.pw.Write(p)
+}
diff --git a/contrib/go/_std_1.22/src/net/http/fs.go b/contrib/go/_std_1.22/src/net/http/fs.go
new file mode 100644
index 0000000000..af7511a7a4
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/fs.go
@@ -0,0 +1,1057 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP file system request handler
+
+package http
+
+import (
+ "errors"
+ "fmt"
+ "internal/safefilepath"
+ "io"
+ "io/fs"
+ "mime"
+ "mime/multipart"
+ "net/textproto"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// A Dir implements [FileSystem] using the native file system restricted to a
+// specific directory tree.
+//
+// While the [FileSystem.Open] method takes '/'-separated paths, a Dir's string
+// value is a filename on the native file system, not a URL, so it is separated
+// by [filepath.Separator], which isn't necessarily '/'.
+//
+// Note that Dir could expose sensitive files and directories. Dir will follow
+// symlinks pointing out of the directory tree, which can be especially dangerous
+// if serving from a directory in which users are able to create arbitrary symlinks.
+// Dir will also allow access to files and directories starting with a period,
+// which could expose sensitive directories like .git or sensitive files like
+// .htpasswd. To exclude files with a leading period, remove the files/directories
+// from the server or create a custom FileSystem implementation.
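+//
+// A minimal sketch of such a wrapper (dotFileHidingFS is illustrative and
+// not part of this package):
+//
+//	type dotFileHidingFS struct{ fsys http.FileSystem }
+//
+//	func (f dotFileHidingFS) Open(name string) (http.File, error) {
+//		for _, part := range strings.Split(name, "/") {
+//			if strings.HasPrefix(part, ".") {
+//				return nil, fs.ErrPermission
+//			}
+//		}
+//		return f.fsys.Open(name)
+//	}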
+//
+// An empty Dir is treated as ".".
+type Dir string
+
+// mapOpenError maps the provided non-nil error from opening name
+// to a possibly better non-nil error. In particular, it turns OS-specific errors
+// about opening files in non-directories into fs.ErrNotExist. See Issues 18984 and 49552.
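+// For example, opening "a/b" where "a" is a regular file yields an
+// OS-specific not-a-directory error, which this maps to fs.ErrNotExist.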
+func mapOpenError(originalErr error, name string, sep rune, stat func(string) (fs.FileInfo, error)) error {
+ if errors.Is(originalErr, fs.ErrNotExist) || errors.Is(originalErr, fs.ErrPermission) {
+ return originalErr
+ }
+
+ parts := strings.Split(name, string(sep))
+ for i := range parts {
+ if parts[i] == "" {
+ continue
+ }
+ fi, err := stat(strings.Join(parts[:i+1], string(sep)))
+ if err != nil {
+ return originalErr
+ }
+ if !fi.IsDir() {
+ return fs.ErrNotExist
+ }
+ }
+ return originalErr
+}
+
+// Open implements [FileSystem] using [os.Open], opening files for reading rooted
+// and relative to the directory d.
+func (d Dir) Open(name string) (File, error) {
+ path, err := safefilepath.FromFS(path.Clean("/" + name))
+ if err != nil {
+ return nil, errors.New("http: invalid or unsafe file path")
+ }
+ dir := string(d)
+ if dir == "" {
+ dir = "."
+ }
+ fullName := filepath.Join(dir, path)
+ f, err := os.Open(fullName)
+ if err != nil {
+ return nil, mapOpenError(err, fullName, filepath.Separator, os.Stat)
+ }
+ return f, nil
+}
+
+// A FileSystem implements access to a collection of named files.
+// The elements in a file path are separated by slash ('/', U+002F)
+// characters, regardless of host operating system convention.
+// See the [FileServer] function to convert a FileSystem to a [Handler].
+//
+// This interface predates the [fs.FS] interface, which can be used instead:
+// the [FS] adapter function converts an fs.FS to a FileSystem.
+type FileSystem interface {
+ Open(name string) (File, error)
+}
+
+// A File is returned by a [FileSystem]'s Open method and can be
+// served by the [FileServer] implementation.
+//
+// The methods should behave the same as those on an [*os.File].
+type File interface {
+ io.Closer
+ io.Reader
+ io.Seeker
+ Readdir(count int) ([]fs.FileInfo, error)
+ Stat() (fs.FileInfo, error)
+}
+
+type anyDirs interface {
+ len() int
+ name(i int) string
+ isDir(i int) bool
+}
+
+type fileInfoDirs []fs.FileInfo
+
+func (d fileInfoDirs) len() int { return len(d) }
+func (d fileInfoDirs) isDir(i int) bool { return d[i].IsDir() }
+func (d fileInfoDirs) name(i int) string { return d[i].Name() }
+
+type dirEntryDirs []fs.DirEntry
+
+func (d dirEntryDirs) len() int { return len(d) }
+func (d dirEntryDirs) isDir(i int) bool { return d[i].IsDir() }
+func (d dirEntryDirs) name(i int) string { return d[i].Name() }
+
+func dirList(w ResponseWriter, r *Request, f File) {
+ // Prefer to use ReadDir instead of Readdir,
+ // because the former doesn't require calling
+ // Stat on every entry of a directory on Unix.
+ var dirs anyDirs
+ var err error
+ if d, ok := f.(fs.ReadDirFile); ok {
+ var list dirEntryDirs
+ list, err = d.ReadDir(-1)
+ dirs = list
+ } else {
+ var list fileInfoDirs
+ list, err = f.Readdir(-1)
+ dirs = list
+ }
+
+ if err != nil {
+ logf(r, "http: error reading directory: %v", err)
+ Error(w, "Error reading directory", StatusInternalServerError)
+ return
+ }
+ sort.Slice(dirs, func(i, j int) bool { return dirs.name(i) < dirs.name(j) })
+
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ fmt.Fprintf(w, "<pre>\n")
+ for i, n := 0, dirs.len(); i < n; i++ {
+ name := dirs.name(i)
+ if dirs.isDir(i) {
+ name += "/"
+ }
+ // name may contain '?' or '#', which must be escaped to remain
+ // part of the URL path, and not indicate the start of a query
+ // string or fragment.
+ url := url.URL{Path: name}
+ fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name))
+ }
+ fmt.Fprintf(w, "</pre>\n")
+}
+
+// ServeContent replies to the request using the content in the
+// provided ReadSeeker. The main benefit of ServeContent over [io.Copy]
+// is that it handles Range requests properly, sets the MIME type, and
+// handles If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since,
+// and If-Range requests.
+//
+// If the response's Content-Type header is not set, ServeContent
+// first tries to deduce the type from name's file extension and,
+// if that fails, falls back to reading the first block of the content
+// and passing it to [DetectContentType].
+// The name is otherwise unused; in particular it can be empty and is
+// never sent in the response.
+//
+// If modtime is not the zero time or Unix epoch, ServeContent
+// includes it in a Last-Modified header in the response. If the
+// request includes an If-Modified-Since header, ServeContent uses
+// modtime to decide whether the content needs to be sent at all.
+//
+// The content's Seek method must work: ServeContent uses
+// a seek to the end of the content to determine its size.
+//
+// If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
+// ServeContent uses it to handle requests using If-Match, If-None-Match, or If-Range.
+//
+// Note that [*os.File] implements the [io.ReadSeeker] interface.
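+//
+// A typical handler is sketched below; data and modtime are assumed to be
+// defined elsewhere:
+//
+//	func serveData(w http.ResponseWriter, r *http.Request) {
+//		http.ServeContent(w, r, "data.txt", modtime, bytes.NewReader(data))
+//	}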
+func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) {
+ sizeFunc := func() (int64, error) {
+ size, err := content.Seek(0, io.SeekEnd)
+ if err != nil {
+ return 0, errSeeker
+ }
+ _, err = content.Seek(0, io.SeekStart)
+ if err != nil {
+ return 0, errSeeker
+ }
+ return size, nil
+ }
+ serveContent(w, req, name, modtime, sizeFunc, content)
+}
+
+// errSeeker is returned by ServeContent's sizeFunc when the content
+// doesn't seek properly. The underlying Seeker's error text isn't
+// included in the sizeFunc reply so it's not sent over HTTP to end
+// users.
+var errSeeker = errors.New("seeker can't seek")
+
+// errNoOverlap is returned by serveContent's parseRange if first-byte-pos of
+// all of the byte-range-spec values is greater than the content size.
+var errNoOverlap = errors.New("invalid range: failed to overlap")
+
+// if name is empty, filename is unknown. (used for mime type, before sniffing)
+// if modtime.IsZero(), modtime is unknown.
+// content must be seeked to the beginning of the file.
+// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response.
+func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) {
+ setLastModified(w, modtime)
+ done, rangeReq := checkPreconditions(w, r, modtime)
+ if done {
+ return
+ }
+
+ code := StatusOK
+
+ // If Content-Type isn't set, use the file's extension to find it, but
+ // if the Content-Type is unset explicitly, do not sniff the type.
+ ctypes, haveType := w.Header()["Content-Type"]
+ var ctype string
+ if !haveType {
+ ctype = mime.TypeByExtension(filepath.Ext(name))
+ if ctype == "" {
+ // read a chunk to decide between utf-8 text and binary
+ var buf [sniffLen]byte
+ n, _ := io.ReadFull(content, buf[:])
+ ctype = DetectContentType(buf[:n])
+ _, err := content.Seek(0, io.SeekStart) // rewind to output whole file
+ if err != nil {
+ Error(w, "seeker can't seek", StatusInternalServerError)
+ return
+ }
+ }
+ w.Header().Set("Content-Type", ctype)
+ } else if len(ctypes) > 0 {
+ ctype = ctypes[0]
+ }
+
+ size, err := sizeFunc()
+ if err != nil {
+ Error(w, err.Error(), StatusInternalServerError)
+ return
+ }
+ if size < 0 {
+ // Should never happen but just to be sure
+ Error(w, "negative content size computed", StatusInternalServerError)
+ return
+ }
+
+ // handle Content-Range header.
+ sendSize := size
+ var sendContent io.Reader = content
+ ranges, err := parseRange(rangeReq, size)
+ switch err {
+ case nil:
+ case errNoOverlap:
+ if size == 0 {
+ // Some clients add a Range header to all requests to
+ // limit the size of the response. If the file is empty,
+ // ignore the range header and respond with a 200 rather
+ // than a 416.
+ ranges = nil
+ break
+ }
+ w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
+ fallthrough
+ default:
+ Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
+ return
+ }
+
+ if sumRangesSize(ranges) > size {
+ // The total number of bytes in all the ranges
+ // is larger than the size of the file by
+ // itself, so this is probably an attack, or a
+ // dumb client. Ignore the range request.
+ ranges = nil
+ }
+ switch {
+ case len(ranges) == 1:
+ // RFC 7233, Section 4.1:
+ // "If a single part is being transferred, the server
+ // generating the 206 response MUST generate a
+ // Content-Range header field, describing what range
+ // of the selected representation is enclosed, and a
+ // payload consisting of the range.
+ // ...
+ // A server MUST NOT generate a multipart response to
+ // a request for a single range, since a client that
+ // does not request multiple parts might not support
+ // multipart responses."
+ ra := ranges[0]
+ if _, err := content.Seek(ra.start, io.SeekStart); err != nil {
+ Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
+ return
+ }
+ sendSize = ra.length
+ code = StatusPartialContent
+ w.Header().Set("Content-Range", ra.contentRange(size))
+ case len(ranges) > 1:
+ sendSize = rangesMIMESize(ranges, ctype, size)
+ code = StatusPartialContent
+
+ pr, pw := io.Pipe()
+ mw := multipart.NewWriter(pw)
+ w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
+ sendContent = pr
+ defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
+ go func() {
+ for _, ra := range ranges {
+ part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if _, err := content.Seek(ra.start, io.SeekStart); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ if _, err := io.CopyN(part, content, ra.length); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+ mw.Close()
+ pw.Close()
+ }()
+ }
+
+ w.Header().Set("Accept-Ranges", "bytes")
+
+ // We should be able to unconditionally set the Content-Length here.
+ //
+ // However, there is a pattern observed in the wild that this breaks:
+ // The user wraps the ResponseWriter in one which gzips data written to it,
+ // and sets "Content-Encoding: gzip".
+ //
+ // The user shouldn't be doing this; the serveContent path here depends
+ // on serving seekable data with a known length. If you want to compress
+ // on the fly, then you shouldn't be using ServeFile/ServeContent, or
+ // you should compress the entire file up-front and provide a seekable
+ // view of the compressed data.
+ //
+ // However, since we've observed this pattern in the wild, and since
+ // setting Content-Length here breaks code that mostly-works today,
+ // skip setting Content-Length if the user set Content-Encoding.
+ //
+ // If this is a range request, always set Content-Length.
+	// If the user isn't changing the bytes sent in the ResponseWriter,
+ // the Content-Length will be correct.
+ // If the user is changing the bytes sent, then the range request wasn't
+ // going to work properly anyway and we aren't worse off.
+ //
+ // A possible future improvement on this might be to look at the type
+ // of the ResponseWriter, and always set Content-Length if it's one
+ // that we recognize.
+ if len(ranges) > 0 || w.Header().Get("Content-Encoding") == "" {
+ w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
+ }
+ w.WriteHeader(code)
+
+ if r.Method != "HEAD" {
+ io.CopyN(w, sendContent, sendSize)
+ }
+}
+
+// scanETag determines if a syntactically valid ETag is present at s. If so,
+// the ETag and remaining text after consuming ETag is returned. Otherwise,
+// it returns "", "".
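+// For example, scanETag(`W/"a", "b"`) returns `W/"a"` and `, "b"`.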
+func scanETag(s string) (etag string, remain string) {
+ s = textproto.TrimString(s)
+ start := 0
+ if strings.HasPrefix(s, "W/") {
+ start = 2
+ }
+ if len(s[start:]) < 2 || s[start] != '"' {
+ return "", ""
+ }
+ // ETag is either W/"text" or "text".
+ // See RFC 7232 2.3.
+ for i := start + 1; i < len(s); i++ {
+ c := s[i]
+ switch {
+ // Character values allowed in ETags.
+ case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80:
+ case c == '"':
+ return s[:i+1], s[i+1:]
+ default:
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// etagStrongMatch reports whether a and b match using strong ETag comparison.
+// Assumes a and b are valid ETags.
+func etagStrongMatch(a, b string) bool {
+ return a == b && a != "" && a[0] == '"'
+}
+
+// etagWeakMatch reports whether a and b match using weak ETag comparison.
+// Assumes a and b are valid ETags.
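+// For example, W/"x" and "x" match weakly but not strongly.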
+func etagWeakMatch(a, b string) bool {
+ return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/")
+}
+
+// condResult is the result of an HTTP request precondition check.
+// See https://tools.ietf.org/html/rfc7232 section 3.
+type condResult int
+
+const (
+ condNone condResult = iota
+ condTrue
+ condFalse
+)
+
+func checkIfMatch(w ResponseWriter, r *Request) condResult {
+ im := r.Header.Get("If-Match")
+ if im == "" {
+ return condNone
+ }
+ for {
+ im = textproto.TrimString(im)
+ if len(im) == 0 {
+ break
+ }
+ if im[0] == ',' {
+ im = im[1:]
+ continue
+ }
+ if im[0] == '*' {
+ return condTrue
+ }
+ etag, remain := scanETag(im)
+ if etag == "" {
+ break
+ }
+ if etagStrongMatch(etag, w.Header().get("Etag")) {
+ return condTrue
+ }
+ im = remain
+ }
+
+ return condFalse
+}
+
+func checkIfUnmodifiedSince(r *Request, modtime time.Time) condResult {
+ ius := r.Header.Get("If-Unmodified-Since")
+ if ius == "" || isZeroTime(modtime) {
+ return condNone
+ }
+ t, err := ParseTime(ius)
+ if err != nil {
+ return condNone
+ }
+
+ // The Last-Modified header truncates sub-second precision so
+ // the modtime needs to be truncated too.
+ modtime = modtime.Truncate(time.Second)
+ if ret := modtime.Compare(t); ret <= 0 {
+ return condTrue
+ }
+ return condFalse
+}
+
+func checkIfNoneMatch(w ResponseWriter, r *Request) condResult {
+ inm := r.Header.get("If-None-Match")
+ if inm == "" {
+ return condNone
+ }
+ buf := inm
+ for {
+ buf = textproto.TrimString(buf)
+ if len(buf) == 0 {
+ break
+ }
+ if buf[0] == ',' {
+ buf = buf[1:]
+ continue
+ }
+ if buf[0] == '*' {
+ return condFalse
+ }
+ etag, remain := scanETag(buf)
+ if etag == "" {
+ break
+ }
+ if etagWeakMatch(etag, w.Header().get("Etag")) {
+ return condFalse
+ }
+ buf = remain
+ }
+ return condTrue
+}
+
+func checkIfModifiedSince(r *Request, modtime time.Time) condResult {
+ if r.Method != "GET" && r.Method != "HEAD" {
+ return condNone
+ }
+ ims := r.Header.Get("If-Modified-Since")
+ if ims == "" || isZeroTime(modtime) {
+ return condNone
+ }
+ t, err := ParseTime(ims)
+ if err != nil {
+ return condNone
+ }
+ // The Last-Modified header truncates sub-second precision so
+ // the modtime needs to be truncated too.
+ modtime = modtime.Truncate(time.Second)
+ if ret := modtime.Compare(t); ret <= 0 {
+ return condFalse
+ }
+ return condTrue
+}
+
+func checkIfRange(w ResponseWriter, r *Request, modtime time.Time) condResult {
+ if r.Method != "GET" && r.Method != "HEAD" {
+ return condNone
+ }
+ ir := r.Header.get("If-Range")
+ if ir == "" {
+ return condNone
+ }
+ etag, _ := scanETag(ir)
+ if etag != "" {
+ if etagStrongMatch(etag, w.Header().Get("Etag")) {
+ return condTrue
+ } else {
+ return condFalse
+ }
+ }
+ // The If-Range value is typically the ETag value, but it may also be
+ // the modtime date. See golang.org/issue/8367.
+ if modtime.IsZero() {
+ return condFalse
+ }
+ t, err := ParseTime(ir)
+ if err != nil {
+ return condFalse
+ }
+ if t.Unix() == modtime.Unix() {
+ return condTrue
+ }
+ return condFalse
+}
+
+var unixEpochTime = time.Unix(0, 0)
+
+// isZeroTime reports whether t is obviously unspecified (either zero or Unix()=0).
+func isZeroTime(t time.Time) bool {
+ return t.IsZero() || t.Equal(unixEpochTime)
+}
+
+func setLastModified(w ResponseWriter, modtime time.Time) {
+ if !isZeroTime(modtime) {
+ w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat))
+ }
+}
+
+func writeNotModified(w ResponseWriter) {
+ // RFC 7232 section 4.1:
+ // a sender SHOULD NOT generate representation metadata other than the
+ // above listed fields unless said metadata exists for the purpose of
+ // guiding cache updates (e.g., Last-Modified might be useful if the
+ // response does not have an ETag field).
+ h := w.Header()
+ delete(h, "Content-Type")
+ delete(h, "Content-Length")
+ delete(h, "Content-Encoding")
+ if h.Get("Etag") != "" {
+ delete(h, "Last-Modified")
+ }
+ w.WriteHeader(StatusNotModified)
+}
+
+// checkPreconditions evaluates request preconditions and reports whether a precondition
+// resulted in sending StatusNotModified or StatusPreconditionFailed.
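+// The checks run in the order If-Match, If-Unmodified-Since, If-None-Match,
+// If-Modified-Since; the Range header is dropped if If-Range evaluates false.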
+func checkPreconditions(w ResponseWriter, r *Request, modtime time.Time) (done bool, rangeHeader string) {
+ // This function carefully follows RFC 7232 section 6.
+ ch := checkIfMatch(w, r)
+ if ch == condNone {
+ ch = checkIfUnmodifiedSince(r, modtime)
+ }
+ if ch == condFalse {
+ w.WriteHeader(StatusPreconditionFailed)
+ return true, ""
+ }
+ switch checkIfNoneMatch(w, r) {
+ case condFalse:
+ if r.Method == "GET" || r.Method == "HEAD" {
+ writeNotModified(w)
+ return true, ""
+ } else {
+ w.WriteHeader(StatusPreconditionFailed)
+ return true, ""
+ }
+ case condNone:
+ if checkIfModifiedSince(r, modtime) == condFalse {
+ writeNotModified(w)
+ return true, ""
+ }
+ }
+
+ rangeHeader = r.Header.get("Range")
+ if rangeHeader != "" && checkIfRange(w, r, modtime) == condFalse {
+ rangeHeader = ""
+ }
+ return false, rangeHeader
+}
+
+// name is '/'-separated, not filepath.Separator.
+func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) {
+ const indexPage = "/index.html"
+
+ // redirect .../index.html to .../
+ // can't use Redirect() because that would make the path absolute,
+ // which would be a problem running under StripPrefix
+ if strings.HasSuffix(r.URL.Path, indexPage) {
+ localRedirect(w, r, "./")
+ return
+ }
+
+ f, err := fs.Open(name)
+ if err != nil {
+ msg, code := toHTTPError(err)
+ Error(w, msg, code)
+ return
+ }
+ defer f.Close()
+
+ d, err := f.Stat()
+ if err != nil {
+ msg, code := toHTTPError(err)
+ Error(w, msg, code)
+ return
+ }
+
+ if redirect {
+ // redirect to canonical path: / at end of directory url
+ // r.URL.Path always begins with /
+ url := r.URL.Path
+ if d.IsDir() {
+ if url[len(url)-1] != '/' {
+ localRedirect(w, r, path.Base(url)+"/")
+ return
+ }
+ } else {
+ if url[len(url)-1] == '/' {
+ localRedirect(w, r, "../"+path.Base(url))
+ return
+ }
+ }
+ }
+
+ if d.IsDir() {
+ url := r.URL.Path
+ // redirect if the directory name doesn't end in a slash
+ if url == "" || url[len(url)-1] != '/' {
+ localRedirect(w, r, path.Base(url)+"/")
+ return
+ }
+
+ // use contents of index.html for directory, if present
+ index := strings.TrimSuffix(name, "/") + indexPage
+ ff, err := fs.Open(index)
+ if err == nil {
+ defer ff.Close()
+ dd, err := ff.Stat()
+ if err == nil {
+ d = dd
+ f = ff
+ }
+ }
+ }
+
+ // Still a directory? (we didn't find an index.html file)
+ if d.IsDir() {
+ if checkIfModifiedSince(r, d.ModTime()) == condFalse {
+ writeNotModified(w)
+ return
+ }
+ setLastModified(w, d.ModTime())
+ dirList(w, r, f)
+ return
+ }
+
+ // serveContent will check modification time
+ sizeFunc := func() (int64, error) { return d.Size(), nil }
+ serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f)
+}
+
+// toHTTPError returns a non-specific HTTP error message and status code
+// for a given non-nil error value. It's important that toHTTPError does not
+// actually return err.Error(), since msg and httpStatus are returned to users,
+// and historically Go's ServeContent always returned just "404 Not Found" for
+// all errors. We don't want to start leaking information in error messages.
+func toHTTPError(err error) (msg string, httpStatus int) {
+ if errors.Is(err, fs.ErrNotExist) {
+ return "404 page not found", StatusNotFound
+ }
+ if errors.Is(err, fs.ErrPermission) {
+ return "403 Forbidden", StatusForbidden
+ }
+ // Default:
+ return "500 Internal Server Error", StatusInternalServerError
+}
+
+// localRedirect gives a Moved Permanently response.
+// It does not convert relative paths to absolute paths like Redirect does.
+func localRedirect(w ResponseWriter, r *Request, newPath string) {
+ if q := r.URL.RawQuery; q != "" {
+ newPath += "?" + q
+ }
+ w.Header().Set("Location", newPath)
+ w.WriteHeader(StatusMovedPermanently)
+}
+
+// ServeFile replies to the request with the contents of the named
+// file or directory.
+//
+// If the provided file or directory name is a relative path, it is
+// interpreted relative to the current directory and may ascend to
+// parent directories. If the provided name is constructed from user
+// input, it should be sanitized before calling ServeFile.
+//
+// As a precaution, ServeFile will reject requests where r.URL.Path
+// contains a ".." path element; this protects against callers who
+// might unsafely use [filepath.Join] on r.URL.Path without sanitizing
+// it and then use that filepath.Join result as the name argument.
+//
+// As another special case, ServeFile redirects any request where r.URL.Path
+// ends in "/index.html" to the same path, without the final
+// "index.html". To avoid such redirects either modify the path or
+// use [ServeContent].
+//
+// Outside of those two special cases, ServeFile does not use
+// r.URL.Path for selecting the file or directory to serve; only the
+// file or directory provided in the name argument is used.
+func ServeFile(w ResponseWriter, r *Request, name string) {
+ if containsDotDot(r.URL.Path) {
+ // Too many programs use r.URL.Path to construct the argument to
+	// serveFile. Reject the request under the assumption that this
+	// happened here and that ".." may not be wanted.
+ // Note that name might not contain "..", for example if code (still
+ // incorrectly) used filepath.Join(myDir, r.URL.Path).
+ Error(w, "invalid URL path", StatusBadRequest)
+ return
+ }
+ dir, file := filepath.Split(name)
+ serveFile(w, r, Dir(dir), file, false)
+}
+
+// ServeFileFS replies to the request with the contents
+// of the named file or directory from the file system fsys.
+//
+// If the provided file or directory name is a relative path, it is
+// interpreted relative to the current directory and may ascend to
+// parent directories. If the provided name is constructed from user
+// input, it should be sanitized before calling [ServeFileFS].
+//
+// As a precaution, ServeFileFS will reject requests where r.URL.Path
+// contains a ".." path element; this protects against callers who
+// might unsafely use [filepath.Join] on r.URL.Path without sanitizing
+// it and then use that filepath.Join result as the name argument.
+//
+// As another special case, ServeFileFS redirects any request where r.URL.Path
+// ends in "/index.html" to the same path, without the final
+// "index.html". To avoid such redirects either modify the path or
+// use [ServeContent].
+//
+// Outside of those two special cases, ServeFileFS does not use
+// r.URL.Path for selecting the file or directory to serve; only the
+// file or directory provided in the name argument is used.
+func ServeFileFS(w ResponseWriter, r *Request, fsys fs.FS, name string) {
+ if containsDotDot(r.URL.Path) {
+ // Too many programs use r.URL.Path to construct the argument to
+	// serveFile. Reject the request under the assumption that this
+	// happened here and that ".." may not be wanted.
+ // Note that name might not contain "..", for example if code (still
+ // incorrectly) used filepath.Join(myDir, r.URL.Path).
+ Error(w, "invalid URL path", StatusBadRequest)
+ return
+ }
+ serveFile(w, r, FS(fsys), name, false)
+}
+
+func containsDotDot(v string) bool {
+ if !strings.Contains(v, "..") {
+ return false
+ }
+ for _, ent := range strings.FieldsFunc(v, isSlashRune) {
+ if ent == ".." {
+ return true
+ }
+ }
+ return false
+}
+
+func isSlashRune(r rune) bool { return r == '/' || r == '\\' }
+
+type fileHandler struct {
+ root FileSystem
+}
+
+type ioFS struct {
+ fsys fs.FS
+}
+
+type ioFile struct {
+ file fs.File
+}
+
+func (f ioFS) Open(name string) (File, error) {
+ if name == "/" {
+ name = "."
+ } else {
+ name = strings.TrimPrefix(name, "/")
+ }
+ file, err := f.fsys.Open(name)
+ if err != nil {
+ return nil, mapOpenError(err, name, '/', func(path string) (fs.FileInfo, error) {
+ return fs.Stat(f.fsys, path)
+ })
+ }
+ return ioFile{file}, nil
+}
+
+func (f ioFile) Close() error { return f.file.Close() }
+func (f ioFile) Read(b []byte) (int, error) { return f.file.Read(b) }
+func (f ioFile) Stat() (fs.FileInfo, error) { return f.file.Stat() }
+
+var errMissingSeek = errors.New("fs.File missing Seek method")
+var errMissingReadDir = errors.New("fs.File directory missing ReadDir method")
+
+func (f ioFile) Seek(offset int64, whence int) (int64, error) {
+ s, ok := f.file.(io.Seeker)
+ if !ok {
+ return 0, errMissingSeek
+ }
+ return s.Seek(offset, whence)
+}
+
+func (f ioFile) ReadDir(count int) ([]fs.DirEntry, error) {
+ d, ok := f.file.(fs.ReadDirFile)
+ if !ok {
+ return nil, errMissingReadDir
+ }
+ return d.ReadDir(count)
+}
+
+func (f ioFile) Readdir(count int) ([]fs.FileInfo, error) {
+ d, ok := f.file.(fs.ReadDirFile)
+ if !ok {
+ return nil, errMissingReadDir
+ }
+ var list []fs.FileInfo
+ for {
+ dirs, err := d.ReadDir(count - len(list))
+ for _, dir := range dirs {
+ info, err := dir.Info()
+ if err != nil {
+ // Pretend it doesn't exist, like (*os.File).Readdir does.
+ continue
+ }
+ list = append(list, info)
+ }
+ if err != nil {
+ return list, err
+ }
+ if count < 0 || len(list) >= count {
+ break
+ }
+ }
+ return list, nil
+}
+
+// FS converts fsys to a [FileSystem] implementation,
+// for use with [FileServer] and [NewFileTransport].
+// The files provided by fsys must implement [io.Seeker].
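+//
+// For example (a sketch; the static directory and embed import are assumed):
+//
+//	//go:embed static
+//	var staticFiles embed.FS
+//
+//	var handler = http.FileServer(http.FS(staticFiles))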
+func FS(fsys fs.FS) FileSystem {
+ return ioFS{fsys}
+}
+
+// FileServer returns a handler that serves HTTP requests
+// with the contents of the file system rooted at root.
+//
+// As a special case, the returned file server redirects any request
+// ending in "/index.html" to the same path, without the final
+// "index.html".
+//
+// To use the operating system's file system implementation,
+// use [http.Dir]:
+//
+// http.Handle("/", http.FileServer(http.Dir("/tmp")))
+//
+// To use an [fs.FS] implementation, use [http.FileServerFS] instead.
+func FileServer(root FileSystem) Handler {
+ return &fileHandler{root}
+}
+
+// FileServerFS returns a handler that serves HTTP requests
+// with the contents of the file system fsys.
+//
+// As a special case, the returned file server redirects any request
+// ending in "/index.html" to the same path, without the final
+// "index.html".
+//
+// For example:
+//
+//	http.Handle("/", http.FileServerFS(fsys))
+func FileServerFS(root fs.FS) Handler {
+ return FileServer(FS(root))
+}
+
+func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ upath := r.URL.Path
+ if !strings.HasPrefix(upath, "/") {
+ upath = "/" + upath
+ r.URL.Path = upath
+ }
+ serveFile(w, r, f.root, path.Clean(upath), true)
+}
+
+// httpRange specifies the byte range to be sent to the client.
+type httpRange struct {
+ start, length int64
+}
+
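+// contentRange formats r as a Content-Range header value; for example,
+// httpRange{start: 0, length: 500} with size 1000 yields "bytes 0-499/1000".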
+func (r httpRange) contentRange(size int64) string {
+ return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
+}
+
+func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
+ return textproto.MIMEHeader{
+ "Content-Range": {r.contentRange(size)},
+ "Content-Type": {contentType},
+ }
+}
+
+// parseRange parses a Range header string as per RFC 7233.
+// errNoOverlap is returned if none of the ranges overlap.
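+// For example, with size 1000, "bytes=0-499" yields one
+// httpRange{start: 0, length: 500}, and the suffix form "bytes=-500"
+// yields httpRange{start: 500, length: 500}.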
+func parseRange(s string, size int64) ([]httpRange, error) {
+ if s == "" {
+ return nil, nil // header not present
+ }
+ const b = "bytes="
+ if !strings.HasPrefix(s, b) {
+ return nil, errors.New("invalid range")
+ }
+ var ranges []httpRange
+ noOverlap := false
+ for _, ra := range strings.Split(s[len(b):], ",") {
+ ra = textproto.TrimString(ra)
+ if ra == "" {
+ continue
+ }
+ start, end, ok := strings.Cut(ra, "-")
+ if !ok {
+ return nil, errors.New("invalid range")
+ }
+ start, end = textproto.TrimString(start), textproto.TrimString(end)
+ var r httpRange
+ if start == "" {
+ // If no start is specified, end specifies the
+ // range start relative to the end of the file,
+ // and we are dealing with <suffix-length>
+ // which has to be a non-negative integer as per
+ // RFC 7233 Section 2.1 "Byte-Ranges".
+ if end == "" || end[0] == '-' {
+ return nil, errors.New("invalid range")
+ }
+ i, err := strconv.ParseInt(end, 10, 64)
+ if i < 0 || err != nil {
+ return nil, errors.New("invalid range")
+ }
+ if i > size {
+ i = size
+ }
+ r.start = size - i
+ r.length = size - r.start
+ } else {
+ i, err := strconv.ParseInt(start, 10, 64)
+ if err != nil || i < 0 {
+ return nil, errors.New("invalid range")
+ }
+ if i >= size {
+ // If the range begins after the size of the content,
+ // then it does not overlap.
+ noOverlap = true
+ continue
+ }
+ r.start = i
+ if end == "" {
+ // If no end is specified, range extends to end of the file.
+ r.length = size - r.start
+ } else {
+ i, err := strconv.ParseInt(end, 10, 64)
+ if err != nil || r.start > i {
+ return nil, errors.New("invalid range")
+ }
+ if i >= size {
+ i = size - 1
+ }
+ r.length = i - r.start + 1
+ }
+ }
+ ranges = append(ranges, r)
+ }
+ if noOverlap && len(ranges) == 0 {
+ // The specified ranges did not overlap with the content.
+ return nil, errNoOverlap
+ }
+ return ranges, nil
+}
+
+// countingWriter counts how many bytes have been written to it.
+type countingWriter int64
+
+func (w *countingWriter) Write(p []byte) (n int, err error) {
+ *w += countingWriter(len(p))
+ return len(p), nil
+}
+
+// rangesMIMESize returns the number of bytes it takes to encode the
+// provided ranges as a multipart response.
+func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
+ var w countingWriter
+ mw := multipart.NewWriter(&w)
+ for _, ra := range ranges {
+ mw.CreatePart(ra.mimeHeader(contentType, contentSize))
+ encSize += ra.length
+ }
+ mw.Close()
+ encSize += int64(w)
+ return
+}
+
+func sumRangesSize(ranges []httpRange) (size int64) {
+ for _, ra := range ranges {
+ size += ra.length
+ }
+ return
+}
diff --git a/contrib/go/_std_1.22/src/net/http/h2_bundle.go b/contrib/go/_std_1.22/src/net/http/h2_bundle.go
new file mode 100644
index 0000000000..ac41144d5b
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/h2_bundle.go
@@ -0,0 +1,11488 @@
+//go:build !nethttpomithttp2
+
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+// $ bundle -o=h2_bundle.go -prefix=http2 -tags=!nethttpomithttp2 golang.org/x/net/http2
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+//
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "context"
+ "crypto/rand"
+ "crypto/tls"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "log"
+ "math"
+ "math/bits"
+ mathrand "math/rand"
+ "net"
+ "net/http/httptrace"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/idna"
+)
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions which would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func http2asciiEqualFold(s, t string) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if http2lower(s[i]) != http2lower(t[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func http2lower(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func http2isASCIIPrint(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] < ' ' || s[i] > '~' {
+ return false
+ }
+ }
+ return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func http2asciiToLower(s string) (lower string, ok bool) {
+ if !http2isASCIIPrint(s) {
+ return "", false
+ }
+ return strings.ToLower(s), true
+}
+
+// A list of the possible cipher suite ids. Taken from
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+
+const (
+ http2cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
+ http2cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
+ http2cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
+ http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
+ http2cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
+ http2cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+ http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
+ http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
+ http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
+ http2cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
+ http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
+ http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
+ http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
+ http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
+ http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
+ http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
+ http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
+ http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
+ http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
+ http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
+ http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
+ http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
+ http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
+ http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
+ http2cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
+ http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
+ http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
+ http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
+ // Reserved uint16 = 0x001C-1D
+ http2cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
+ http2cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
+ http2cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
+ http2cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
+ http2cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
+ http2cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
+ // Reserved uint16 = 0x0047-4F
+ // Reserved uint16 = 0x0050-58
+ // Reserved uint16 = 0x0059-5C
+ // Unassigned uint16 = 0x005D-5F
+ // Reserved uint16 = 0x0060-66
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
+ // Unassigned uint16 = 0x006E-83
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
+ http2cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
+ http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
+ http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
+ http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
+ http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
+ http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
+ http2cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
+ http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
+ http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
+ http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
+ http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
+ http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
+ http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
+ http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
+ http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
+ http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
+ http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
+ http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
+ http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
+ http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
+ http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
+ http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
+ http2cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
+ http2cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
+ // Unassigned uint16 = 0x00C6-FE
+ http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
+ // Unassigned uint16 = 0x01-55,*
+ http2cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
+ // Unassigned uint16 = 0x5601 - 0xC000
+ http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
+ http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
+ http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
+ http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
+ http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
+ http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
+ http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
+ http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
+ http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
+ http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
+ http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
+ http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
+ http2cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
+ http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
+ http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
+ http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
+ http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
+ http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
+ http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
+ http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
+ http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
+ http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
+ http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
+ http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
+ http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
+ http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
+ http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
+ http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
+ http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
+ http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
+ http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
+ http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
+ http2cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
+ http2cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
+ http2cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
+ http2cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
+ http2cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
+ http2cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
+ http2cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
+ http2cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
+ http2cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
+ http2cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
+ // Unassigned uint16 = 0xC0B0-FF
+ // Unassigned uint16 = 0xC1-CB,*
+ // Unassigned uint16 = 0xCC00-A7
+ http2cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
+ http2cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
+ http2cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
+ http2cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
+ http2cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
+ http2cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
+)
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+// References:
+// https://tools.ietf.org/html/rfc7540#appendix-A
+// Reject cipher suites from Appendix A.
+// "This list includes those cipher suites that do not
+// offer an ephemeral key exchange and those that are
+// based on the TLS null, stream or block cipher type"
+func http2isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case http2cipher_TLS_NULL_WITH_NULL_NULL,
+ http2cipher_TLS_RSA_WITH_NULL_MD5,
+ http2cipher_TLS_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_RSA_WITH_RC4_128_MD5,
+ http2cipher_TLS_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_DH_anon_WITH_RC4_128_MD5,
+ http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_DES_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_RC4_128_SHA,
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_DES_CBC_MD5,
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
+ http2cipher_TLS_KRB5_WITH_RC4_128_MD5,
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_NULL_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
+ http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_AES_128_CCM,
+ http2cipher_TLS_RSA_WITH_AES_256_CCM,
+ http2cipher_TLS_RSA_WITH_AES_128_CCM_8,
+ http2cipher_TLS_RSA_WITH_AES_256_CCM_8,
+ http2cipher_TLS_PSK_WITH_AES_128_CCM,
+ http2cipher_TLS_PSK_WITH_AES_256_CCM,
+ http2cipher_TLS_PSK_WITH_AES_128_CCM_8,
+ http2cipher_TLS_PSK_WITH_AES_256_CCM_8:
+ return true
+ default:
+ return false
+ }
+}
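+
+// Illustrative sketch (not part of the vendored code): a caller holding a
+// hypothetical *tls.Config named cfg could pre-filter its cipher suites
+// against the Appendix A blacklist before serving HTTP/2:
+//
+//	good := cfg.CipherSuites[:0]
+//	for _, cs := range cfg.CipherSuites {
+//		if !http2isBadCipher(cs) {
+//			good = append(good, cs)
+//		}
+//	}
+//	cfg.CipherSuites = good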
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type http2ClientConnPool interface {
+ // GetClientConn returns a specific HTTP/2 connection (usually
+ // a TLS-TCP connection) to an HTTP/2 server. On success, the
+ // returned ClientConn accounts for the upcoming RoundTrip
+ // call, so the caller should not omit it. If the caller needs
+ // to, ClientConn.RoundTrip can be called with a bogus
+ // new(http.Request) to release the stream reservation.
+ GetClientConn(req *Request, addr string) (*http2ClientConn, error)
+ MarkDead(*http2ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type http2clientConnPoolIdleCloser interface {
+ http2ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ http2clientConnPoolIdleCloser = (*http2clientConnPool)(nil)
+ _ http2clientConnPoolIdleCloser = http2noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type http2clientConnPool struct {
+ t *http2Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*http2ClientConn // key is host:port
+ dialing map[string]*http2dialCall // currently in-flight dials
+ keys map[*http2ClientConn][]string
+ addConnCalls map[string]*http2addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *http2clientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
+ return p.getClientConn(req, addr, http2dialOnMiss)
+}
+
+const (
+ http2dialOnMiss = true
+ http2noDialOnMiss = false
+)
+
+func (p *http2clientConnPool) getClientConn(req *Request, addr string, dialOnMiss bool) (*http2ClientConn, error) {
+ // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
+ if http2isConnectionCloseRequest(req) && dialOnMiss {
+ // It gets its own connection.
+ http2traceGetConn(req, addr)
+ const singleUse = true
+ cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
+ if err != nil {
+ return nil, err
+ }
+ return cc, nil
+ }
+ for {
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.ReserveNewRequest() {
+ // When a connection is presented to us by the net/http package,
+ // the GetConn hook has already been called.
+ // Don't call it a second time here.
+ if !cc.getConnCalled {
+ http2traceGetConn(req, addr)
+ }
+ cc.getConnCalled = false
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, http2ErrNoCachedConn
+ }
+ http2traceGetConn(req, addr)
+ call := p.getStartDialLocked(req.Context(), addr)
+ p.mu.Unlock()
+ <-call.done
+ if http2shouldRetryDial(call, req) {
+ continue
+ }
+ cc, err := call.res, call.err
+ if err != nil {
+ return nil, err
+ }
+ if cc.ReserveNewRequest() {
+ return cc, nil
+ }
+ }
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type http2dialCall struct {
+ _ http2incomparable
+ p *http2clientConnPool
+ // the context associated with the request
+ // that created this dialCall
+ ctx context.Context
+ done chan struct{} // closed when done
+ res *http2ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *http2clientConnPool) getStartDialLocked(ctx context.Context, addr string) *http2dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &http2dialCall{p: p, done: make(chan struct{}), ctx: ctx}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*http2dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(call.ctx, addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *http2dialCall) dial(ctx context.Context, addr string) {
+ const singleUse = false // shared conn
+ c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+
+ close(c.done)
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (it doesn't know
+// the protocol in advance), it can end up with multiple TLS connections to the same host.
+// This code decides which ones live or die.
+// The return value reports whether c was used.
+// c is never closed.
+func (p *http2clientConnPool) addConnIfNeeded(key string, t *http2Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*http2addConnCall)
+ }
+ call = &http2addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type http2addConnCall struct {
+ _ http2incomparable
+ p *http2clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *http2addConnCall) run(t *http2Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ cc.getConnCalled = true // already called by the net/http package
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+// p.mu must be held
+func (p *http2clientConnPool) addConnLocked(key string, cc *http2ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*http2ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*http2ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *http2clientConnPool) MarkDead(cc *http2ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := http2filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *http2clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func http2filterOutClientConn(in []*http2ClientConn, exclude *http2ClientConn) []*http2ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ // If we filtered it out, zero out the last item to prevent
+ // the GC from seeing it.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type http2noDialClientConnPool struct{ *http2clientConnPool }
+
+func (p http2noDialClientConnPool) GetClientConn(req *Request, addr string) (*http2ClientConn, error) {
+ return p.getClientConn(req, addr, http2noDialOnMiss)
+}
+
+// shouldRetryDial reports whether the current request should
+// retry dialing after the call finished unsuccessfully, for example
+// if the dial was canceled because of a context cancellation or
+// deadline expiry.
+func http2shouldRetryDial(call *http2dialCall, req *Request) bool {
+ if call.err == nil {
+ // No error, no need to retry
+ return false
+ }
+ if call.ctx == req.Context() {
+ // If the call has the same context as the request, the dial
+ // should not be retried, since any cancellation will have come
+ // from this request.
+ return false
+ }
+ if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
+ // If the call error is not because of a context cancellation or a deadline expiry,
+ // the dial should not be retried.
+ return false
+ }
+ // Only retry if the error is a context cancellation error or deadline expiry
+ // and the context associated with the call was canceled or expired.
+ return call.ctx.Err() != nil
+}
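+
+// An illustrative scenario (not code from this file): request A starts the
+// shared dial and request B joins it; A's context is then canceled, so the
+// dial fails with context.Canceled. Because call.ctx is A's context rather
+// than B's, shouldRetryDial reports true for B, and B dials again under its
+// own context.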
+
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var http2dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return new([1 << 10]byte) }},
+ {New: func() interface{} { return new([2 << 10]byte) }},
+ {New: func() interface{} { return new([4 << 10]byte) }},
+ {New: func() interface{} { return new([8 << 10]byte) }},
+ {New: func() interface{} { return new([16 << 10]byte) }},
+}
+
+func http2getDataBufferChunk(size int64) []byte {
+ switch {
+ case size <= 1<<10:
+ return http2dataChunkPools[0].Get().(*[1 << 10]byte)[:]
+ case size <= 2<<10:
+ return http2dataChunkPools[1].Get().(*[2 << 10]byte)[:]
+ case size <= 4<<10:
+ return http2dataChunkPools[2].Get().(*[4 << 10]byte)[:]
+ case size <= 8<<10:
+ return http2dataChunkPools[3].Get().(*[8 << 10]byte)[:]
+ default:
+ return http2dataChunkPools[4].Get().(*[16 << 10]byte)[:]
+ }
+}
+
+func http2putDataBufferChunk(p []byte) {
+ switch len(p) {
+ case 1 << 10:
+ http2dataChunkPools[0].Put((*[1 << 10]byte)(p))
+ case 2 << 10:
+ http2dataChunkPools[1].Put((*[2 << 10]byte)(p))
+ case 4 << 10:
+ http2dataChunkPools[2].Put((*[4 << 10]byte)(p))
+ case 8 << 10:
+ http2dataChunkPools[3].Put((*[8 << 10]byte)(p))
+ case 16 << 10:
+ http2dataChunkPools[4].Put((*[16 << 10]byte)(p))
+ default:
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+ }
+}
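+
+// For illustration (a sketch, not part of the vendored code): a request for
+// 3000 bytes rounds up to the 4 KiB size class, and the chunk should be
+// returned to the matching pool once drained:
+//
+//	chunk := http2getDataBufferChunk(3000) // len(chunk) == 4<<10
+//	defer http2putDataBufferChunk(chunk)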
+
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type http2dataBuffer struct {
+ chunks [][]byte
+ r int // next byte to read is chunks[0][r]
+ w int // next byte to write is chunks[len(chunks)-1][w]
+ size int // total buffered bytes
+ expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var http2errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *http2dataBuffer) Read(p []byte) (int, error) {
+ if b.size == 0 {
+ return 0, http2errReadEmpty
+ }
+ var ntotal int
+ for len(p) > 0 && b.size > 0 {
+ readFrom := b.bytesFromFirstChunk()
+ n := copy(p, readFrom)
+ p = p[n:]
+ ntotal += n
+ b.r += n
+ b.size -= n
+ // If the first chunk has been consumed, advance to the next chunk.
+ if b.r == len(b.chunks[0]) {
+ http2putDataBufferChunk(b.chunks[0])
+ end := len(b.chunks) - 1
+ copy(b.chunks[:end], b.chunks[1:])
+ b.chunks[end] = nil
+ b.chunks = b.chunks[:end]
+ b.r = 0
+ }
+ }
+ return ntotal, nil
+}
+
+func (b *http2dataBuffer) bytesFromFirstChunk() []byte {
+ if len(b.chunks) == 1 {
+ return b.chunks[0][b.r:b.w]
+ }
+ return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *http2dataBuffer) Len() int {
+ return b.size
+}
+
+// Write appends p to the buffer.
+func (b *http2dataBuffer) Write(p []byte) (int, error) {
+ ntotal := len(p)
+ for len(p) > 0 {
+ // If the last chunk is full, allocate a new chunk. Try to allocate
+ // enough to fully copy p plus any additional bytes we expect to
+ // receive. However, this may allocate less than len(p).
+ want := int64(len(p))
+ if b.expected > want {
+ want = b.expected
+ }
+ chunk := b.lastChunkOrAlloc(want)
+ n := copy(chunk[b.w:], p)
+ p = p[n:]
+ b.w += n
+ b.size += n
+ b.expected -= int64(n)
+ }
+ return ntotal, nil
+}
+
+func (b *http2dataBuffer) lastChunkOrAlloc(want int64) []byte {
+ if len(b.chunks) != 0 {
+ last := b.chunks[len(b.chunks)-1]
+ if b.w < len(last) {
+ return last
+ }
+ }
+ chunk := http2getDataBufferChunk(want)
+ b.chunks = append(b.chunks, chunk)
+ b.w = 0
+ return chunk
+}
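+
+// A minimal usage sketch (illustrative only, not part of the vendored code):
+//
+//	var b http2dataBuffer
+//	b.expected = 3 << 10         // hint: about 3 KiB still expected
+//	b.Write(make([]byte, 2<<10)) // allocates a single 4 KiB chunk
+//	p := make([]byte, 1<<10)
+//	n, _ := b.Read(p) // n == 1<<10; b.Len() == 1<<10 stays buffered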
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type http2ErrCode uint32
+
+const (
+ http2ErrCodeNo http2ErrCode = 0x0
+ http2ErrCodeProtocol http2ErrCode = 0x1
+ http2ErrCodeInternal http2ErrCode = 0x2
+ http2ErrCodeFlowControl http2ErrCode = 0x3
+ http2ErrCodeSettingsTimeout http2ErrCode = 0x4
+ http2ErrCodeStreamClosed http2ErrCode = 0x5
+ http2ErrCodeFrameSize http2ErrCode = 0x6
+ http2ErrCodeRefusedStream http2ErrCode = 0x7
+ http2ErrCodeCancel http2ErrCode = 0x8
+ http2ErrCodeCompression http2ErrCode = 0x9
+ http2ErrCodeConnect http2ErrCode = 0xa
+ http2ErrCodeEnhanceYourCalm http2ErrCode = 0xb
+ http2ErrCodeInadequateSecurity http2ErrCode = 0xc
+ http2ErrCodeHTTP11Required http2ErrCode = 0xd
+)
+
+var http2errCodeName = map[http2ErrCode]string{
+ http2ErrCodeNo: "NO_ERROR",
+ http2ErrCodeProtocol: "PROTOCOL_ERROR",
+ http2ErrCodeInternal: "INTERNAL_ERROR",
+ http2ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ http2ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ http2ErrCodeStreamClosed: "STREAM_CLOSED",
+ http2ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ http2ErrCodeRefusedStream: "REFUSED_STREAM",
+ http2ErrCodeCancel: "CANCEL",
+ http2ErrCodeCompression: "COMPRESSION_ERROR",
+ http2ErrCodeConnect: "CONNECT_ERROR",
+ http2ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ http2ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ http2ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e http2ErrCode) String() string {
+ if s, ok := http2errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+func (e http2ErrCode) stringToken() string {
+ if s, ok := http2errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type http2ConnectionError http2ErrCode
+
+func (e http2ConnectionError) Error() string {
+ return fmt.Sprintf("connection error: %s", http2ErrCode(e))
+}
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type http2StreamError struct {
+ StreamID uint32
+ Code http2ErrCode
+ Cause error // optional additional detail
+}
+
+// errFromPeer is a sentinel error value for StreamError.Cause to
+// indicate that the StreamError was sent from the peer over the wire
+// and wasn't locally generated in the Transport.
+var http2errFromPeer = errors.New("received from peer")
+
+func http2streamError(id uint32, code http2ErrCode) http2StreamError {
+ return http2StreamError{StreamID: id, Code: code}
+}
+
+func (e http2StreamError) Error() string {
+ if e.Cause != nil {
+ return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+ }
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type http2goAwayFlowError struct{}
+
+func (http2goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
+type http2connError struct {
+ Code http2ErrCode // the ConnectionError error code
+ Reason string // additional reason
+}
+
+func (e http2connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type http2pseudoHeaderError string
+
+func (e http2pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type http2duplicatePseudoHeaderError string
+
+func (e http2duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type http2headerFieldNameError string
+
+func (e http2headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type http2headerFieldValueError string
+
+func (e http2headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value for %q", string(e))
+}
+
+var (
+ http2errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ http2errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
+
+// inflowMinRefresh is the minimum number of bytes we'll send for a
+// flow control window update.
+const http2inflowMinRefresh = 4 << 10
+
+// inflow accounts for an inbound flow control window.
+// It tracks both the latest window sent to the peer (used for enforcement)
+// and the accumulated unsent window.
+type http2inflow struct {
+ avail int32
+ unsent int32
+}
+
+// init sets the initial window.
+func (f *http2inflow) init(n int32) {
+ f.avail = n
+}
+
+// add adds n bytes to the window, with a maximum window size of max,
+// indicating that the peer can now send us more data.
+// For example, the user read from a {Request,Response} body and consumed
+// some of the buffered data, so the peer can now send more.
+// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer.
+// Window updates are accumulated and sent when the unsent capacity
+// is at least inflowMinRefresh or will at least double the peer's available window.
+func (f *http2inflow) add(n int) (connAdd int32) {
+ if n < 0 {
+ panic("negative update")
+ }
+ unsent := int64(f.unsent) + int64(n)
+ // "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets."
+ // RFC 7540 Section 6.9.1.
+ const maxWindow = 1<<31 - 1
+ if unsent+int64(f.avail) > maxWindow {
+ panic("flow control update exceeds maximum window size")
+ }
+ f.unsent = int32(unsent)
+ if f.unsent < http2inflowMinRefresh && f.unsent < f.avail {
+ // If there aren't at least inflowMinRefresh bytes of window to send,
+ // and this update won't at least double the window, buffer the update for later.
+ return 0
+ }
+ f.avail += f.unsent
+ f.unsent = 0
+ return int32(unsent)
+}
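+
+// Worked example (illustrative): with a 64 KiB initial window, small reads
+// are buffered until the unsent total reaches inflowMinRefresh:
+//
+//	var f http2inflow
+//	f.init(64 << 10)
+//	f.take(8 << 10) // peer sent 8 KiB; avail drops to 56 KiB
+//	f.add(2 << 10)  // returns 0: 2 KiB < inflowMinRefresh and < avail
+//	f.add(2 << 10)  // returns 4096: unsent total reached inflowMinRefresh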
+
+// take attempts to take n bytes from the peer's flow control window.
+// It reports whether the window has available capacity.
+func (f *http2inflow) take(n uint32) bool {
+ if n > uint32(f.avail) {
+ return false
+ }
+ f.avail -= int32(n)
+ return true
+}
+
+// takeInflows attempts to take n bytes from two inflows,
+// typically connection-level and stream-level flows.
+// It reports whether both windows have available capacity.
+func http2takeInflows(f1, f2 *http2inflow, n uint32) bool {
+ if n > uint32(f1.avail) || n > uint32(f2.avail) {
+ return false
+ }
+ f1.avail -= int32(n)
+ f2.avail -= int32(n)
+ return true
+}
+
+// outflow is the outbound flow control window's size.
+type http2outflow struct {
+ _ http2incomparable
+
+ // n is the number of DATA bytes we're allowed to send.
+ // An outflow is kept both on the conn and on each stream.
+ n int32
+
+ // conn points to the connection-level outflow that is
+ // shared by all streams on that conn. It is nil for the outflow
+ // that's on the conn directly.
+ conn *http2outflow
+}
+
+func (f *http2outflow) setConnFlow(cf *http2outflow) { f.conn = cf }
+
+func (f *http2outflow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *http2outflow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would overflow the int32 window in either direction.
+func (f *http2outflow) add(n int32) bool {
+ sum := f.n + n
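+ // Without int32 overflow, sum > n holds exactly when f.n > 0; a
+ // mismatch means the addition wrapped around in either direction.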
+ if (sum > n) == (f.n > 0) {
+ f.n = sum
+ return true
+ }
+ return false
+}
+
+const http2frameHeaderLen = 9
+
+var http2padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// https://httpwg.org/specs/rfc7540.html#rfc.section.11.2
+type http2FrameType uint8
+
+const (
+ http2FrameData http2FrameType = 0x0
+ http2FrameHeaders http2FrameType = 0x1
+ http2FramePriority http2FrameType = 0x2
+ http2FrameRSTStream http2FrameType = 0x3
+ http2FrameSettings http2FrameType = 0x4
+ http2FramePushPromise http2FrameType = 0x5
+ http2FramePing http2FrameType = 0x6
+ http2FrameGoAway http2FrameType = 0x7
+ http2FrameWindowUpdate http2FrameType = 0x8
+ http2FrameContinuation http2FrameType = 0x9
+)
+
+var http2frameName = map[http2FrameType]string{
+ http2FrameData: "DATA",
+ http2FrameHeaders: "HEADERS",
+ http2FramePriority: "PRIORITY",
+ http2FrameRSTStream: "RST_STREAM",
+ http2FrameSettings: "SETTINGS",
+ http2FramePushPromise: "PUSH_PROMISE",
+ http2FramePing: "PING",
+ http2FrameGoAway: "GOAWAY",
+ http2FrameWindowUpdate: "WINDOW_UPDATE",
+ http2FrameContinuation: "CONTINUATION",
+}
+
+func (t http2FrameType) String() string {
+ if s, ok := http2frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type http2Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f http2Flags) Has(v http2Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ http2FlagDataEndStream http2Flags = 0x1
+ http2FlagDataPadded http2Flags = 0x8
+
+ // Headers Frame
+ http2FlagHeadersEndStream http2Flags = 0x1
+ http2FlagHeadersEndHeaders http2Flags = 0x4
+ http2FlagHeadersPadded http2Flags = 0x8
+ http2FlagHeadersPriority http2Flags = 0x20
+
+ // Settings Frame
+ http2FlagSettingsAck http2Flags = 0x1
+
+ // Ping Frame
+ http2FlagPingAck http2Flags = 0x1
+
+ // Continuation Frame
+ http2FlagContinuationEndHeaders http2Flags = 0x4
+
+ http2FlagPushPromiseEndHeaders http2Flags = 0x4
+ http2FlagPushPromisePadded http2Flags = 0x8
+)
+
+var http2flagName = map[http2FrameType]map[http2Flags]string{
+ http2FrameData: {
+ http2FlagDataEndStream: "END_STREAM",
+ http2FlagDataPadded: "PADDED",
+ },
+ http2FrameHeaders: {
+ http2FlagHeadersEndStream: "END_STREAM",
+ http2FlagHeadersEndHeaders: "END_HEADERS",
+ http2FlagHeadersPadded: "PADDED",
+ http2FlagHeadersPriority: "PRIORITY",
+ },
+ http2FrameSettings: {
+ http2FlagSettingsAck: "ACK",
+ },
+ http2FramePing: {
+ http2FlagPingAck: "ACK",
+ },
+ http2FrameContinuation: {
+ http2FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ http2FramePushPromise: {
+ http2FlagPushPromiseEndHeaders: "END_HEADERS",
+ http2FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type http2frameParser func(fc *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error)
+
+var http2frameParsers = map[http2FrameType]http2frameParser{
+ http2FrameData: http2parseDataFrame,
+ http2FrameHeaders: http2parseHeadersFrame,
+ http2FramePriority: http2parsePriorityFrame,
+ http2FrameRSTStream: http2parseRSTStreamFrame,
+ http2FrameSettings: http2parseSettingsFrame,
+ http2FramePushPromise: http2parsePushPromise,
+ http2FramePing: http2parsePingFrame,
+ http2FrameGoAway: http2parseGoAwayFrame,
+ http2FrameWindowUpdate: http2parseWindowUpdateFrame,
+ http2FrameContinuation: http2parseContinuationFrame,
+}
+
+func http2typeFrameParser(t http2FrameType) http2frameParser {
+ if f := http2frameParsers[t]; f != nil {
+ return f
+ }
+ return http2parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See https://httpwg.org/specs/rfc7540.html#FrameHeader
+type http2FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type http2FrameType
+
+ // Flags is the frame's 1 byte of bit flags (up to 8 per frame).
+ // Their meaning is specific to the frame type.
+ Flags http2Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h http2FrameHeader) Header() http2FrameHeader { return h }
+
+func (h http2FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h http2FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := http2flagName[h.Type][http2Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *http2FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *http2FrameHeader) invalidate() { h.valid = false }
+
+// http2fhBytes is a pool of frame-header read buffers.
+// Used only by ReadFrameHeader.
+var http2fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, http2frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func http2ReadFrameHeader(r io.Reader) (http2FrameHeader, error) {
+ bufp := http2fhBytes.Get().(*[]byte)
+ defer http2fhBytes.Put(bufp)
+ return http2readFrameHeader(*bufp, r)
+}
+
+func http2readFrameHeader(buf []byte, r io.Reader) (http2FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:http2frameHeaderLen])
+ if err != nil {
+ return http2FrameHeader{}, err
+ }
+ return http2FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: http2FrameType(buf[3]),
+ Flags: http2Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
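+
+// Wire-format example (illustrative): the nine header bytes are a 24-bit
+// big-endian length, an 8-bit type, an 8-bit flags byte, and a 31-bit stream
+// ID whose reserved high bit is masked off. So the bytes
+//
+//	00 00 08 00 01 00 00 00 01
+//
+// decode as Length=8, Type=DATA, Flags=END_STREAM, StreamID=1.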
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type http2Frame interface {
+ Header() http2FrameHeader
+
+ // invalidate is called by Framer.ReadFrame to mark this
+ // frame's buffers as invalid, since the subsequent
+ // frame will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type http2Framer struct {
+ r io.Reader
+ lastFrame http2Frame
+ errDetail error
+
+ // countError is a non-nil func that's called on a frame parse
+ // error with some unique error path token. It's initialized
+ // from Transport.CountError or Server.CountError.
+ countError func(errToken string)
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [http2frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will prefer to return an error
+ // rather than comply.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+ // ReadMetaHeaders if non-nil causes ReadFrame to merge
+ // HEADERS and CONTINUATION frames together and return
+ // MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+ // (currently 16MB).
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads, logWrites bool
+
+ debugFramer *http2Framer // used only for logging written frames
+ debugFramerBuf *bytes.Buffer
+ debugReadLoggerf func(string, ...interface{})
+ debugWriteLoggerf func(string, ...interface{})
+
+ frameCache *http2frameCache // nil if frames aren't reused (default)
+}
+
+func (fr *http2Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
+}
+
+func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in by endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *http2Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - http2frameHeaderLen
+ if length >= (1 << 24) {
+ return http2ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if f.logWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *http2Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = http2NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, http2summarizeFrame(fr))
+}
+
+func (f *http2Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+
+func (f *http2Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+
+func (f *http2Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+
+func (f *http2Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ http2minMaxFrameSize = 1 << 14
+ http2maxFrameSize = 1<<24 - 1
+)
+
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *http2Framer) SetReuseFrames() {
+ if fr.frameCache != nil {
+ return
+ }
+ fr.frameCache = &http2frameCache{}
+}
+
+type http2frameCache struct {
+ dataFrame http2DataFrame
+}
+
+func (fc *http2frameCache) getDataFrame() *http2DataFrame {
+ if fc == nil {
+ return &http2DataFrame{}
+ }
+ return &fc.dataFrame
+}
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func http2NewFramer(w io.Writer, r io.Reader) *http2Framer {
+ fr := &http2Framer{
+ w: w,
+ r: r,
+ countError: func(string) {},
+ logReads: http2logFrameReads,
+ logWrites: http2logFrameWrites,
+ debugReadLoggerf: log.Printf,
+ debugWriteLoggerf: log.Printf,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(http2maxFrameSize)
+ return fr
+}
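+
+// Usage sketch (illustrative; conn stands for any io.ReadWriter, such as a
+// *tls.Conn, and the http2-prefixed names are this bundle's forms of the
+// golang.org/x/net/http2 Framer API):
+//
+//	fr := http2NewFramer(conn, conn)
+//	fr.SetReuseFrames() // frames are valid only until the next ReadFrame
+//	for {
+//		f, err := fr.ReadFrame()
+//		if err != nil {
+//			break // e.g. ErrFrameTooLarge, a ConnectionError, or io.EOF
+//		}
+//		_ = f // type-assert: *http2DataFrame, *http2SettingsFrame, etc.
+//	}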
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *http2Framer) SetMaxReadFrameSize(v uint32) {
+ if v > http2maxFrameSize {
+ v = http2maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *http2Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var http2ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func http2terminalReadFrameError(err error) bool {
+ if _, ok := err.(http2StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *http2Framer) ReadFrame() (http2Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := http2readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, http2ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := http2typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
+ if err != nil {
+ if ce, ok := err.(http2connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ fr.debugReadLoggerf("http2: Framer %p: read %v", fr, http2summarizeFrame(f))
+ }
+ if fh.Type == http2FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*http2HeadersFrame))
+ }
+ return f, nil
+}
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *http2Framer) connError(code http2ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return http2ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *http2Framer) checkFrameOrder(f http2Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != http2FrameContinuation {
+ return fr.connError(http2ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(http2ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == http2FrameContinuation {
+ return fr.connError(http2ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case http2FrameHeaders, http2FrameContinuation:
+ if fh.Flags.Has(http2FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.1
+type http2DataFrame struct {
+ http2FrameHeader
+ data []byte
+}
+
+func (f *http2DataFrame) StreamEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *http2DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func http2parseDataFrame(fc *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ countError("frame_data_stream_0")
+ return nil, http2connError{http2ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := fc.getDataFrame()
+ f.http2FrameHeader = fh
+
+ var padSize byte
+ if fh.Flags.Has(http2FlagDataPadded) {
+ var err error
+ payload, padSize, err = http2readByte(payload)
+ if err != nil {
+ countError("frame_data_pad_byte_short")
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ countError("frame_data_pad_too_big")
+ return nil, http2connError{http2ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
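+
+// http2exampleParseDataPadding is an illustrative sketch (not part of
+// the upstream source) of the padding math above: a padded DATA
+// payload is one pad-length byte, the data octets, then padSize zero
+// bytes of padding.
+func http2exampleParseDataPadding() {
+	payload := []byte{0x02, 'h', 'i', 0x00, 0x00} // PADDED flag set; padSize = 2
+	padSize := payload[0]
+	body := payload[1:]
+	data := body[:len(body)-int(padSize)] // "hi", padding stripped
+	fmt.Printf("%q\n", data)
+}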
+
+var (
+ http2errStreamID = errors.New("invalid stream ID")
+ http2errDepStreamID = errors.New("invalid dependent stream ID")
+ http2errPadLength = errors.New("pad length too large")
+ http2errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
+)
+
+func http2validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
+
+func http2validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *http2Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *http2Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+ if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil {
+ return err
+ }
+ return f.endWrite()
+}
+
+// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer.
+// The caller should call endWrite to flush the frame to the underlying writer.
+func (f *http2Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ if len(pad) > 0 {
+ if len(pad) > 255 {
+ return http2errPadLength
+ }
+ if !f.AllowIllegalWrites {
+ for _, b := range pad {
+ if b != 0 {
+ // "Padding octets MUST be set to zero when sending."
+ return http2errPadBytes
+ }
+ }
+ }
+ }
+ var flags http2Flags
+ if endStream {
+ flags |= http2FlagDataEndStream
+ }
+ if pad != nil {
+ flags |= http2FlagDataPadded
+ }
+ f.startWrite(http2FrameData, flags, streamID)
+ if pad != nil {
+ f.wbuf = append(f.wbuf, byte(len(pad)))
+ }
+ f.wbuf = append(f.wbuf, data...)
+ f.wbuf = append(f.wbuf, pad...)
+ return nil
+}
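+
+// http2exampleWriteData is a minimal usage sketch (not part of the
+// upstream source): pad bytes must be zeros unless AllowIllegalWrites
+// is set, and the whole frame goes out in a single Write.
+func http2exampleWriteData(f *http2Framer) error {
+	// Send "hello" on stream 1 with two zero pad bytes and END_STREAM set.
+	return f.WriteDataPadded(1, true, []byte("hello"), []byte{0, 0})
+}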
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See https://httpwg.org/specs/rfc7540.html#SETTINGS
+type http2SettingsFrame struct {
+ http2FrameHeader
+ p []byte
+}
+
+func http2parseSettingsFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if fh.Flags.Has(http2FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ countError("frame_settings_ack_with_length")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ countError("frame_settings_has_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+ countError("frame_settings_mod_6")
+		// Expecting a whole number of 6-byte settings.
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ f := &http2SettingsFrame{http2FrameHeader: fh, p: p}
+ if v, ok := f.Value(http2SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ countError("frame_settings_window_size_too_big")
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *http2SettingsFrame) IsAck() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagSettingsAck)
+}
+
+func (f *http2SettingsFrame) Value(id http2SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ for i := 0; i < f.NumSettings(); i++ {
+ if s := f.Setting(i); s.ID == id {
+ return s.Val, true
+ }
+ }
+ return 0, false
+}
+
+// Setting returns the setting from the frame at the given 0-based index.
+// The index must be >= 0 and less than f.NumSettings().
+func (f *http2SettingsFrame) Setting(i int) http2Setting {
+ buf := f.p
+ return http2Setting{
+ ID: http2SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
+ Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
+ }
+}
+
+func (f *http2SettingsFrame) NumSettings() int { return len(f.p) / 6 }
+
+// HasDuplicates reports whether f contains any duplicate setting IDs.
+func (f *http2SettingsFrame) HasDuplicates() bool {
+ num := f.NumSettings()
+ if num == 0 {
+ return false
+ }
+ // If it's small enough (the common case), just do the n^2
+ // thing and avoid a map allocation.
+ if num < 10 {
+ for i := 0; i < num; i++ {
+ idi := f.Setting(i).ID
+ for j := i + 1; j < num; j++ {
+ idj := f.Setting(j).ID
+ if idi == idj {
+ return true
+ }
+ }
+ }
+ return false
+ }
+ seen := map[http2SettingID]bool{}
+ for i := 0; i < num; i++ {
+ id := f.Setting(i).ID
+ if seen[id] {
+ return true
+ }
+ seen[id] = true
+ }
+ return false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *http2SettingsFrame) ForeachSetting(fn func(http2Setting) error) error {
+ f.checkValid()
+ for i := 0; i < f.NumSettings(); i++ {
+ if err := fn(f.Setting(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
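+
+// http2exampleReadSettings is an illustrative sketch (not part of the
+// upstream source) of iterating a received SETTINGS frame and
+// validating each 6-byte setting with Valid.
+func http2exampleReadSettings(sf *http2SettingsFrame) error {
+	return sf.ForeachSetting(func(s http2Setting) error {
+		if err := s.Valid(); err != nil {
+			return err
+		}
+		fmt.Printf("%v = %d\n", s.ID, s.Val)
+		return nil
+	})
+}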
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteSettings(settings ...http2Setting) error {
+ f.startWrite(http2FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
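+
+// http2exampleWriteSettings sketches (hypothetically; not part of the
+// upstream source) a server's initial SETTINGS write. Each setting
+// occupies 6 bytes on the wire: a 2-byte ID and a 4-byte value.
+func http2exampleWriteSettings(f *http2Framer) error {
+	return f.WriteSettings(
+		http2Setting{ID: http2SettingMaxFrameSize, Val: 1 << 20},
+		http2Setting{ID: http2SettingMaxConcurrentStreams, Val: 250},
+	)
+}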
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteSettingsAck() error {
+ f.startWrite(http2FrameSettings, http2FlagSettingsAck, 0)
+ return f.endWrite()
+}
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.7
+type http2PingFrame struct {
+ http2FrameHeader
+ Data [8]byte
+}
+
+func (f *http2PingFrame) IsAck() bool { return f.Flags.Has(http2FlagPingAck) }
+
+func http2parsePingFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
+ if len(payload) != 8 {
+ countError("frame_ping_length")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ countError("frame_ping_has_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ f := &http2PingFrame{http2FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *http2Framer) WritePing(ack bool, data [8]byte) error {
+ var flags http2Flags
+ if ack {
+ flags = http2FlagPingAck
+ }
+ f.startWrite(http2FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
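+
+// http2examplePing is an illustrative sketch (not part of the upstream
+// source) of the round-trip-time use of PING: record the send time and
+// the 8 opaque bytes, then match them against the peer's PING ACK in
+// the caller's frame-reading loop.
+func http2examplePing(f *http2Framer) (start time.Time, data [8]byte, err error) {
+	copy(data[:], "rtt-test") // exactly 8 opaque bytes
+	start = time.Now()
+	err = f.WritePing(false, data)
+	return start, data, err
+}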
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.8
+type http2GoAwayFrame struct {
+ http2FrameHeader
+ LastStreamID uint32
+ ErrCode http2ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *http2GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func http2parseGoAwayFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if fh.StreamID != 0 {
+ countError("frame_goaway_has_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ countError("frame_goaway_short")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ return &http2GoAwayFrame{
+ http2FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: http2ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *http2Framer) WriteGoAway(maxStreamID uint32, code http2ErrCode, debugData []byte) error {
+ f.startWrite(http2FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type http2UnknownFrame struct {
+ http2FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *http2UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func http2parseUnknownFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ return &http2UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.9
+type http2WindowUpdateFrame struct {
+ http2FrameHeader
+ Increment uint32 // never read with high bit set
+}
+
+func http2parseWindowUpdateFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if len(p) != 4 {
+ countError("frame_windowupdate_bad_len")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ countError("frame_windowupdate_zero_inc_conn")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ countError("frame_windowupdate_zero_inc_stream")
+ return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
+ }
+ return &http2WindowUpdateFrame{
+ http2FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *http2Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(http2FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
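+
+// http2exampleReplenish is a minimal sketch (not part of the upstream
+// source) of flow-control replenishment: after consuming n bytes of
+// DATA (1 <= n <= 2^31-1), return the credit on both the stream and
+// the connection (stream ID 0) windows.
+func http2exampleReplenish(f *http2Framer, streamID, n uint32) error {
+	if err := f.WriteWindowUpdate(streamID, n); err != nil { // stream-level window
+		return err
+	}
+	return f.WriteWindowUpdate(0, n) // connection-level window
+}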
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type http2HeadersFrame struct {
+ http2FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority http2PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *http2HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *http2HeadersFrame) HeadersEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndHeaders)
+}
+
+func (f *http2HeadersFrame) StreamEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagHeadersEndStream)
+}
+
+func (f *http2HeadersFrame) HasPriority() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagHeadersPriority)
+}
+
+func http2parseHeadersFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (_ http2Frame, err error) {
+ hf := &http2HeadersFrame{
+ http2FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ countError("frame_headers_zero_stream")
+ return nil, http2connError{http2ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+ }
+ var padLength uint8
+ if fh.Flags.Has(http2FlagHeadersPadded) {
+ if p, padLength, err = http2readByte(p); err != nil {
+ countError("frame_headers_pad_short")
+ return
+ }
+ }
+ if fh.Flags.Has(http2FlagHeadersPriority) {
+ var v uint32
+ p, v, err = http2readUint32(p)
+ if err != nil {
+ countError("frame_headers_prio_short")
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = http2readByte(p)
+ if err != nil {
+ countError("frame_headers_prio_weight_short")
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) < 0 {
+ countError("frame_headers_pad_too_big")
+ return nil, http2streamError(fh.StreamID, http2ErrCodeProtocol)
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
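+
+// The five priority bytes parsed above are a big-endian uint32 whose
+// high bit is the exclusive flag and whose low 31 bits are the stream
+// dependency, followed by one weight byte. An illustrative standalone
+// decode (not part of the upstream source):
+func http2exampleDecodePriority(b [5]byte) http2PriorityParam {
+	v := binary.BigEndian.Uint32(b[:4])
+	return http2PriorityParam{
+		StreamDep: v & 0x7fffffff,
+		Exclusive: v>>31 == 1,
+		Weight:    b[4],
+	}
+}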
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type http2HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+ // in the HEADER frame.
+ Priority http2PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteHeaders(p http2HeadersFrameParam) error {
+ if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ var flags http2Flags
+ if p.PadLength != 0 {
+ flags |= http2FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= http2FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= http2FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= http2FlagHeadersPriority
+ }
+ f.startWrite(http2FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !http2validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+ return http2errDepStreamID
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
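+
+// http2exampleWriteHeaders sketches (not part of the upstream source)
+// the typical pairing of an hpack encoder with WriteHeaders; splitting
+// oversized blocks into CONTINUATION frames remains the caller's job.
+func http2exampleWriteHeaders(f *http2Framer) error {
+	var buf bytes.Buffer
+	enc := hpack.NewEncoder(&buf)
+	if err := enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}); err != nil {
+		return err
+	}
+	return f.WriteHeaders(http2HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: buf.Bytes(),
+		EndHeaders:    true,
+	})
+}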
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.3
+type http2PriorityFrame struct {
+ http2FrameHeader
+ http2PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type http2PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p http2PriorityParam) IsZero() bool {
+ return p == http2PriorityParam{}
+}
+
+func http2parsePriorityFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), payload []byte) (http2Frame, error) {
+ if fh.StreamID == 0 {
+ countError("frame_priority_zero_stream")
+ return nil, http2connError{http2ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ countError("frame_priority_bad_length")
+ return nil, http2connError{http2ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &http2PriorityFrame{
+ http2FrameHeader: fh,
+ http2PriorityParam: http2PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WritePriority(streamID uint32, p http2PriorityParam) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ if !http2validStreamIDOrZero(p.StreamDep) {
+ return http2errDepStreamID
+ }
+ f.startWrite(http2FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.4
+type http2RSTStreamFrame struct {
+ http2FrameHeader
+ ErrCode http2ErrCode
+}
+
+func http2parseRSTStreamFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if len(p) != 4 {
+ countError("frame_rststream_bad_len")
+ return nil, http2ConnectionError(http2ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ countError("frame_rststream_zero_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ return &http2RSTStreamFrame{fh, http2ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteRSTStream(streamID uint32, code http2ErrCode) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ f.startWrite(http2FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.10
+type http2ContinuationFrame struct {
+ http2FrameHeader
+ headerFragBuf []byte
+}
+
+func http2parseContinuationFrame(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (http2Frame, error) {
+ if fh.StreamID == 0 {
+ countError("frame_continuation_zero_stream")
+ return nil, http2connError{http2ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &http2ContinuationFrame{fh, p}, nil
+}
+
+func (f *http2ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *http2ContinuationFrame) HeadersEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ var flags http2Flags
+ if endHeaders {
+ flags |= http2FlagContinuationEndHeaders
+ }
+ f.startWrite(http2FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.6
+type http2PushPromiseFrame struct {
+ http2FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *http2PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *http2PushPromiseFrame) HeadersEnded() bool {
+ return f.http2FrameHeader.Flags.Has(http2FlagPushPromiseEndHeaders)
+}
+
+func http2parsePushPromise(_ *http2frameCache, fh http2FrameHeader, countError func(string), p []byte) (_ http2Frame, err error) {
+ pp := &http2PushPromiseFrame{
+ http2FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ countError("frame_pushpromise_zero_stream")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+	// Padding fields and flags are identical to those defined for DATA frames.
+ var padLength uint8
+ if fh.Flags.Has(http2FlagPushPromisePadded) {
+ if p, padLength, err = http2readByte(p); err != nil {
+ countError("frame_pushpromise_pad_short")
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = http2readUint32(p)
+ if err != nil {
+ countError("frame_pushpromise_promiseid_short")
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ countError("frame_pushpromise_pad_too_big")
+ return nil, http2ConnectionError(http2ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type http2PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+	// PromiseID is the required Stream ID that this frame promises
+	// (the ID of the pushed stream).
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PUSH_PROMISE frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. CONTINUATION frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *http2Framer) WritePushPromise(p http2PushPromiseParam) error {
+ if !http2validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ var flags http2Flags
+ if p.PadLength != 0 {
+ flags |= http2FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= http2FlagPushPromiseEndHeaders
+ }
+ f.startWrite(http2FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !http2validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return http2errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, http2padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *http2Framer) WriteRawFrame(t http2FrameType, flags http2Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func http2readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func http2readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
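+
+// These helpers thread a shrinking payload slice through successive
+// reads. An illustrative chain (not part of the upstream source)
+// mirroring the PUSH_PROMISE parser above:
+func http2exampleReadChain(p []byte) (padLen byte, promiseID uint32, err error) {
+	if p, padLen, err = http2readByte(p); err != nil {
+		return
+	}
+	if _, promiseID, err = http2readUint32(p); err != nil {
+		return
+	}
+	promiseID &= 1<<31 - 1 // clear the reserved high bit
+	return padLen, promiseID, nil
+}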
+
+type http2streamEnder interface {
+ StreamEnded() bool
+}
+
+type http2headersEnder interface {
+ HeadersEnded() bool
+}
+
+type http2headersOrContinuation interface {
+ http2headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type http2MetaHeadersFrame struct {
+ *http2HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+	// missing, however. Use the MetaHeadersFrame pseudo accessor
+	// methods (PseudoValue, PseudoFields) to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *http2MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *http2MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *http2MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
+func (mh *http2MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return http2pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return http2duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return http2errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *http2Framer) maxHeaderStringLen() int {
+ v := fr.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+// readMetaFrame reads zero or more CONTINUATION frames from fr,
+// merges them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded hpack values.
+func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFrame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &http2MetaHeadersFrame{
+ http2HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if http2VerboseLogs && fr.logReads {
+ fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
+ }
+ if !httpguts.ValidHeaderFieldValue(hf.Value) {
+ // Don't include the value in the error, because it may be sensitive.
+ invalid = http2headerFieldValueError(hf.Name)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = http2errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !http2validWireHeaderFieldName(hf.Name) {
+ invalid = http2headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc http2headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+ if _, err := hdec.Write(frag); err != nil {
+ return nil, http2ConnectionError(http2ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*http2ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.http2HeadersFrame.headerFragBuf = nil
+ mh.http2HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return nil, http2ConnectionError(http2ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ if http2VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, invalid}
+ }
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ if http2VerboseLogs {
+ log.Printf("http2: invalid pseudo headers: %v", err)
+ }
+ return nil, http2StreamError{mh.StreamID, http2ErrCodeProtocol, err}
+ }
+ return mh, nil
+}
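+
+// http2exampleReadMeta sketches (not part of the upstream source) the
+// reader-side contract: with fr.ReadMetaHeaders set, ReadFrame hands
+// back merged *http2MetaHeadersFrame values in place of raw HEADERS
+// frames.
+func http2exampleReadMeta(fr *http2Framer) (string, error) {
+	f, err := fr.ReadFrame()
+	if err != nil {
+		return "", err
+	}
+	if mh, ok := f.(*http2MetaHeadersFrame); ok {
+		return mh.PseudoValue("path"), nil
+	}
+	return "", nil
+}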
+
+func http2summarizeFrame(f http2Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *http2SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s http2Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *http2DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *http2WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *http2PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *http2GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *http2RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
+
+var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type http2goroutineLock uint64
+
+func http2newGoroutineLock() http2goroutineLock {
+ if !http2DebugGoroutines {
+ return 0
+ }
+ return http2goroutineLock(http2curGoroutineID())
+}
+
+func (g http2goroutineLock) check() {
+ if !http2DebugGoroutines {
+ return
+ }
+ if http2curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g http2goroutineLock) checkNotOn() {
+ if !http2DebugGoroutines {
+ return
+ }
+ if http2curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var http2goroutineSpace = []byte("goroutine ")
+
+func http2curGoroutineID() uint64 {
+ bp := http2littleBuf.Get().(*[]byte)
+ defer http2littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, http2goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := http2parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var http2littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = http2cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
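+// For example (illustrative): for base 10 that is
+// (1<<64-1)/10 + 1 = 1844674407370955162, the smallest n for which
+// n*10 overflows a uint64.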
+func http2cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
+
+var (
+ http2commonBuildOnce sync.Once
+ http2commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
+ http2commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
+)
+
+func http2buildCommonHeaderMapsOnce() {
+ http2commonBuildOnce.Do(http2buildCommonHeaderMaps)
+}
+
+func http2buildCommonHeaderMaps() {
+ common := []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-credentials",
+ "access-control-allow-headers",
+ "access-control-allow-methods",
+ "access-control-allow-origin",
+ "access-control-expose-headers",
+ "access-control-max-age",
+ "access-control-request-headers",
+ "access-control-request-method",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "origin",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ "x-forwarded-for",
+ "x-forwarded-proto",
+ }
+ http2commonLowerHeader = make(map[string]string, len(common))
+ http2commonCanonHeader = make(map[string]string, len(common))
+ for _, v := range common {
+ chk := CanonicalHeaderKey(v)
+ http2commonLowerHeader[chk] = v
+ http2commonCanonHeader[v] = chk
+ }
+}
+
+func http2lowerHeader(v string) (lower string, ascii bool) {
+ http2buildCommonHeaderMapsOnce()
+ if s, ok := http2commonLowerHeader[v]; ok {
+ return s, true
+ }
+ return http2asciiToLower(v)
+}
+
+func http2canonicalHeader(v string) string {
+ http2buildCommonHeaderMapsOnce()
+ if s, ok := http2commonCanonHeader[v]; ok {
+ return s
+ }
+ return CanonicalHeaderKey(v)
+}
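+
+// http2exampleHeaderCase is an illustrative round trip (not part of
+// the upstream source): common keys hit the prebuilt maps, so lowering
+// "Content-Type" and re-canonicalizing it allocates nothing.
+func http2exampleHeaderCase() {
+	lower, ascii := http2lowerHeader("Content-Type") // "content-type", true
+	fmt.Println(lower, ascii, http2canonicalHeader(lower))
+}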
+
+var (
+ http2VerboseLogs bool
+ http2logFrameWrites bool
+ http2logFrameReads bool
+ http2inTests bool
+)
+
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ http2VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ http2VerboseLogs = true
+ http2logFrameWrites = true
+ http2logFrameReads = true
+ }
+}
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ http2ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2
+ http2initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ http2NextProtoTLS = "h2"
+
+ // https://httpwg.org/specs/rfc7540.html#SettingValues
+ http2initialHeaderTableSize = 4096
+
+ http2initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ http2defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ http2clientPreface = []byte(http2ClientPreface)
+)
+
+type http2streamState int
+
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
+const (
+ http2stateIdle http2streamState = iota
+ http2stateOpen
+ http2stateHalfClosedLocal
+ http2stateHalfClosedRemote
+ http2stateClosed
+)
+
+var http2stateName = [...]string{
+ http2stateIdle: "Idle",
+ http2stateOpen: "Open",
+ http2stateHalfClosedLocal: "HalfClosedLocal",
+ http2stateHalfClosedRemote: "HalfClosedRemote",
+ http2stateClosed: "Closed",
+}
+
+func (st http2streamState) String() string {
+ return http2stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type http2Setting struct {
+ // ID is which setting is being set.
+ // See https://httpwg.org/specs/rfc7540.html#SettingFormat
+ ID http2SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s http2Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s http2Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case http2SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ case http2SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ case http2SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ }
+ return nil
+}
+
+// A SettingID is an HTTP/2 setting as defined in
+// https://httpwg.org/specs/rfc7540.html#iana-settings
+type http2SettingID uint16
+
+const (
+ http2SettingHeaderTableSize http2SettingID = 0x1
+ http2SettingEnablePush http2SettingID = 0x2
+ http2SettingMaxConcurrentStreams http2SettingID = 0x3
+ http2SettingInitialWindowSize http2SettingID = 0x4
+ http2SettingMaxFrameSize http2SettingID = 0x5
+ http2SettingMaxHeaderListSize http2SettingID = 0x6
+)
+
+var http2settingName = map[http2SettingID]string{
+ http2SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ http2SettingEnablePush: "ENABLE_PUSH",
+ http2SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ http2SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ http2SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ http2SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+}
+
+func (s http2SettingID) String() string {
+ if v, ok := http2settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httpguts.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+//
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func http2validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !httpguts.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
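+
+// For example (illustrative): "content-type" is a valid wire name,
+// while "Content-Type" is rejected because of its uppercase letters.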
+
+func http2httpCodeString(code int) string {
+ switch code {
+ case 200:
+ return "200"
+ case 404:
+ return "404"
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type http2stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type http2gate chan struct{}
+
+func (g http2gate) Done() { g <- struct{}{} }
+
+func (g http2gate) Wait() { <-g }
+
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type http2closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and have the Mutex and Cond's memory in the same
+// allocation.
+func (cw *http2closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw http2closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw http2closeWaiter) Wait() {
+ <-cw
+}
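+
+// http2exampleCloseWaiter is a minimal sketch (not part of the
+// upstream source) of the intended pattern: Init once, Close exactly
+// once, any number of concurrent Waits.
+func http2exampleCloseWaiter() {
+	var cw http2closeWaiter
+	cw.Init()
+	go cw.Close()
+	cw.Wait() // unblocks once Close has run
+}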
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type http2bufferedWriter struct {
+ _ http2incomparable
+ w io.Writer // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+}
+
+func http2newBufferedWriter(w io.Writer) *http2bufferedWriter {
+ return &http2bufferedWriter{w: w}
+}
+
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const http2bufWriterPoolBufferSize = 4 << 10
+
+var http2bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ return bufio.NewWriterSize(nil, http2bufWriterPoolBufferSize)
+ },
+}
+
+func (w *http2bufferedWriter) Available() int {
+ if w.bw == nil {
+ return http2bufWriterPoolBufferSize
+ }
+ return w.bw.Available()
+}
+
+func (w *http2bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := http2bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset(w.w)
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *http2bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ http2bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
+
+func http2mustUint31(v int32) uint32 {
+ if v < 0 || v > 2147483647 {
+ panic("out of range")
+ }
+ return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func http2bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+type http2httpError struct {
+ _ http2incomparable
+ msg string
+ timeout bool
+}
+
+func (e *http2httpError) Error() string { return e.msg }
+
+func (e *http2httpError) Timeout() bool { return e.timeout }
+
+func (e *http2httpError) Temporary() bool { return true }
+
+var http2errTimeout error = &http2httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type http2connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
+
+var http2sorterPool = sync.Pool{New: func() interface{} { return new(http2sorter) }}
+
+type http2sorter struct {
+ v []string // owned by sorter
+}
+
+func (s *http2sorter) Len() int { return len(s.v) }
+
+func (s *http2sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
+
+func (s *http2sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *http2sorter) Keys(h Header) []string {
+ keys := s.v[:0]
+ for k := range h {
+ keys = append(keys, k)
+ }
+ s.v = keys
+ sort.Sort(s)
+ return keys
+}
+
+func (s *http2sorter) SortStrings(ss []string) {
+ // Our sorter works on s.v, which sorter owns, so
+ // stash it away while we sort the user's buffer.
+ save := s.v
+ s.v = ss
+ sort.Sort(s)
+ s.v = save
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// - a non-empty string starting with '/'
+// - the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func http2validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type http2incomparable [0]func()
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type http2pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b http2pipeBuffer // nil when done reading
+ unread int // bytes unread when done
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type http2pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
+
+// setBuffer initializes the pipe buffer.
+// It has no effect if the pipe is already closed.
+func (p *http2pipe) setBuffer(b http2pipeBuffer) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.err != nil || p.breakErr != nil {
+ return
+ }
+ p.b = b
+}
+
+func (p *http2pipe) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.b == nil {
+ return p.unread
+ }
+ return p.b.Len()
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *http2pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b != nil && p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ p.b = nil
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var http2errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *http2pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil || p.breakErr != nil {
+ return 0, http2errClosedPipeWrite
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *http2pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *http2pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *http2pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *http2pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ if dst == &p.breakErr {
+ if p.b != nil {
+ p.unread += p.b.Len()
+ }
+ p.b = nil
+ }
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *http2pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *http2pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *http2pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
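+
+// http2examplePipe is a minimal lifecycle sketch (not part of the
+// upstream source): write, close with a terminal error, then drain.
+func http2examplePipe() {
+	p := &http2pipe{b: new(bytes.Buffer)}
+	p.Write([]byte("body"))
+	p.CloseWithError(io.EOF)
+	buf := make([]byte, 8)
+	n, err := p.Read(buf) // n == 4, err == nil; the next Read returns io.EOF
+	fmt.Println(n, err, string(buf[:n]))
+}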
+
+const (
+ http2prefaceTimeout = 10 * time.Second
+ http2firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ http2handlerChunkWriteSize = 4 << 10
+ http2defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ http2maxQueuedControlFrames = 10000
+)
+
+var (
+ http2errClientDisconnected = errors.New("client disconnected")
+ http2errClosedBody = errors.New("body closed by handler")
+ http2errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ http2errStreamClosed = errors.New("http2: stream closed")
+)
+
+var http2responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &http2responseWriterState{}
+ rws.bw = bufio.NewWriterSize(http2chunkWriter{rws}, http2handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ http2testHookOnConn func()
+ http2testHookGetServerConn func(*http2serverConn)
+ http2testHookOnPanicMu *sync.Mutex // nil except in tests
+ http2testHookOnPanic func(sc *http2serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type http2Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+	// Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxDecoderHeaderTableSize optionally specifies the http2
+ // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+ // informs the remote endpoint of the maximum size of the header compression
+ // table used to decode header blocks, in octets. If zero, the default value
+ // of 4096 is used.
+ MaxDecoderHeaderTableSize uint32
+
+ // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+ // header compression table used for encoding request headers. Received
+ // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+ // the default value of 4096 is used.
+ MaxEncoderHeaderTableSize uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+
+ // IdleTimeout specifies how long until idle clients should be
+ // closed with a GOAWAY frame. PING frames are not considered
+ // activity for the purposes of IdleTimeout.
+ IdleTimeout time.Duration
+
+ // MaxUploadBufferPerConnection is the size of the initial flow
+	// control window for each connection. The HTTP/2 spec does not
+ // allow this to be smaller than 65535 or larger than 2^32-1.
+ // If the value is outside this range, a default value will be
+ // used instead.
+ MaxUploadBufferPerConnection int32
+
+ // MaxUploadBufferPerStream is the size of the initial flow control
+ // window for each stream. The HTTP/2 spec does not allow this to
+ // be larger than 2^32-1. If the value is zero or larger than the
+ // maximum, a default value will be used instead.
+ MaxUploadBufferPerStream int32
+
+ // NewWriteScheduler constructs a write scheduler for a connection.
+ // If nil, a default scheduler is chosen.
+ NewWriteScheduler func() http2WriteScheduler
+
+ // CountError, if non-nil, is called on HTTP/2 server errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
+ // Internal state. This is a pointer (rather than embedded directly)
+ // so that we don't embed a Mutex in this struct, which will make the
+ // struct non-copyable, which might break some callers.
+ state *http2serverInternalState
+}
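+
+// http2exampleServerConfig is an illustrative configuration sketch
+// (not part of the upstream source); zero fields fall back to the
+// defaults computed by the accessor methods below.
+var http2exampleServerConfig = &http2Server{
+	MaxConcurrentStreams:         250,
+	MaxReadFrameSize:             1 << 20,
+	IdleTimeout:                  5 * time.Minute,
+	MaxUploadBufferPerConnection: 1 << 20,
+	MaxUploadBufferPerStream:     1 << 20,
+}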
+
+func (s *http2Server) initialConnRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerConnection >= http2initialWindowSize {
+ return s.MaxUploadBufferPerConnection
+ }
+ return 1 << 20
+}
+
+func (s *http2Server) initialStreamRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerStream > 0 {
+ return s.MaxUploadBufferPerStream
+ }
+ return 1 << 20
+}
+
+func (s *http2Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= http2minMaxFrameSize && v <= http2maxFrameSize {
+ return v
+ }
+ return http2defaultMaxReadFrameSize
+}
+
+func (s *http2Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return http2defaultMaxStreams
+}
+
+func (s *http2Server) maxDecoderHeaderTableSize() uint32 {
+ if v := s.MaxDecoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return http2initialHeaderTableSize
+}
+
+func (s *http2Server) maxEncoderHeaderTableSize() uint32 {
+ if v := s.MaxEncoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return http2initialHeaderTableSize
+}
+
+// maxQueuedControlFrames is the maximum number of control frames like
+// SETTINGS, PING and RST_STREAM that will be queued for writing before
+// the connection is closed to prevent memory exhaustion attacks.
+func (s *http2Server) maxQueuedControlFrames() int {
+ // TODO: if anybody asks, add a Server field, and remember to define the
+ // behavior of negative values.
+ return http2maxQueuedControlFrames
+}
+
+type http2serverInternalState struct {
+ mu sync.Mutex
+ activeConns map[*http2serverConn]struct{}
+}
+
+func (s *http2serverInternalState) registerConn(sc *http2serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ s.activeConns[sc] = struct{}{}
+ s.mu.Unlock()
+}
+
+func (s *http2serverInternalState) unregisterConn(sc *http2serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ delete(s.activeConns, sc)
+ s.mu.Unlock()
+}
+
+func (s *http2serverInternalState) startGracefulShutdown() {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ for sc := range s.activeConns {
+ sc.startGracefulShutdown()
+ }
+ s.mu.Unlock()
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
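+ //
+ // A minimal usage sketch, written against this bundle's internal
+ // names (externally this is golang.org/x/net/http2.ConfigureServer);
+ // mux, cert.pem and key.pem are hypothetical:
+ //
+ //	srv := &Server{Addr: ":8443", Handler: mux}
+ //	if err := http2ConfigureServer(srv, nil); err != nil {
+ //		log.Fatal(err)
+ //	}
+ //	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))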
+func http2ConfigureServer(s *Server, conf *http2Server) error {
+ if s == nil {
+ panic("nil *http.Server")
+ }
+ if conf == nil {
+ conf = new(http2Server)
+ }
+ conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})}
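+ // If the HTTP/2 config has no idle timeout of its own, inherit the
+ // HTTP/1 server's IdleTimeout, falling back to its ReadTimeout.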
+ if h1, h2 := s, conf; h2.IdleTimeout == 0 {
+ if h1.IdleTimeout != 0 {
+ h2.IdleTimeout = h1.IdleTimeout
+ } else {
+ h2.IdleTimeout = h1.ReadTimeout
+ }
+ }
+ s.RegisterOnShutdown(conf.state.startGracefulShutdown)
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil && s.TLSConfig.MinVersion < tls.VersionTLS13 {
+ // If they already provided a TLS 1.0–1.2 CipherSuite list, return an
+ // error if it is missing ECDHE_RSA_WITH_AES_128_GCM_SHA256 or
+ // ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
+ haveRequired := false
+ for _, cs := range s.TLSConfig.CipherSuites {
+ switch cs {
+ case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ // Alternative MTI cipher to not discourage ECDSA-only servers.
+ // See http://golang.org/cl/30721 for further information.
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ haveRequired = true
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ if !http2strSliceContains(s.TLSConfig.NextProtos, http2NextProtoTLS) {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, http2NextProtoTLS)
+ }
+ if !http2strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
+ }
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){}
+ }
+ protoHandler := func(hs *Server, c *tls.Conn, h Handler) {
+ if http2testHookOnConn != nil {
+ http2testHookOnConn()
+ }
+ // The TLSNextProto interface predates contexts, so
+ // the net/http package passes down its per-connection
+ // base context via an exported but unadvertised
+ // method on the Handler. This is for internal
+ // net/http<=>http2 use only.
+ var ctx context.Context
+ type baseContexter interface {
+ BaseContext() context.Context
+ }
+ if bc, ok := h.(baseContexter); ok {
+ ctx = bc.BaseContext()
+ }
+ conf.ServeConn(c, &http2ServeConnOpts{
+ Context: ctx,
+ Handler: h,
+ BaseConfig: hs,
+ })
+ }
+ s.TLSNextProto[http2NextProtoTLS] = protoHandler
+ return nil
+}
+
+// ServeConnOpts are options for the Server.ServeConn method.
+type http2ServeConnOpts struct {
+ // Context is the base context to use.
+ // If nil, context.Background is used.
+ Context context.Context
+
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler Handler
+
+ // UpgradeRequest is an initial request received on a connection
+ // undergoing an h2c upgrade. The request body must have been
+ // completely read from the connection before calling ServeConn,
+ // and the 101 Switching Protocols response written.
+ UpgradeRequest *Request
+
+ // Settings is the decoded contents of the HTTP2-Settings header
+ // in an h2c upgrade request.
+ Settings []byte
+
+ // SawClientPreface is set if the HTTP/2 connection preface
+ // has already been read from the connection.
+ SawClientPreface bool
+}
+
+func (o *http2ServeConnOpts) context() context.Context {
+ if o != nil && o.Context != nil {
+ return o.Context
+ }
+ return context.Background()
+}
+
+func (o *http2ServeConnOpts) baseConfig() *Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(Server)
+}
+
+func (o *http2ServeConnOpts) handler() Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
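+ //
+ // A minimal usage sketch (illustrative; ln is a hypothetical
+ // net.Listener and handler a hypothetical Handler):
+ //
+ //	s := &http2Server{}
+ //	for {
+ //		c, err := ln.Accept()
+ //		if err != nil {
+ //			log.Fatal(err)
+ //		}
+ //		go s.ServeConn(c, &http2ServeConnOpts{Handler: handler})
+ //	}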
+func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
+ baseCtx, cancel := http2serverConnBaseContext(c, opts)
+ defer cancel()
+
+ sc := &http2serverConn{
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: http2newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*http2stream),
+ readFrameCh: make(chan http2readFrameResult),
+ wantWriteFrameCh: make(chan http2FrameWriteRequest, 8),
+ serveMsgCh: make(chan interface{}, 8),
+ wroteFrameCh: make(chan http2frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan http2bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: s.maxConcurrentStreams(),
+ initialStreamSendWindowSize: http2initialWindowSize,
+ maxFrameSize: http2initialMaxFrameSize,
+ serveG: http2newGoroutineLock(),
+ pushEnabled: true,
+ sawClientPreface: opts.SawClientPreface,
+ }
+
+ s.state.registerConn(sc)
+ defer s.state.unregisterConn(sc)
+
+ // The net/http package sets the write deadline from the
+ // http.Server.WriteTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already set.
+ // Write deadlines are set per stream in serverConn.newStream.
+ // Disarm the net.Conn write deadline here.
+ if sc.hs.WriteTimeout != 0 {
+ sc.conn.SetWriteDeadline(time.Time{})
+ }
+
+ if s.NewWriteScheduler != nil {
+ sc.writeSched = s.NewWriteScheduler()
+ } else {
+ sc.writeSched = http2newRoundRobinWriteScheduler()
+ }
+
+ // These start at the RFC-specified defaults. If there is a higher
+ // configured value for inflow, that will be updated when we send a
+ // WINDOW_UPDATE shortly after sending SETTINGS.
+ sc.flow.add(http2initialWindowSize)
+ sc.inflow.init(http2initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
+
+ fr := http2NewFramer(sc.bw, c)
+ if s.CountError != nil {
+ fr.countError = s.CountError
+ }
+ fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(http2connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(http2ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here.
+ }
+
+ if !s.PermitProhibitedCipherSuites && http2isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, so there are no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(http2ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
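+ // For h2c upgrades, apply the settings carried in the request's
+ // HTTP2-Settings header as if they had arrived in the client's
+ // initial SETTINGS frame.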
+ if opts.Settings != nil {
+ fr := &http2SettingsFrame{
+ http2FrameHeader: http2FrameHeader{valid: true},
+ p: opts.Settings,
+ }
+ if err := fr.ForeachSetting(sc.processSetting); err != nil {
+ sc.rejectConn(http2ErrCodeProtocol, "invalid settings")
+ return
+ }
+ opts.Settings = nil
+ }
+
+ if hook := http2testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+
+ if opts.UpgradeRequest != nil {
+ sc.upgradeRequest(opts.UpgradeRequest)
+ opts.UpgradeRequest = nil
+ }
+
+ sc.serve()
+}
+
+func http2serverConnBaseContext(c net.Conn, opts *http2ServeConnOpts) (ctx context.Context, cancel func()) {
+ ctx, cancel = context.WithCancel(opts.context())
+ ctx = context.WithValue(ctx, LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, ServerContextKey, hs)
+ }
+ return
+}
+
+func (sc *http2serverConn) rejectConn(err http2ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type http2serverConn struct {
+ // Immutable:
+ srv *http2Server
+ hs *Server
+ conn net.Conn
+ bw *http2bufferedWriter // writing to conn
+ handler Handler
+ baseCtx context.Context
+ framer *http2Framer
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan http2readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan http2FrameWriteRequest // from handlers -> serve
+ wroteFrameCh chan http2frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan http2bodyReadMsg // from handlers -> serve
+ serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
+ flow http2outflow // conn-wide (not stream-specific) outbound flow control
+ inflow http2inflow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+ writeSched http2WriteScheduler
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG http2goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawClientPreface bool // preface has already been read, used in h2c upgrade
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ queuedControlFrames int // control frames in the writeSched queue
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ curHandlers uint32 // number of running handler goroutines
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+ streams map[uint32]*http2stream
+ unstartedHandlers []http2unstartedHandler
+ initialStreamSendWindowSize int32
+ maxFrameSize int32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ canonHeaderKeysSize int // canonHeader keys size in bytes
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode http2ErrCode
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+
+ // Used by startGracefulShutdown.
+ shutdownOnce sync.Once
+}
+
+func (sc *http2serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = DefaultMaxHeaderBytes
+ }
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return uint32(n + typicalHeaders*perFieldOverhead)
+}
+
+func (sc *http2serverConn) curOpenStreams() uint32 {
+ sc.serveG.check()
+ return sc.curClientStreams + sc.curPushedStreams
+}
+
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type http2stream struct {
+ // immutable:
+ sc *http2serverConn
+ id uint32
+ body *http2pipe // non-nil if expecting DATA frames
+ cw http2closeWaiter // closed when the stream transitions to the closed state
+ ctx context.Context
+ cancelCtx func()
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow http2outflow // limits writing from Handler to client
+ inflow http2inflow // what the client is allowed to POST/etc to us
+ state http2streamState
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
+
+ trailer Header // accumulated trailers
+ reqTrailer Header // handler's Request.Trailer
+}
+
+func (sc *http2serverConn) Framer() *http2Framer { return sc.framer }
+
+func (sc *http2serverConn) CloseConn() error { return sc.conn.Close() }
+
+func (sc *http2serverConn) Flush() error { return sc.bw.Flush() }
+
+func (sc *http2serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *http2serverConn) state(streamID uint32) (http2streamState, *http2stream) {
+ sc.serveG.check()
+ // http://tools.ietf.org/html/rfc7540#section-5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
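+ //
+ // Odd stream IDs are client-initiated; even ones are server pushes.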
+ if streamID%2 == 1 {
+ if streamID <= sc.maxClientStreamID {
+ return http2stateClosed, nil
+ }
+ } else {
+ if streamID <= sc.maxPushPromiseID {
+ return http2stateClosed, nil
+ }
+ }
+ return http2stateIdle, nil
+}
+
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *http2serverConn) setConnState(state ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *http2serverConn) vlogf(format string, args ...interface{}) {
+ if http2VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *http2serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func http2errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func http2isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := http2errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *http2serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err) || err == http2errPrefaceTimeout {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
+// of the entries in the canonHeader cache.
+// This should be larger than the size of unique, uncommon header keys likely to
+// be sent by the peer, while not so high as to permit unreasonable memory usage
+// if the peer sends an unbounded number of unique header keys.
+const http2maxCachedCanonicalHeadersKeysSize = 2048
+
+func (sc *http2serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ http2buildCommonHeaderMapsOnce()
+ cv, ok := http2commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = CanonicalHeaderKey(v)
+ size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
+ if sc.canonHeaderKeysSize+size <= http2maxCachedCanonicalHeadersKeysSize {
+ sc.canonHeader[v] = cv
+ sc.canonHeaderKeysSize += size
+ }
+ return cv
+}
+
+type http2readFrameResult struct {
+ f http2Frame // valid until readMore is called
+ err error
+
+ // readMore should be called once the consumer no longer needs or
+ // retains f. After readMore, f is invalid and more frames can be
+ // read.
+ readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *http2serverConn) readFrames() {
+ gate := make(http2gate)
+ gateDone := gate.Done
+ for {
+ f, err := sc.framer.ReadFrame()
+ select {
+ case sc.readFrameCh <- http2readFrameResult{f, err, gateDone}:
+ case <-sc.doneServing:
+ return
+ }
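+ // Wait for the consumer to call readMore before the next
+ // ReadFrame call invalidates f.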
+ select {
+ case <-gate:
+ case <-sc.doneServing:
+ return
+ }
+ if http2terminalReadFrameError(err) {
+ return
+ }
+ }
+}
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type http2frameWriteResult struct {
+ _ http2incomparable
+ wr http2FrameWriteRequest // what was written (or attempted)
+ err error // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *http2serverConn) writeFrameAsync(wr http2FrameWriteRequest, wd *http2writeData) {
+ var err error
+ if wd == nil {
+ err = wr.write.writeFrame(sc)
+ } else {
+ err = sc.framer.endWrite()
+ }
+ sc.wroteFrameCh <- http2frameWriteResult{wr: wr, err: err}
+}
+
+func (sc *http2serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, http2errClientDisconnected)
+ }
+}
+
+func (sc *http2serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *http2serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
+ if http2testHookOnPanicMu != nil {
+ http2testHookOnPanicMu.Lock()
+ defer http2testHookOnPanicMu.Unlock()
+ }
+ if http2testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if http2testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *http2serverConn) serve() {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ if http2VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
+
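+ // The server preface: send our SETTINGS frame before reading
+ // anything from the client.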
+ sc.writeFrame(http2FrameWriteRequest{
+ write: http2writeSettings{
+ {http2SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {http2SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {http2SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {http2SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
+ {http2SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
+ },
+ })
+ sc.unackedSettings++
+
+ // Each connection starts with initialWindowSize inflow tokens.
+ // If a higher value is configured, we add more tokens.
+ if diff := sc.srv.initialConnRecvWindowSize() - http2initialWindowSize; diff > 0 {
+ sc.sendWindowUpdate(nil, int(diff))
+ }
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
+ sc.setConnState(StateActive)
+ sc.setConnState(StateIdle)
+
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ defer sc.idleTimer.Stop()
+ }
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := time.AfterFunc(http2firstSettingsTimeout, sc.onSettingsTimer)
+ defer settingsTimer.Stop()
+
+ loopNum := 0
+ for {
+ loopNum++
+ select {
+ case wr := <-sc.wantWriteFrameCh:
+ if se, ok := wr.write.(http2StreamError); ok {
+ sc.resetStream(se)
+ break
+ }
+ sc.writeFrame(wr)
+ case res := <-sc.wroteFrameCh:
+ sc.wroteFrame(res)
+ case res := <-sc.readFrameCh:
+ // Process any written frames before reading new frames from the client since a
+ // written frame could have triggered a new stream to be started.
+ if sc.writingFrameAsync {
+ select {
+ case wroteRes := <-sc.wroteFrameCh:
+ sc.wroteFrame(wroteRes)
+ default:
+ }
+ }
+ if !sc.processFrameFromReader(res) {
+ return
+ }
+ res.readMore()
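+ // The first frame has been read (processFrame enforces that it
+ // was SETTINGS), so the initial SETTINGS timeout no longer applies.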
+ if settingsTimer != nil {
+ settingsTimer.Stop()
+ settingsTimer = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case msg := <-sc.serveMsgCh:
+ switch v := msg.(type) {
+ case func(int):
+ v(loopNum) // for testing
+ case *http2serverMessage:
+ switch v {
+ case http2settingsTimerMsg:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case http2idleTimerMsg:
+ sc.vlogf("connection is idle")
+ sc.goAway(http2ErrCodeNo)
+ case http2shutdownTimerMsg:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case http2gracefulShutdownMsg:
+ sc.startGracefulShutdownInternal()
+ case http2handlerDoneMsg:
+ sc.handlerDone()
+ default:
+ panic("unknown timer")
+ }
+ case *http2startPushRequest:
+ sc.startPush(v)
+ case func(*http2serverConn):
+ v(sc)
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v))
+ }
+ }
+
+ // If the peer is causing us to generate a lot of control frames,
+ // but not reading them from us, assume they are trying to make us
+ // run out of memory.
+ if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ sc.vlogf("http2: too many control frames in send queue, closing connection")
+ return
+ }
+
+ // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
+ // with no error code (graceful shutdown), don't start the timer until
+ // all open streams have been completed.
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
+ gracefulShutdownComplete := sc.goAwayCode == http2ErrCodeNo && sc.curOpenStreams() == 0
+ if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != http2ErrCodeNo || gracefulShutdownComplete) {
+ sc.shutDownIn(http2goAwayTimeout)
+ }
+ }
+}
+
+type http2serverMessage int
+
+// Message values sent to serveMsgCh.
+var (
+ http2settingsTimerMsg = new(http2serverMessage)
+ http2idleTimerMsg = new(http2serverMessage)
+ http2shutdownTimerMsg = new(http2serverMessage)
+ http2gracefulShutdownMsg = new(http2serverMessage)
+ http2handlerDoneMsg = new(http2serverMessage)
+)
+
+func (sc *http2serverConn) onSettingsTimer() { sc.sendServeMsg(http2settingsTimerMsg) }
+
+func (sc *http2serverConn) onIdleTimer() { sc.sendServeMsg(http2idleTimerMsg) }
+
+func (sc *http2serverConn) onShutdownTimer() { sc.sendServeMsg(http2shutdownTimerMsg) }
+
+func (sc *http2serverConn) sendServeMsg(msg interface{}) {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.serveMsgCh <- msg:
+ case <-sc.doneServing:
+ }
+}
+
+var http2errPrefaceTimeout = errors.New("timeout waiting for client preface")
+
+// readPreface reads the ClientPreface greeting from the peer or
+// returns errPrefaceTimeout on timeout, or an error if the greeting
+// is invalid.
+func (sc *http2serverConn) readPreface() error {
+ if sc.sawClientPreface {
+ return nil
+ }
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(http2ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, http2clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(http2prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return http2errPrefaceTimeout
+ case err := <-errc:
+ if err == nil {
+ if http2VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
+
+var http2errChanPool = sync.Pool{
+ New: func() interface{} { return make(chan error, 1) },
+}
+
+var http2writeDataPool = sync.Pool{
+ New: func() interface{} { return new(http2writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte, endStream bool) error {
+ ch := http2errChanPool.Get().(chan error)
+ writeArg := http2writeDataPool.Get().(*http2writeData)
+ *writeArg = http2writeData{stream.id, data, endStream}
+ err := sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: writeArg,
+ stream: stream,
+ done: ch,
+ })
+ if err != nil {
+ return err
+ }
+ var frameWriteDone bool // the frame write is done (successfully or not)
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-stream.cw:
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ default:
+ return http2errStreamClosed
+ }
+ }
+ http2errChanPool.Put(ch)
+ if frameWriteDone {
+ http2writeDataPool.Put(writeArg)
+ }
+ return err
+}
+
+// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *http2serverConn) writeFrameFromHandler(wr http2FrameWriteRequest) error {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wr:
+ return nil
+ case <-sc.doneServing:
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
+ return http2errClientDisconnected
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+ // make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *http2serverConn) writeFrame(wr http2FrameWriteRequest) {
+ sc.serveG.check()
+
+ // If true, wr will not be written and wr.done will not be signaled.
+ var ignoreWrite bool
+
+ // We are not allowed to write frames on closed streams. RFC 7540 Section
+ // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
+ // a closed stream." Our server never sends PRIORITY, so that exception
+ // does not apply.
+ //
+ // The serverConn might close an open stream while the stream's handler
+ // is still running. For example, the server might close a stream when it
+ // receives bad data from the client. If this happens, the handler might
+ // attempt to write a frame after the stream has been closed (since the
+ // handler hasn't yet been notified of the close). In this case, we simply
+ // ignore the frame. The handler will notice that the stream is closed when
+ // it waits for the frame to be written.
+ //
+ // As an exception to this rule, we allow sending RST_STREAM after close.
+ // This allows us to immediately reject new streams without tracking any
+ // state for those streams (except for the queued RST_STREAM frame). This
+ // may result in duplicate RST_STREAMs in some cases, but the client should
+ // ignore those.
+ if wr.StreamID() != 0 {
+ _, isReset := wr.write.(http2StreamError)
+ if state, _ := sc.state(wr.StreamID()); state == http2stateClosed && !isReset {
+ ignoreWrite = true
+ }
+ }
+
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
+ switch wr.write.(type) {
+ case *http2writeResHeaders:
+ wr.stream.wroteHeaders = true
+ case http2write100ContinueHeadersFrame:
+ if wr.stream.wroteHeaders {
+ // We do not need to notify wr.done because this frame is
+ // never written with wr.done != nil.
+ if wr.done != nil {
+ panic("wr.done != nil for write100ContinueHeadersFrame")
+ }
+ ignoreWrite = true
+ }
+ }
+
+ if !ignoreWrite {
+ if wr.isControl() {
+ sc.queuedControlFrames++
+ // For extra safety, detect wraparounds, which should not happen,
+ // and pull the plug.
+ if sc.queuedControlFrames < 0 {
+ sc.conn.Close()
+ }
+ }
+ sc.writeSched.Push(wr)
+ }
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wr (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wr.
+func (sc *http2serverConn) startFrameWrite(wr http2FrameWriteRequest) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+
+ st := wr.stream
+ if st != nil {
+ switch st.state {
+ case http2stateHalfClosedLocal:
+ switch wr.write.(type) {
+ case http2StreamError, http2handlerPanicRST, http2writeWindowUpdate:
+ // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
+ // in this state. (We never send PRIORITY from the server, so that is not checked.)
+ default:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
+ }
+ case http2stateClosed:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
+ }
+ }
+ if wpp, ok := wr.write.(*http2writePushPromise); ok {
+ var err error
+ wpp.promisedID, err = wpp.allocatePromisedID()
+ if err != nil {
+ sc.writingFrameAsync = false
+ wr.replyToWriter(err)
+ return
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
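+ // Frames that fit in the buffered writer are written synchronously
+ // on the serve goroutine; writes that could block on the network are
+ // handed to writeFrameAsync.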
+ if wr.write.staysWithinBuffer(sc.bw.Available()) {
+ sc.writingFrameAsync = false
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrame(http2frameWriteResult{wr: wr, err: err})
+ } else if wd, ok := wr.write.(*http2writeData); ok {
+ // Encode the frame in the serve goroutine, to ensure we don't have
+ // any lingering asynchronous references to data passed to Write.
+ // See https://go.dev/issue/58446.
+ sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr, wd)
+ } else {
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr, nil)
+ }
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in the
+// main ServeHTTP goroutine, this will show up rarely.
+var http2errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *http2serverConn) wroteFrame(res http2frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+ sc.writingFrameAsync = false
+
+ wr := res.wr
+
+ if http2writeEndsStream(wr.write) {
+ st := wr.stream
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case http2stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
+ st.state = http2stateHalfClosedLocal
+ // Section 8.1: a server MAY request that the client abort
+ // transmission of a request without error by sending a
+ // RST_STREAM with an error code of NO_ERROR after sending
+ // a complete response.
+ sc.resetStream(http2streamError(st.id, http2ErrCodeNo))
+ case http2stateHalfClosedRemote:
+ sc.closeStream(st, http2errHandlerComplete)
+ }
+ } else {
+ switch v := wr.write.(type) {
+ case http2StreamError:
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
+ if st, ok := sc.streams[v.StreamID]; ok {
+ sc.closeStream(st, v)
+ }
+ case http2handlerPanicRST:
+ sc.closeStream(wr.stream, http2errHandlerPanicked)
+ }
+ }
+
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
+ wr.replyToWriter(res.err)
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected by writeSched.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *http2serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame || sc.inFrameScheduleLoop {
+ return
+ }
+ sc.inFrameScheduleLoop = true
+ for !sc.writingFrameAsync {
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(http2FrameWriteRequest{
+ write: &http2writeGoAway{
+ maxStreamID: sc.maxClientStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ continue
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(http2FrameWriteRequest{write: http2writeSettingsAck{}})
+ continue
+ }
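+ // Pop queued frames only while we're still serving, or shutting
+ // down gracefully (GOAWAY with no error code).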
+ if !sc.inGoAway || sc.goAwayCode == http2ErrCodeNo {
+ if wr, ok := sc.writeSched.Pop(); ok {
+ if wr.isControl() {
+ sc.queuedControlFrames--
+ }
+ sc.startFrameWrite(wr)
+ continue
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(http2FrameWriteRequest{write: http2flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ continue
+ }
+ break
+ }
+ sc.inFrameScheduleLoop = false
+}
+
+// startGracefulShutdown gracefully shuts down a connection. This
+// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
+// shutting down. The connection isn't closed until all current
+// streams are done.
+//
+// startGracefulShutdown returns immediately; it does not wait until
+// the connection has shut down.
+func (sc *http2serverConn) startGracefulShutdown() {
+ sc.serveG.checkNotOn() // NOT
+ sc.shutdownOnce.Do(func() { sc.sendServeMsg(http2gracefulShutdownMsg) })
+}
+
+// After sending GOAWAY with an error code (non-graceful shutdown), the
+// connection will close after goAwayTimeout.
+//
+// If we close the connection immediately after sending GOAWAY, there may
+// be unsent data in our kernel receive buffer, which will cause the kernel
+// to send a TCP RST on close() instead of a FIN. This RST will abort the
+// connection immediately, whether or not the client had received the GOAWAY.
+//
+// Ideally we should delay for at least 1 RTT + epsilon so the client has
+// a chance to read the GOAWAY and stop sending messages. Measuring RTT
+// is hard, so we approximate with 1 second. See golang.org/issue/18701.
+//
+ // This is a var so it can be shorter in tests, where all requests use the
+ // loopback interface, making the expected RTT very small.
+//
+// TODO: configurable?
+var http2goAwayTimeout = 1 * time.Second
+
+func (sc *http2serverConn) startGracefulShutdownInternal() {
+ sc.goAway(http2ErrCodeNo)
+}
+
+func (sc *http2serverConn) goAway(code http2ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ if sc.goAwayCode == http2ErrCodeNo {
+ sc.goAwayCode = code
+ }
+ return
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *http2serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
+}
+
+func (sc *http2serverConn) resetStream(se http2StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(http2FrameWriteRequest{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.resetQueued = true
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == http2ErrFrameTooLarge {
+ sc.goAway(http2ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ } else {
+ f := res.f
+ if http2VerboseLogs {
+ sc.vlogf("http2: server read frame %v", http2summarizeFrame(f))
+ }
+ err = sc.processFrame(f)
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case http2StreamError:
+ sc.resetStream(ev)
+ return true
+ case http2goAwayFlowError:
+ sc.goAway(http2ErrCodeFlowControl)
+ return true
+ case http2ConnectionError:
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(http2ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if res.err != nil {
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("http2: server closing client connection: %v", err)
+ }
+ return false
+ }
+}
+
+func (sc *http2serverConn) processFrame(f http2Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*http2SettingsFrame); !ok {
+ return sc.countError("first_settings", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ sc.sawFirstSettings = true
+ }
+
+ // Discard frames for streams initiated after the identified last
+ // stream sent in a GOAWAY, or all frames after sending an error.
+ // We still need to return connection-level flow control for DATA frames.
+ // RFC 9113 Section 6.8.
+ if sc.inGoAway && (sc.goAwayCode != http2ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
+
+ if f, ok := f.(*http2DataFrame); ok {
+ if !sc.inflow.take(f.Length) {
+ return sc.countError("data_flow", http2streamError(f.Header().StreamID, http2ErrCodeFlowControl))
+ }
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+ }
+ return nil
+ }
+
+ switch f := f.(type) {
+ case *http2SettingsFrame:
+ return sc.processSettings(f)
+ case *http2MetaHeadersFrame:
+ return sc.processHeaders(f)
+ case *http2WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *http2PingFrame:
+ return sc.processPing(f)
+ case *http2DataFrame:
+ return sc.processData(f)
+ case *http2RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *http2PriorityFrame:
+ return sc.processPriority(f)
+ case *http2GoAwayFrame:
+ return sc.processGoAway(f)
+ case *http2PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return sc.countError("push_promise", http2ConnectionError(http2ErrCodeProtocol))
+ default:
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *http2serverConn) processPing(f *http2PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return sc.countError("ping_on_stream", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ sc.writeFrame(http2FrameWriteRequest{write: http2writePingAck{f}})
+ return nil
+}
+
+func (sc *http2serverConn) processWindowUpdate(f *http2WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ state, st := sc.state(f.StreamID)
+ if state == http2stateIdle {
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return sc.countError("stream_idle", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return sc.countError("bad_flow", http2streamError(f.StreamID, http2ErrCodeFlowControl))
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return http2goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *http2serverConn) processResetStream(f *http2RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == http2stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR."
+ return sc.countError("reset_idle_stream", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ if st != nil {
+ st.cancelCtx()
+ sc.closeStream(st, http2streamError(f.StreamID, f.ErrCode))
+ }
+ return nil
+}
+
+func (sc *http2serverConn) closeStream(st *http2stream, err error) {
+ sc.serveG.check()
+ if st.state == http2stateIdle || st.state == http2stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = http2stateClosed
+ if st.readDeadline != nil {
+ st.readDeadline.Stop()
+ }
+ if st.writeDeadline != nil {
+ st.writeDeadline.Stop()
+ }
+ if st.isPushed() {
+ sc.curPushedStreams--
+ } else {
+ sc.curClientStreams--
+ }
+ delete(sc.streams, st.id)
+ if len(sc.streams) == 0 {
+ sc.setConnState(StateIdle)
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer.Reset(sc.srv.IdleTimeout)
+ }
+ if http2h1ServerKeepAlivesDisabled(sc.hs) {
+ sc.startGracefulShutdownInternal()
+ }
+ }
+ if p := st.body; p != nil {
+ // Return any buffered unread bytes worth of conn-level flow control.
+ // See golang.org/issue/16481
+ sc.sendWindowUpdate(nil, p.Len())
+
+ p.CloseWithError(err)
+ }
+ if e, ok := err.(http2StreamError); ok {
+ if e.Cause != nil {
+ err = e.Cause
+ } else {
+ err = http2errStreamClosed
+ }
+ }
+ st.closeErr = err
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.CloseStream(st.id)
+}
+
+func (sc *http2serverConn) processSettings(f *http2SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return sc.countError("ack_mystery", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ return nil
+ }
+ if f.NumSettings() > 100 || f.HasDuplicates() {
+ // This isn't actually in the spec, but hang up on
+ // suspiciously large settings frames or those with
+ // duplicate entries.
+ return sc.countError("settings_big_or_dups", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
+ // acknowledged individually, even if multiple are received before the ACK.
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *http2serverConn) processSetting(s http2Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ if http2VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
+ switch s.ID {
+ case http2SettingHeaderTableSize:
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case http2SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case http2SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case http2SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case http2SettingMaxFrameSize:
+ sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
+ case http2SettingMaxHeaderListSize:
+ sc.peerMaxHeaderListSize = s.Val
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ if http2VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
+ }
+ return nil
+}
+
+func (sc *http2serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialStreamSendWindowSize
+ sc.initialStreamSendWindowSize = int32(val)
+ growth := int32(val) - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return sc.countError("setting_win_size", http2ConnectionError(http2ErrCodeFlowControl))
+ }
+ }
+ return nil
+}
+
+func (sc *http2serverConn) processData(f *http2DataFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+
+ data := f.Data()
+ state, st := sc.state(id)
+ if id == 0 || state == http2stateIdle {
+ // Section 6.1: "DATA frames MUST be associated with a
+ // stream. If a DATA frame is received whose stream
+ // identifier field is 0x0, the recipient MUST respond
+ // with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ //
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return sc.countError("data_on_idle", http2ConnectionError(http2ErrCodeProtocol))
+ }
+
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ if st == nil || state != http2stateOpen || st.gotTrailerHeader || st.resetQueued {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+
+ // But still enforce their connection-level flow control,
+ // and return any flow control bytes since we're not going
+ // to consume them.
+ if !sc.inflow.take(f.Length) {
+ return sc.countError("data_flow", http2streamError(id, http2ErrCodeFlowControl))
+ }
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+ if st != nil && st.resetQueued {
+ // Already have a stream error in flight. Don't send another.
+ return nil
+ }
+ return sc.countError("closed", http2streamError(id, http2ErrCodeStreamClosed))
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ if !sc.inflow.take(f.Length) {
+ return sc.countError("data_flow", http2streamError(id, http2ErrCodeFlowControl))
+ }
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+ st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
+ // value of a content-length header field does not equal the sum of the
+ // DATA frame payload lengths that form the body.
+ return sc.countError("send_too_much", http2streamError(id, http2ErrCodeProtocol))
+ }
+ if f.Length > 0 {
+ // Check whether the client has flow control quota.
+ if !http2takeInflows(&sc.inflow, &st.inflow, f.Length) {
+ return sc.countError("flow_on_data_length", http2streamError(id, http2ErrCodeFlowControl))
+ }
+
+ if len(data) > 0 {
+ st.bodyBytes += int64(len(data))
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ // The handler has closed the request body.
+ // Return the connection-level flow control for the discarded data,
+ // but not the stream-level flow control.
+ sc.sendWindowUpdate(nil, int(f.Length)-wrote)
+ return nil
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ }
+
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ // Call sendWindowUpdate even if there is no padding,
+ // to return buffered flow control credit if the sent
+ // window has shrunk.
+ pad := int32(f.Length) - int32(len(data))
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
+
+func (sc *http2serverConn) processGoAway(f *http2GoAwayFrame) error {
+ sc.serveG.check()
+ if f.ErrCode != http2ErrCodeNo {
+ sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ } else {
+ sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ }
+ sc.startGracefulShutdownInternal()
+ // http://tools.ietf.org/html/rfc7540#section-6.8
+ // We should not create any new streams, which means we should disable push.
+ sc.pushEnabled = false
+ return nil
+}
+
+// isPushed reports whether the stream is server-initiated.
+func (st *http2stream) isPushed() bool {
+ return st.id%2 == 0
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *http2stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = http2stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *http2stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
+
+// onReadTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's ReadTimeout has fired.
+func (st *http2stream) onReadTimeout() {
+ if st.body != nil {
+ // Wrap the ErrDeadlineExceeded to avoid callers depending on us
+ // returning the bare error.
+ st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+ }
+}
+
+// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's WriteTimeout has fired.
+func (st *http2stream) onWriteTimeout() {
+ st.sc.writeFrameFromHandler(http2FrameWriteRequest{write: http2StreamError{
+ StreamID: st.id,
+ Code: http2ErrCodeInternal,
+ Cause: os.ErrDeadlineExceeded,
+ }})
+}
+
+func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error {
+ sc.serveG.check()
+ id := f.StreamID
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return sc.countError("headers_even", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ if st := sc.streams[f.StreamID]; st != nil {
+ if st.resetQueued {
+ // We're sending RST_STREAM to close the stream, so don't bother
+ // processing this frame.
+ return nil
+ }
+ // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
+ // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
+ // this state, it MUST respond with a stream error (Section 5.4.2) of
+ // type STREAM_CLOSED.
+ if st.state == http2stateHalfClosedRemote {
+ return sc.countError("headers_half_closed", http2streamError(id, http2ErrCodeStreamClosed))
+ }
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxClientStreamID {
+ return sc.countError("stream_went_down", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ sc.maxClientStreamID = id
+
+ if sc.idleTimer != nil {
+ sc.idleTimer.Stop()
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.2
+ // [...] Endpoints MUST NOT exceed the limit set by their peer. An
+ // endpoint that receives a HEADERS frame that causes their
+ // advertised concurrent stream limit to be exceeded MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+ // or REFUSED_STREAM.
+ if sc.curClientStreams+1 > sc.advMaxStreams {
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return sc.countError("over_max_streams", http2streamError(id, http2ErrCodeProtocol))
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return sc.countError("over_max_streams_race", http2streamError(id, http2ErrCodeRefusedStream))
+ }
+
+ initialState := http2stateOpen
+ if f.StreamEnded() {
+ initialState = http2stateHalfClosedRemote
+ }
+ st := sc.newStream(id, 0, initialState)
+
+ if f.HasPriority() {
+ if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(st.id, f.Priority)
+ }
+
+ rw, req, err := sc.newWriterAndRequest(st, f)
+ if err != nil {
+ return err
+ }
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(Header)
+ }
+ st.body = req.Body.(*http2requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+
+ handler := sc.handler.ServeHTTP
+ if f.Truncated {
+ // Their header list was too long. Send a 431 error.
+ handler = http2handleHeaderListTooLong
+ } else if err := http2checkValidHTTP2RequestHeaders(req.Header); err != nil {
+ handler = http2new400Handler(err)
+ }
+
+ // The net/http package sets the read deadline from the
+ // http.Server.ReadTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already
+ // set. Disarm it here after the request headers are read,
+ // similar to how the http1 server works. Here it's
+ // technically more like the http1 Server's ReadHeaderTimeout
+ // (in Go 1.8), though. That's a more sane option anyway.
+ if sc.hs.ReadTimeout != 0 {
+ sc.conn.SetReadDeadline(time.Time{})
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ }
+
+ return sc.scheduleHandler(id, rw, req, handler)
+}
+
+func (sc *http2serverConn) upgradeRequest(req *Request) {
+ sc.serveG.check()
+ id := uint32(1)
+ sc.maxClientStreamID = id
+ st := sc.newStream(id, 0, http2stateHalfClosedRemote)
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(Header)
+ }
+ rw := sc.newResponseWriter(st, req)
+
+ // Disable any read deadline set by the net/http package
+ // prior to the upgrade.
+ if sc.hs.ReadTimeout != 0 {
+ sc.conn.SetReadDeadline(time.Time{})
+ }
+
+ // This is the first request on the connection,
+ // so start the handler directly rather than going
+ // through scheduleHandler.
+ sc.curHandlers++
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+}
+
+func (st *http2stream) processTrailerHeaders(f *http2MetaHeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return sc.countError("dup_trailers", http2ConnectionError(http2ErrCodeProtocol))
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return sc.countError("trailers_not_ended", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+
+ if len(f.PseudoFields()) > 0 {
+ return sc.countError("trailers_pseudo", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+ if st.trailer != nil {
+ for _, hf := range f.RegularFields() {
+ key := sc.canonicalHeader(hf.Name)
+ if !httpguts.ValidTrailerHeader(key) {
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
+ return sc.countError("trailers_bogus", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+ st.trailer[key] = append(st.trailer[key], hf.Value)
+ }
+ }
+ st.endStream()
+ return nil
+}
+
+func (sc *http2serverConn) checkPriority(streamID uint32, p http2PriorityParam) error {
+ if streamID == p.StreamDep {
+ // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+ // Section 5.3.3 says that a stream can depend on one of its dependencies,
+ // so it's only self-dependencies that are forbidden.
+ return sc.countError("priority", http2streamError(streamID, http2ErrCodeProtocol))
+ }
+ return nil
+}
+
+func (sc *http2serverConn) processPriority(f *http2PriorityFrame) error {
+ if err := sc.checkPriority(f.StreamID, f.http2PriorityParam); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(f.StreamID, f.http2PriorityParam)
+ return nil
+}
+
+func (sc *http2serverConn) newStream(id, pusherID uint32, state http2streamState) *http2stream {
+ sc.serveG.check()
+ if id == 0 {
+ panic("internal error: cannot create stream with id 0")
+ }
+
+ ctx, cancelCtx := context.WithCancel(sc.baseCtx)
+ st := &http2stream{
+ sc: sc,
+ id: id,
+ state: state,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
+ }
+ st.cw.Init()
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialStreamSendWindowSize)
+ st.inflow.init(sc.srv.initialStreamRecvWindowSize())
+ if sc.hs.WriteTimeout != 0 {
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ }
+
+ sc.streams[id] = st
+ sc.writeSched.OpenStream(st.id, http2OpenStreamOptions{PusherID: pusherID})
+ if st.isPushed() {
+ sc.curPushedStreams++
+ } else {
+ sc.curClientStreams++
+ }
+ if sc.curOpenStreams() == 1 {
+ sc.setConnState(StateActive)
+ }
+
+ return st
+}
+
+func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHeadersFrame) (*http2responseWriter, *Request, error) {
+ sc.serveG.check()
+
+ rp := http2requestParam{
+ method: f.PseudoValue("method"),
+ scheme: f.PseudoValue("scheme"),
+ authority: f.PseudoValue("authority"),
+ path: f.PseudoValue("path"),
+ }
+
+ isConnect := rp.method == "CONNECT"
+ if isConnect {
+ if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+ return nil, nil, sc.countError("bad_connect", http2streamError(f.StreamID, http2ErrCodeProtocol))
+ }
+ } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, sc.countError("bad_path_method", http2streamError(f.StreamID, http2ErrCodeProtocol))
+ }
+
+ rp.header = make(Header)
+ for _, hf := range f.RegularFields() {
+ rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+ if rp.authority == "" {
+ rp.authority = rp.header.Get("Host")
+ }
+
+ rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+ if err != nil {
+ return nil, nil, err
+ }
+ bodyOpen := !f.StreamEnded()
+ if bodyOpen {
+ if vv, ok := rp.header["Content-Length"]; ok {
+ if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
+ req.ContentLength = int64(cl)
+ } else {
+ req.ContentLength = 0
+ }
+ } else {
+ req.ContentLength = -1
+ }
+ req.Body.(*http2requestBody).pipe = &http2pipe{
+ b: &http2dataBuffer{expected: req.ContentLength},
+ }
+ }
+ return rw, req, nil
+}
+
+type http2requestParam struct {
+ method string
+ scheme, authority, path string
+ header Header
+}
+
+func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2requestParam) (*http2responseWriter, *Request, error) {
+ sc.serveG.check()
+
+ var tlsState *tls.ConnectionState // nil if not scheme https
+ if rp.scheme == "https" {
+ tlsState = sc.tlsState
+ }
+
+ needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
+ if needsContinue {
+ rp.header.Del("Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+ rp.header.Set("Cookie", strings.Join(cookies, "; "))
+ }
+
+ // Setup Trailers
+ var trailer Header
+ for _, v := range rp.header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = CanonicalHeaderKey(textproto.TrimString(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(Header)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(rp.header, "Trailer")
+
+ var url_ *url.URL
+ var requestURI string
+ if rp.method == "CONNECT" {
+ url_ = &url.URL{Host: rp.authority}
+ requestURI = rp.authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(rp.path)
+ if err != nil {
+ return nil, nil, sc.countError("bad_path", http2streamError(st.id, http2ErrCodeProtocol))
+ }
+ requestURI = rp.path
+ }
+
+ body := &http2requestBody{
+ conn: sc,
+ stream: st,
+ needsContinue: needsContinue,
+ }
+ req := &Request{
+ Method: rp.method,
+ URL: url_,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: rp.header,
+ RequestURI: requestURI,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: rp.authority,
+ Body: body,
+ Trailer: trailer,
+ }
+ req = req.WithContext(st.ctx)
+
+ rw := sc.newResponseWriter(st, req)
+ return rw, req, nil
+}
+
+func (sc *http2serverConn) newResponseWriter(st *http2stream, req *Request) *http2responseWriter {
+ rws := http2responseWriterStatePool.Get().(*http2responseWriterState)
+ bwSave := rws.bw
+ *rws = http2responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(http2chunkWriter{rws})
+ rws.stream = st
+ rws.req = req
+ return &http2responseWriter{rws: rws}
+}
+
+type http2unstartedHandler struct {
+ streamID uint32
+ rw *http2responseWriter
+ req *Request
+ handler func(ResponseWriter, *Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *http2serverConn) scheduleHandler(streamID uint32, rw *http2responseWriter, req *Request, handler func(ResponseWriter, *Request)) error {
+ sc.serveG.check()
+ maxHandlers := sc.advMaxStreams
+ if sc.curHandlers < maxHandlers {
+ sc.curHandlers++
+ go sc.runHandler(rw, req, handler)
+ return nil
+ }
+ if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+ return sc.countError("too_many_early_resets", http2ConnectionError(http2ErrCodeEnhanceYourCalm))
+ }
+ sc.unstartedHandlers = append(sc.unstartedHandlers, http2unstartedHandler{
+ streamID: streamID,
+ rw: rw,
+ req: req,
+ handler: handler,
+ })
+ return nil
+}
+
+func (sc *http2serverConn) handlerDone() {
+ sc.serveG.check()
+ sc.curHandlers--
+ i := 0
+ maxHandlers := sc.advMaxStreams
+ for ; i < len(sc.unstartedHandlers); i++ {
+ u := sc.unstartedHandlers[i]
+ if sc.streams[u.streamID] == nil {
+ // This stream was reset before its goroutine had a chance to start.
+ continue
+ }
+ if sc.curHandlers >= maxHandlers {
+ break
+ }
+ sc.curHandlers++
+ go sc.runHandler(u.rw, u.req, u.handler)
+ sc.unstartedHandlers[i] = http2unstartedHandler{} // don't retain references
+ }
+ sc.unstartedHandlers = sc.unstartedHandlers[i:]
+ if len(sc.unstartedHandlers) == 0 {
+ sc.unstartedHandlers = nil
+ }
+}
+
+// Run on its own goroutine.
+func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, handler func(ResponseWriter, *Request)) {
+ defer sc.sendServeMsg(http2handlerDoneMsg)
+ didPanic := true
+ defer func() {
+ rw.rws.stream.cancelCtx()
+ if req.MultipartForm != nil {
+ req.MultipartForm.RemoveAll()
+ }
+ if didPanic {
+ e := recover()
+ sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: http2handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ // Same as net/http:
+ if e != nil && e != ErrAbortHandler {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ }
+ return
+ }
+ rw.handlerDone()
+ }()
+ handler(rw, req)
+ didPanic = false
+}
+
+func http2handleHeaderListTooLong(w ResponseWriter, r *Request) {
+ // 10.5.1 Limits on Header Block Size:
+ // .. "A server that receives a larger header block than it is
+ // willing to handle can send an HTTP 431 (Request Header Fields Too
+ // Large) status code"
+ const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+ w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+ io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeResHeaders) error {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+		// There's a header map (which we don't own), so we have to block
+		// until this frame is written; that way an http.Flush mid-handler
+		// writes out the correct values of keys before a handler later
+		// potentially mutates them.
+ errc = http2errChanPool.Get().(chan error)
+ }
+ if err := sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: headerData,
+ stream: st,
+ done: errc,
+ }); err != nil {
+ return err
+ }
+ if errc != nil {
+ select {
+ case err := <-errc:
+ http2errChanPool.Put(errc)
+ return err
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-st.cw:
+ return http2errStreamClosed
+ }
+ }
+ return nil
+}
+
+// called from handler goroutines.
+func (sc *http2serverConn) write100ContinueHeaders(st *http2stream) {
+ sc.writeFrameFromHandler(http2FrameWriteRequest{
+ write: http2write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type http2bodyReadMsg struct {
+ st *http2stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *http2serverConn) noteBodyReadFromHandler(st *http2stream, n int, err error) {
+ sc.serveG.checkNotOn() // NOT on
+ if n > 0 {
+ select {
+ case sc.bodyReadCh <- http2bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+ }
+}
+
+func (sc *http2serverConn) noteBodyRead(st *http2stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != http2stateHalfClosedRemote && st.state != http2stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *http2serverConn) sendWindowUpdate32(st *http2stream, n int32) {
+ sc.sendWindowUpdate(st, int(n))
+}
+
+// st may be nil for conn-level
+func (sc *http2serverConn) sendWindowUpdate(st *http2stream, n int) {
+ sc.serveG.check()
+ var streamID uint32
+ var send int32
+ if st == nil {
+ send = sc.inflow.add(n)
+ } else {
+ streamID = st.id
+ send = st.inflow.add(n)
+ }
+ if send == 0 {
+ return
+ }
+ sc.writeFrame(http2FrameWriteRequest{
+ write: http2writeWindowUpdate{streamID: streamID, n: uint32(send)},
+ stream: st,
+ })
+}
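+
+// For example, when a handler consumes 64 KiB of request body,
+// noteBodyRead calls this once for the connection and once for the
+// stream; inflow.add buffers small refunds (returning 0) until a
+// WINDOW_UPDATE is worth sending, so not every read produces a frame.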
+
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
+type http2requestBody struct {
+ _ http2incomparable
+ stream *http2stream
+ conn *http2serverConn
+ closeOnce sync.Once // for use by Close only
+ sawEOF bool // for use by Read only
+ pipe *http2pipe // non-nil if we have an HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *http2requestBody) Close() error {
+ b.closeOnce.Do(func() {
+ if b.pipe != nil {
+ b.pipe.BreakWithError(http2errClosedBody)
+ }
+ })
+ return nil
+}
+
+func (b *http2requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil || b.sawEOF {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if err == io.EOF {
+ b.sawEOF = true
+ }
+ if b.conn == nil && http2inTests {
+ return
+ }
+ b.conn.noteBodyReadFromHandler(b.stream, n, err)
+ return
+}
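+
+// Note the interim response is lazy: the 100 Continue is written only
+// when the handler first reads from the body. For example, a handler
+// that rejects a request without touching r.Body never triggers a
+// 100 Continue, and the client can then skip sending the body entirely.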
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type http2responseWriter struct {
+ rws *http2responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ CloseNotifier = (*http2responseWriter)(nil)
+ _ Flusher = (*http2responseWriter)(nil)
+ _ http2stringWriter = (*http2responseWriter)(nil)
+)
+
+type http2responseWriterState struct {
+ // immutable within a request:
+ stream *http2stream
+ req *Request
+ conn *http2serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader Header // nil until called
+ snapHeader Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
+type http2chunkWriter struct{ rws *http2responseWriterState }
+
+func (cw http2chunkWriter) Write(p []byte) (n int, err error) {
+ n, err = cw.rws.writeChunk(p)
+ if err == http2errStreamClosed {
+ // If writing failed because the stream has been closed,
+ // return the reason it was closed.
+ err = cw.rws.stream.closeErr
+ }
+ return n, err
+}
+
+func (rws *http2responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
+
+func (rws *http2responseWriterState) hasNonemptyTrailers() bool {
+ for _, trailer := range rws.trailers {
+ if _, ok := rws.handlerHeader[trailer]; ok {
+ return true
+ }
+ }
+ return false
+}
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *http2responseWriterState) declareTrailer(k string) {
+ k = CanonicalHeaderKey(k)
+ if !httpguts.ValidTrailerHeader(k) {
+ // Forbidden by RFC 7230, section 4.1.2.
+ rws.conn.logf("ignoring invalid trailer %q", k)
+ return
+ }
+ if !http2strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ isHeadResp := rws.req.Method == "HEAD"
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
+ rws.sentContentLen = int64(cl)
+ } else {
+ clen = ""
+ }
+ }
+ _, hasContentLength := rws.snapHeader["Content-Length"]
+ if !hasContentLength && clen == "" && rws.handlerDone && http2bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ clen = strconv.Itoa(len(p))
+ }
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ // If the Content-Encoding is non-blank, we shouldn't
+ // sniff the body. See Issue golang.org/issue/31753.
+ ce := rws.snapHeader.Get("Content-Encoding")
+ hasCE := len(ce) > 0
+ if !hasCE && !hasContentType && http2bodyAllowedForStatus(rws.status) && len(p) > 0 {
+ ctype = DetectContentType(p)
+ }
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = time.Now().UTC().Format(TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ http2foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
+ // but respect "Connection" == "close" to mean sending a GOAWAY and tearing
+ // down the TCP connection when idle, like we do for HTTP/1.
+ // TODO: remove more Connection-specific header fields here, in addition
+ // to "Connection".
+ if _, ok := rws.snapHeader["Connection"]; ok {
+ v := rws.snapHeader.Get("Connection")
+ delete(rws.snapHeader, "Connection")
+ if v == "close" {
+ rws.conn.startGracefulShutdown()
+ }
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+ err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ date: date,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if endStream {
+ return 0, nil
+ }
+ }
+ if isHeadResp {
+ return len(p), nil
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+
+ // only send trailers if they have actually been defined by the
+ // server handler.
+ hasNonemptyTrailers := rws.hasNonemptyTrailers()
+ endStream := rws.handlerDone && !hasNonemptyTrailers
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && hasNonemptyTrailers {
+ err = rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ return len(p), err
+ }
+ return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+//
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const http2TrailerPrefix = "Trailer:"
+
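+// For example, a handler can emit an undeclared trailer like this
+// (an illustrative sketch; "X-Checksum" is a hypothetical field):
+//
+//	func handler(w ResponseWriter, r *Request) {
+//		w.WriteHeader(200)
+//		io.WriteString(w, "body")                  // flushes the header frame
+//		w.Header().Set("Trailer:X-Checksum", "42") // promoted to a trailer
+//	}
+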
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 7230
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *http2responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, http2TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, http2TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := http2sorterPool.Get().(*http2sorter)
+ sorter.SortStrings(rws.trailers)
+ http2sorterPool.Put(sorter)
+ }
+}
+
+func (w *http2responseWriter) SetReadDeadline(deadline time.Time) error {
+ st := w.rws.stream
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
+ // If we're setting a deadline in the past, reset the stream immediately
+		// so reads after SetReadDeadline returns will fail.
+ st.onReadTimeout()
+ return nil
+ }
+ w.rws.conn.sendServeMsg(func(sc *http2serverConn) {
+ if st.readDeadline != nil {
+ if !st.readDeadline.Stop() {
+ // Deadline already exceeded, or stream has been closed.
+ return
+ }
+ }
+ if deadline.IsZero() {
+ st.readDeadline = nil
+ } else if st.readDeadline == nil {
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
+ } else {
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
+ }
+ })
+ return nil
+}
+
+func (w *http2responseWriter) SetWriteDeadline(deadline time.Time) error {
+ st := w.rws.stream
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
+ // If we're setting a deadline in the past, reset the stream immediately
+ // so writes after SetWriteDeadline returns will fail.
+ st.onWriteTimeout()
+ return nil
+ }
+ w.rws.conn.sendServeMsg(func(sc *http2serverConn) {
+ if st.writeDeadline != nil {
+ if !st.writeDeadline.Stop() {
+ // Deadline already exceeded, or stream has been closed.
+ return
+ }
+ }
+ if deadline.IsZero() {
+ st.writeDeadline = nil
+ } else if st.writeDeadline == nil {
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
+ } else {
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
+ }
+ })
+ return nil
+}
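+
+// Handlers typically reach these two methods through net/http's
+// ResponseController (Go 1.20+); a minimal sketch:
+//
+//	func handler(w ResponseWriter, r *Request) {
+//		rc := NewResponseController(w)
+//		rc.SetWriteDeadline(time.Now().Add(5 * time.Second)) // per-stream, not per-conn
+//	}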
+
+func (w *http2responseWriter) Flush() {
+ w.FlushError()
+}
+
+func (w *http2responseWriter) FlushError() error {
+ rws := w.rws
+ if rws == nil {
+		panic("Flush called after Handler finished")
+ }
+ var err error
+ if rws.bw.Buffered() > 0 {
+ err = rws.bw.Flush()
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk with zero bytes), so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ _, err = http2chunkWriter{rws}.Write(nil)
+ if err == nil {
+ select {
+ case <-rws.stream.cw:
+ err = rws.stream.closeErr
+ default:
+ }
+ }
+ }
+ return err
+}
+
+func (w *http2responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ cw := rws.stream.cw
+ go func() {
+ cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
+
+func (w *http2responseWriter) Header() Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(Header)
+ }
+ return rws.handlerHeader
+}
+
+// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
+func http2checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at http://httpwg.org/specs/rfc7231.html#status.codes).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 {
+ panic(fmt.Sprintf("invalid WriteHeader code %v", code))
+ }
+}
+
+func (w *http2responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *http2responseWriterState) writeHeader(code int) {
+ if rws.wroteHeader {
+ return
+ }
+
+ http2checkWriteHeaderCode(code)
+
+ // Handle informational headers
+ if code >= 100 && code <= 199 {
+ // Per RFC 8297 we must not clear the current header map
+ h := rws.handlerHeader
+
+ _, cl := h["Content-Length"]
+ _, te := h["Transfer-Encoding"]
+ if cl || te {
+ h = h.Clone()
+ h.Del("Content-Length")
+ h.Del("Transfer-Encoding")
+ }
+
+ rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: code,
+ h: h,
+ endStream: rws.handlerDone && !rws.hasTrailers(),
+ })
+
+ return
+ }
+
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = http2cloneHeader(rws.handlerHeader)
+ }
+}
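+
+// One user-visible consumer of the informational path above is
+// 103 Early Hints (RFC 8297); a minimal sketch:
+//
+//	func handler(w ResponseWriter, r *Request) {
+//		w.Header().Set("Link", "</style.css>; rel=preload; as=style")
+//		w.WriteHeader(StatusEarlyHints) // 103; the header map is preserved
+//		w.WriteHeader(StatusOK)         // final response
+//	}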
+
+func http2cloneHeader(h Header) Header {
+ h2 := make(Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *http2responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *http2responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+// At most one of dataB and dataS is non-empty.
+func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if !http2bodyAllowedForStatus(rws.status) {
+ return 0, ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *http2responseWriter) handlerDone() {
+ rws := w.rws
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ http2responseWriterStatePool.Put(rws)
+}
+
+// Push errors.
+var (
+ http2ErrRecursivePush = errors.New("http2: recursive push not allowed")
+ http2ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+var _ Pusher = (*http2responseWriter)(nil)
+
+func (w *http2responseWriter) Push(target string, opts *PushOptions) error {
+ st := w.rws.stream
+ sc := st.sc
+ sc.serveG.checkNotOn()
+
+ // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+ // http://tools.ietf.org/html/rfc7540#section-6.6
+ if st.isPushed() {
+ return http2ErrRecursivePush
+ }
+
+ if opts == nil {
+ opts = new(PushOptions)
+ }
+
+ // Default options.
+ if opts.Method == "" {
+ opts.Method = "GET"
+ }
+ if opts.Header == nil {
+ opts.Header = Header{}
+ }
+ wantScheme := "http"
+ if w.rws.req.TLS != nil {
+ wantScheme = "https"
+ }
+
+ // Validate the request.
+ u, err := url.Parse(target)
+ if err != nil {
+ return err
+ }
+ if u.Scheme == "" {
+ if !strings.HasPrefix(target, "/") {
+ return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+ }
+ u.Scheme = wantScheme
+ u.Host = w.rws.req.Host
+ } else {
+ if u.Scheme != wantScheme {
+ return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+ }
+ if u.Host == "" {
+ return errors.New("URL must have a host")
+ }
+ }
+ for k := range opts.Header {
+ if strings.HasPrefix(k, ":") {
+ return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+ }
+ // These headers are meaningful only if the request has a body,
+ // but PUSH_PROMISE requests cannot have a body.
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ // Also disallow Host, since the promised URL must be absolute.
+ if http2asciiEqualFold(k, "content-length") ||
+ http2asciiEqualFold(k, "content-encoding") ||
+ http2asciiEqualFold(k, "trailer") ||
+ http2asciiEqualFold(k, "te") ||
+ http2asciiEqualFold(k, "expect") ||
+ http2asciiEqualFold(k, "host") {
+ return fmt.Errorf("promised request headers cannot include %q", k)
+ }
+ }
+ if err := http2checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+ return err
+ }
+
+ // The RFC effectively limits promised requests to GET and HEAD:
+ // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ if opts.Method != "GET" && opts.Method != "HEAD" {
+ return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+ }
+
+ msg := &http2startPushRequest{
+ parent: st,
+ method: opts.Method,
+ url: u,
+ header: http2cloneHeader(opts.Header),
+ done: http2errChanPool.Get().(chan error),
+ }
+
+ select {
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-st.cw:
+ return http2errStreamClosed
+ case sc.serveMsgCh <- msg:
+ }
+
+ select {
+ case <-sc.doneServing:
+ return http2errClientDisconnected
+ case <-st.cw:
+ return http2errStreamClosed
+ case err := <-msg.done:
+ http2errChanPool.Put(msg.done)
+ return err
+ }
+}
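+
+// Handlers use this through the net/http Pusher interface; a minimal
+// sketch (the pushed path is illustrative):
+//
+//	func handler(w ResponseWriter, r *Request) {
+//		if p, ok := w.(Pusher); ok {
+//			p.Push("/static/app.css", nil) // best effort; may return ErrNotSupported
+//		}
+//	}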
+
+type http2startPushRequest struct {
+ parent *http2stream
+ method string
+ url *url.URL
+ header Header
+ done chan error
+}
+
+func (sc *http2serverConn) startPush(msg *http2startPushRequest) {
+ sc.serveG.check()
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+ // is in either the "open" or "half-closed (remote)" state.
+ if msg.parent.state != http2stateOpen && msg.parent.state != http2stateHalfClosedRemote {
+ // responseWriter.Push checks that the stream is peer-initiated.
+ msg.done <- http2errStreamClosed
+ return
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ if !sc.pushEnabled {
+ msg.done <- ErrNotSupported
+ return
+ }
+
+ // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+ // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+ // is written. Once the ID is allocated, we start the request handler.
+ allocatePromisedID := func() (uint32, error) {
+ sc.serveG.check()
+
+ // Check this again, just in case. Technically, we might have received
+ // an updated SETTINGS by the time we got around to writing this frame.
+ if !sc.pushEnabled {
+ return 0, ErrNotSupported
+ }
+ // http://tools.ietf.org/html/rfc7540#section-6.5.2.
+ if sc.curPushedStreams+1 > sc.clientMaxStreams {
+ return 0, http2ErrPushLimitReached
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1.
+ // Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY
+ // frame so that the client is forced to open a new connection for new streams.
+ if sc.maxPushPromiseID+2 >= 1<<31 {
+ sc.startGracefulShutdownInternal()
+ return 0, http2ErrPushLimitReached
+ }
+ sc.maxPushPromiseID += 2
+ promisedID := sc.maxPushPromiseID
+
+ // http://tools.ietf.org/html/rfc7540#section-8.2.
+ // Strictly speaking, the new stream should start in "reserved (local)", then
+ // transition to "half closed (remote)" after sending the initial HEADERS, but
+ // we start in "half closed (remote)" for simplicity.
+ // See further comments at the definition of stateHalfClosedRemote.
+ promised := sc.newStream(promisedID, msg.parent.id, http2stateHalfClosedRemote)
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, http2requestParam{
+ method: msg.method,
+ scheme: msg.url.Scheme,
+ authority: msg.url.Host,
+ path: msg.url.RequestURI(),
+ header: http2cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+ })
+ if err != nil {
+ // Should not happen, since we've already validated msg.url.
+ panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+ }
+
+ sc.curHandlers++
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+ return promisedID, nil
+ }
+
+ sc.writeFrame(http2FrameWriteRequest{
+ write: &http2writePushPromise{
+ streamID: msg.parent.id,
+ method: msg.method,
+ url: msg.url,
+ h: msg.header,
+ allocatePromisedID: allocatePromisedID,
+ },
+ stream: msg.parent,
+ done: msg.done,
+ })
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 7230 section 7 and calls fn for each non-empty element.
+func http2foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
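+
+// For example, foreachHeaderElement("gzip, chunked, ", fn) calls fn
+// with "gzip" and then "chunked"; empty elements are dropped.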
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var http2connHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Connection",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func http2checkValidHTTP2RequestHeaders(h Header) error {
+ for _, k := range http2connHeaders {
+ if _, ok := h[k]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", k)
+ }
+ }
+ te := h["Te"]
+ if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+ return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+ }
+ return nil
+}
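+
+// For example, a request carrying "Connection: keep-alive" or
+// "TE: gzip" is rejected here, while "TE: trailers" (or an empty TE)
+// is allowed.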
+
+func http2new400Handler(err error) HandlerFunc {
+ return func(w ResponseWriter, r *Request) {
+ Error(w, err.Error(), StatusBadRequest)
+ }
+}
+
+// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
+// disabled. See comments on h1ServerShutdownChan above for why
+// the code is written this way.
+func http2h1ServerKeepAlivesDisabled(hs *Server) bool {
+ var x interface{} = hs
+ type I interface {
+ doKeepAlives() bool
+ }
+ if hs, ok := x.(I); ok {
+ return !hs.doKeepAlives()
+ }
+ return false
+}
+
+func (sc *http2serverConn) countError(name string, err error) error {
+ if sc == nil || sc.srv == nil {
+ return err
+ }
+ f := sc.srv.CountError
+ if f == nil {
+ return err
+ }
+ var typ string
+ var code http2ErrCode
+ switch e := err.(type) {
+ case http2ConnectionError:
+ typ = "conn"
+ code = http2ErrCode(e)
+ case http2StreamError:
+ typ = "stream"
+ code = http2ErrCode(e.Code)
+ default:
+ return err
+ }
+ codeStr := http2errCodeName[code]
+ if codeStr == "" {
+ codeStr = strconv.Itoa(int(code))
+ }
+ f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
+ return err
+}
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ http2transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ http2transportDefaultStreamFlow = 4 << 20
+
+ http2defaultUserAgent = "Go-http-client/2.0"
+
+	// initialMaxConcurrentStreams is a connection's maxConcurrentStreams until
+	// it has received the server's initial SETTINGS frame, which corresponds
+	// with the spec's minimum recommended value.
+ http2initialMaxConcurrentStreams = 100
+
+	// defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams
+	// if the server doesn't include one in its initial SETTINGS frame.
+ http2defaultMaxConcurrentStreams = 1000
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type http2Transport struct {
+ // DialTLSContext specifies an optional dial function with context for
+ // creating TLS connections for requests.
+ //
+	// If both DialTLSContext and DialTLS are nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+	// If both DialTLSContext and DialTLS are nil, tls.Dial is used.
+ //
+ // Deprecated: Use DialTLSContext instead, which allows the transport
+ // to cancel dials as soon as they are no longer needed.
+ // If both are set, DialTLSContext takes priority.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool http2ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+ // plain-text "http" scheme. Note that this does not enable h2c support.
+ AllowHTTP bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+ // of response headers are allowed. Unlike the http2 spec, zero here
+ // means to use a default limit (currently 10MB). If you actually
+ // want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the
+ // initial settings frame. It is the size in bytes of the largest frame
+ // payload that the sender is willing to receive. If 0, no setting is
+ // sent, and the value is provided by the peer, which should be 16384
+ // according to the spec:
+ // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2.
+ // Values are bounded in the range 16k to 16M.
+ MaxReadFrameSize uint32
+
+ // MaxDecoderHeaderTableSize optionally specifies the http2
+ // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+ // informs the remote endpoint of the maximum size of the header compression
+ // table used to decode header blocks, in octets. If zero, the default value
+ // of 4096 is used.
+ MaxDecoderHeaderTableSize uint32
+
+ // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+ // header compression table used for encoding request headers. Received
+ // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+ // the default value of 4096 is used.
+ MaxEncoderHeaderTableSize uint32
+
+ // StrictMaxConcurrentStreams controls whether the server's
+ // SETTINGS_MAX_CONCURRENT_STREAMS should be respected
+ // globally. If false, new TCP connections are created to the
+ // server as needed to keep each under the per-connection
+ // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
+ // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
+ // a global limit and callers of RoundTrip block when needed,
+ // waiting for their turn.
+ StrictMaxConcurrentStreams bool
+
+ // ReadIdleTimeout is the timeout after which a health check using ping
+ // frame will be carried out if no frame is received on the connection.
+	// Note that a ping response is considered a received frame, so if
+ // there is no other traffic on the connection, the health check will
+ // be performed every ReadIdleTimeout interval.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to Ping is not received.
+ // Defaults to 15s.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which the connection will be
+	// closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ WriteByteTimeout time.Duration
+
+ // CountError, if non-nil, is called on HTTP/2 transport errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef http2ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *http2Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
+
+func (t *http2Transport) maxFrameReadSize() uint32 {
+ if t.MaxReadFrameSize == 0 {
+ return 0 // use the default provided by the peer
+ }
+ if t.MaxReadFrameSize < http2minMaxFrameSize {
+ return http2minMaxFrameSize
+ }
+ if t.MaxReadFrameSize > http2maxFrameSize {
+ return http2maxFrameSize
+ }
+ return t.MaxReadFrameSize
+}
+
+func (t *http2Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+func (t *http2Transport) pingTimeout() time.Duration {
+ if t.PingTimeout == 0 {
+ return 15 * time.Second
+ }
+ return t.PingTimeout
+}
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It returns an error if t1 has already been HTTP/2-enabled.
+//
+// Use ConfigureTransports instead to configure the HTTP/2 Transport.
+func http2ConfigureTransport(t1 *Transport) error {
+ _, err := http2ConfigureTransports(t1)
+ return err
+}
+
+// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
+// It returns a new HTTP/2 Transport for further configuration.
+// It returns an error if t1 has already been HTTP/2-enabled.
+func http2ConfigureTransports(t1 *Transport) (*http2Transport, error) {
+ return http2configureTransports(t1)
+}
+
+func http2configureTransports(t1 *Transport) (*http2Transport, error) {
+ connPool := new(http2clientConnPool)
+ t2 := &http2Transport{
+ ConnPool: http2noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := http2registerHTTPSProtocol(t1, http2noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) RoundTripper {
+ addr := http2authorityAddr("https", authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return http2erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
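+
+// From user code this is reached via golang.org/x/net/http2, where the
+// bundled names lose their "http2" prefix; a minimal sketch:
+//
+//	t1 := &http.Transport{}
+//	t2, err := http2.ConfigureTransports(t1)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	t2.ReadIdleTimeout = 30 * time.Second // optional: enable ping health checks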
+
+func (t *http2Transport) connPool() http2ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *http2Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &http2clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type http2ClientConn struct {
+ t *http2Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ reused uint32 // whether conn is being reused; atomic
+ singleUse bool // whether being used for a single http.Request
+ getConnCalled bool // used by clientConnPool
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ idleTimeout time.Duration // or 0 for never
+ idleTimer *time.Timer
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+	flow http2outflow // our conn-level flow control quota (cs.flow is per stream)
+ inflow http2inflow // peer's conn-level flow control
+ doNotReuse bool // whether conn is marked to not be reused for any future requests
+ closing bool
+ closed bool
+ seenSettings bool // true if we've seen a settings frame, false otherwise
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *http2GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*http2clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
+ nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
+ br *bufio.Reader
+ lastActive time.Time
+ lastIdle time.Time // time last idle
+ // Settings from peer: (also guarded by wmu)
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ peerMaxHeaderTableSize uint32
+ initialWindowSize uint32
+
+ // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
+ // Write to reqHeaderMu to lock it, read from it to unlock.
+	// Lock reqHeaderMu BEFORE mu or wmu.
+ reqHeaderMu chan struct{}
+
+ // wmu is held while writing.
+ // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
+ // Only acquire both at the same time when changing peer settings.
+ wmu sync.Mutex
+ bw *bufio.Writer
+ fr *http2Framer
+ werr error // first write error that has occurred
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type http2clientStream struct {
+ cc *http2ClientConn
+
+ // Fields of Request that we may access even after the response body is closed.
+ ctx context.Context
+ reqCancel <-chan struct{}
+
+ trace *httptrace.ClientTrace // or nil
+ ID uint32
+ bufPipe http2pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+ isHead bool
+
+ abortOnce sync.Once
+ abort chan struct{} // closed to signal stream should end immediately
+ abortErr error // set if abort is closed
+
+ peerClosed chan struct{} // closed when the peer sends an END_STREAM flag
+ donec chan struct{} // closed after the stream is in the closed state
+ on100 chan struct{} // buffered; written to if a 100 is received
+
+ respHeaderRecv chan struct{} // closed when headers are received
+ res *Response // set if respHeaderRecv is closed
+
+ flow http2outflow // guarded by cc.mu
+ inflow http2inflow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+
+ reqBody io.ReadCloser
+ reqBodyContentLength int64 // -1 means unknown
+ reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done
+
+ // owned by writeRequest:
+ sentEndStream bool // sent an END_STREAM flag to the peer
+ sentHeaders bool
+
+ // owned by clientConnReadLoop:
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+ num1xx uint8 // number of 1xx responses seen
+ readClosed bool // peer sent an END_STREAM flag
+ readAborted bool // read loop reset the stream
+
+ trailer Header // accumulated trailers
+ resTrailer *Header // client's Response.Trailer
+}
+
+var http2got1xxFuncForTests func(int, textproto.MIMEHeader) error
+
+// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
+// if any. It returns nil if not set or if the Go version is too old.
+func (cs *http2clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
+ if fn := http2got1xxFuncForTests; fn != nil {
+ return fn
+ }
+ return http2traceGot1xxResponseFunc(cs.trace)
+}
+
+func (cs *http2clientStream) abortStream(err error) {
+ cs.cc.mu.Lock()
+ defer cs.cc.mu.Unlock()
+ cs.abortStreamLocked(err)
+}
+
+func (cs *http2clientStream) abortStreamLocked(err error) {
+ cs.abortOnce.Do(func() {
+ cs.abortErr = err
+ close(cs.abort)
+ })
+ if cs.reqBody != nil {
+ cs.closeReqBodyLocked()
+ }
+ // TODO(dneil): Clean up tests where cs.cc.cond is nil.
+ if cs.cc.cond != nil {
+ // Wake up writeRequestBody if it is waiting on flow control.
+ cs.cc.cond.Broadcast()
+ }
+}
+
+func (cs *http2clientStream) abortRequestBodyWrite() {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cs.reqBody != nil && cs.reqBodyClosed == nil {
+ cs.closeReqBodyLocked()
+ cc.cond.Broadcast()
+ }
+}
+
+func (cs *http2clientStream) closeReqBodyLocked() {
+ if cs.reqBodyClosed != nil {
+ return
+ }
+ cs.reqBodyClosed = make(chan struct{})
+ reqBodyClosed := cs.reqBodyClosed
+ go func() {
+ cs.reqBody.Close()
+ close(reqBodyClosed)
+ }()
+}
+
+type http2stickyErrWriter struct {
+ conn net.Conn
+ timeout time.Duration
+ err *error
+}
+
+func (sew http2stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ for {
+ if sew.timeout != 0 {
+ sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
+ }
+ nn, err := sew.conn.Write(p[n:])
+ n += nn
+ if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
+ // Keep extending the deadline so long as we're making progress.
+ continue
+ }
+ if sew.timeout != 0 {
+ sew.conn.SetWriteDeadline(time.Time{})
+ }
+ *sew.err = err
+ return n, err
+ }
+}
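+
+// For illustration (a sketch, not part of the upstream comments): once a
+// write fails, the sticky writer reports that same error for every later
+// write, so the bufio.Writer layered on top never papers over a dead
+// connection. Here c is any net.Conn:
+//
+//	var werr error
+//	w := http2stickyErrWriter{conn: c, err: &werr}
+//	w.Write(p1) // fails; *w.err now holds the error
+//	w.Write(p2) // returns (0, *w.err) immediately, without touching c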
+
+// noCachedConnError is the concrete type of ErrNoCachedConn, which
+// needs to be detected by net/http regardless of whether it's its
+// bundled version (in h2_bundle.go with a rewritten type name) or
+// from a user's x/net/http2. As such, it has a unique method name
+// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
+// isNoCachedConnError.
+type http2noCachedConnError struct{}
+
+func (http2noCachedConnError) IsHTTP2NoCachedConnError() {}
+
+func (http2noCachedConnError) Error() string { return "http2: no cached connection was available" }
+
+// isNoCachedConnError reports whether err is of type noCachedConnError
+// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
+// may coexist in the same running program.
+func http2isNoCachedConnError(err error) bool {
+ _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
+ return ok
+}
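+
+// A minimal sketch of the sniffing technique: any error value carrying the
+// marker method satisfies the anonymous interface, regardless of which
+// package defined its concrete type:
+//
+//	var err error = http2noCachedConnError{}
+//	_, ok := err.(interface{ IsHTTP2NoCachedConnError() }) // ok == true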
+
+var http2ErrNoCachedConn error = http2noCachedConnError{}
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type http2RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
+
+func (t *http2Transport) RoundTrip(req *Request) (*Response, error) {
+ return t.RoundTripOpt(req, http2RoundTripOpt{})
+}
+
+// authorityAddr takes an authority (a host/IP, or host:port / ip:port)
+// and returns a host:port, adding the scheme's default port (443, or 80
+// for plain "http") if none is present.
+func http2authorityAddr(scheme string, authority string) (addr string) {
+ host, port, err := net.SplitHostPort(authority)
+ if err != nil { // authority didn't have a port
+ host = authority
+ port = ""
+ }
+ if port == "" { // authority's port was empty
+ port = "443"
+ if scheme == "http" {
+ port = "80"
+ }
+ }
+ if a, err := idna.ToASCII(host); err == nil {
+ host = a
+ }
+ // IPv6 address literal, without a port:
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ return host + ":" + port
+ }
+ return net.JoinHostPort(host, port)
+}
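+
+// For illustration, a few inputs and their normalized results:
+//
+//	http2authorityAddr("https", "example.com")      // "example.com:443"
+//	http2authorityAddr("http", "example.com")       // "example.com:80"
+//	http2authorityAddr("https", "[::1]")            // "[::1]:443"
+//	http2authorityAddr("https", "example.com:8443") // "example.com:8443"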
+
+var http2retryBackoffHook func(time.Duration) *time.Timer
+
+func http2backoffNewTimer(d time.Duration) *time.Timer {
+ if http2retryBackoffHook != nil {
+ return http2retryBackoffHook(d)
+ }
+ return time.NewTimer(d)
+}
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *http2Transport) RoundTripOpt(req *Request, opt http2RoundTripOpt) (*Response, error) {
+ if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := http2authorityAddr(req.URL.Scheme, req.URL.Host)
+ for retry := 0; ; retry++ {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+ http2traceGotConn(req, cc, reused)
+ res, err := cc.RoundTrip(req)
+ if err != nil && retry <= 6 {
+ roundTripErr := err
+ if req, err = http2shouldRetryRequest(req, err); err == nil {
+ // After the first retry, do exponential backoff with 10% jitter.
+ if retry == 0 {
+ t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
+ continue
+ }
+ backoff := float64(uint(1) << (uint(retry) - 1))
+ backoff += backoff * (0.1 * mathrand.Float64())
+ d := time.Second * time.Duration(backoff)
+ timer := http2backoffNewTimer(d)
+ select {
+ case <-timer.C:
+ t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
+ continue
+ case <-req.Context().Done():
+ timer.Stop()
+ err = req.Context().Err()
+ }
+ }
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
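+
+// A worked example of the retry schedule above, assuming every attempt keeps
+// failing with a retryable error: the first retry happens immediately, then
+// retry n sleeps roughly 2^(n-1) seconds plus up to 10% jitter, i.e. about
+// 1s, 2s, 4s, 8s, 16s, 32s, after which RoundTripOpt gives up (retry > 6).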
+
+// CloseIdleConnections closes any connections that were opened by
+// previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *http2Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(http2clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ http2errClientConnClosed = errors.New("http2: client conn is closed")
+ http2errClientConnUnusable = errors.New("http2: client conn not usable")
+ http2errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+)
+
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (either the same request, or a
+// modified clone), or an error if the request can't be replayed.
+func http2shouldRetryRequest(req *Request, err error) (*Request, error) {
+ if !http2canRetryError(err) {
+ return nil, err
+ }
+ // If the Body is nil (or http.NoBody), it's safe to reuse
+ // this request and its Body.
+ if req.Body == nil || req.Body == NoBody {
+ return req, nil
+ }
+
+ // If the request body can be reset back to its original
+ // state via the optional req.GetBody, do that.
+ if req.GetBody != nil {
+ body, err := req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = body
+ return &newReq, nil
+ }
+
+ // The Request.Body can't reset back to the beginning, but we
+ // don't seem to have started to read from it yet, so reuse
+ // the request directly.
+ if err == http2errClientConnUnusable {
+ return req, nil
+ }
+
+ return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
+}
+
+func http2canRetryError(err error) bool {
+ if err == http2errClientConnUnusable || err == http2errClientConnGotGoAway {
+ return true
+ }
+ if se, ok := err.(http2StreamError); ok {
+ if se.Code == http2ErrCodeProtocol && se.Cause == http2errFromPeer {
+ // See golang/go#47635, golang/go#42777
+ return true
+ }
+ return se.Code == http2ErrCodeRefusedStream
+ }
+ return false
+}
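+
+// Hedged usage sketch (the names here are illustrative): a request body can
+// only be replayed when GetBody can recreate it. NewRequest already sets
+// GetBody for *bytes.Reader and similar body types; for a custom body, a
+// caller would do something like:
+//
+//	payload := []byte(`{"k":"v"}`)
+//	req, _ := NewRequest("POST", "https://example.com", customBody)
+//	req.GetBody = func() (io.ReadCloser, error) {
+//		return io.NopCloser(bytes.NewReader(payload)), nil
+//	}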
+
+func (t *http2Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*http2ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.newClientConn(tconn, singleUse)
+}
+
+func (t *http2Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *t.TLSClientConfig.Clone()
+ }
+ if !http2strSliceContains(cfg.NextProtos, http2NextProtoTLS) {
+ cfg.NextProtos = append([]string{http2NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
+ return cfg
+}
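+
+// Sketch of the result, assuming an empty t.TLSClientConfig: the cloned
+// config gets NextProtos = ["h2"] (http2NextProtoTLS) prepended and
+// ServerName = host, so the caller's config is never mutated and ALPN
+// always offers h2.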
+
+func (t *http2Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) {
+ if t.DialTLSContext != nil {
+ return t.DialTLSContext(ctx, network, addr, tlsCfg)
+ } else if t.DialTLS != nil {
+ return t.DialTLS(network, addr, tlsCfg)
+ }
+
+ tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg)
+ if err != nil {
+ return nil, err
+ }
+ state := tlsCn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != http2NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, http2NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
+ }
+ return tlsCn, nil
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *http2Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *http2Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
+ }
+ return t.t1.ExpectContinueTimeout
+}
+
+func (t *http2Transport) maxDecoderHeaderTableSize() uint32 {
+ if v := t.MaxDecoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return http2initialHeaderTableSize
+}
+
+func (t *http2Transport) maxEncoderHeaderTableSize() uint32 {
+ if v := t.MaxEncoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return http2initialHeaderTableSize
+}
+
+func (t *http2Transport) NewClientConn(c net.Conn) (*http2ClientConn, error) {
+ return t.newClientConn(c, t.disableKeepAlives())
+}
+
+func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2ClientConn, error) {
+ cc := &http2ClientConn{
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: http2initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*http2clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
+ pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
+ }
+ if d := t.idleConnTimeout(); d != 0 {
+ cc.idleTimeout = d
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
+ }
+ if http2VerboseLogs {
+ t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
+ }
+
+ cc.cond = sync.NewCond(&cc.mu)
+ cc.flow.add(int32(http2initialWindowSize))
+
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
+ cc.bw = bufio.NewWriter(http2stickyErrWriter{
+ conn: c,
+ timeout: t.WriteByteTimeout,
+ err: &cc.werr,
+ })
+ cc.br = bufio.NewReader(c)
+ cc.fr = http2NewFramer(cc.bw, cc.br)
+ if t.maxFrameReadSize() != 0 {
+ cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
+ }
+ if t.CountError != nil {
+ cc.fr.countError = t.CountError
+ }
+ maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
+ cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+ cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+ cc.peerMaxHeaderTableSize = http2initialHeaderTableSize
+
+ if t.AllowHTTP {
+ cc.nextStreamID = 3
+ }
+
+ if cs, ok := c.(http2connectionStater); ok {
+ state := cs.ConnectionState()
+ cc.tlsState = &state
+ }
+
+ initialSettings := []http2Setting{
+ {ID: http2SettingEnablePush, Val: 0},
+ {ID: http2SettingInitialWindowSize, Val: http2transportDefaultStreamFlow},
+ }
+ if max := t.maxFrameReadSize(); max != 0 {
+ initialSettings = append(initialSettings, http2Setting{ID: http2SettingMaxFrameSize, Val: max})
+ }
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, http2Setting{ID: http2SettingMaxHeaderListSize, Val: max})
+ }
+ if maxHeaderTableSize != http2initialHeaderTableSize {
+ initialSettings = append(initialSettings, http2Setting{ID: http2SettingHeaderTableSize, Val: maxHeaderTableSize})
+ }
+
+ cc.bw.Write(http2clientPreface)
+ cc.fr.WriteSettings(initialSettings...)
+ cc.fr.WriteWindowUpdate(0, http2transportDefaultConnFlow)
+ cc.inflow.init(http2transportDefaultConnFlow + http2initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ cc.Close()
+ return nil, cc.werr
+ }
+
+ go cc.readLoop()
+ return cc, nil
+}
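+
+// For illustration, the writes above put the following on the wire before
+// the first request: the 24-byte client preface
+// ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"), a SETTINGS frame disabling server
+// push and advertising the per-stream window, and a connection-level
+// WINDOW_UPDATE opening the receive window beyond the 64 KB default.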
+
+func (cc *http2ClientConn) healthCheck() {
+ pingTimeout := cc.t.pingTimeout()
+	// We don't need to ping periodically from the health check, because the
+	// ClientConn's readLoop will trigger the health check again if no frame
+	// is received within the read-idle timeout.
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+ defer cancel()
+ cc.vlogf("http2: Transport sending health check")
+ err := cc.Ping(ctx)
+ if err != nil {
+ cc.vlogf("http2: Transport health check failure: %v", err)
+ cc.closeForLostPing()
+ } else {
+ cc.vlogf("http2: Transport health check success")
+ }
+}
+
+// SetDoNotReuse marks cc as not reusable for future HTTP requests.
+func (cc *http2ClientConn) SetDoNotReuse() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.doNotReuse = true
+}
+
+func (cc *http2ClientConn) setGoAway(f *http2GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ old := cc.goAway
+ cc.goAway = f
+
+ // Merge the previous and current GoAway error frames.
+ if cc.goAwayDebug == "" {
+ cc.goAwayDebug = string(f.DebugData())
+ }
+ if old != nil && old.ErrCode != http2ErrCodeNo {
+ cc.goAway.ErrCode = old.ErrCode
+ }
+ last := f.LastStreamID
+ for streamID, cs := range cc.streams {
+ if streamID > last {
+ cs.abortStreamLocked(http2errClientConnGotGoAway)
+ }
+ }
+}
+
+// CanTakeNewRequest reports whether the connection can take a new request,
+// meaning it has not been closed and has neither received nor sent a GOAWAY.
+//
+// If the caller is going to immediately make a new request on this
+// connection, use ReserveNewRequest instead.
+func (cc *http2ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+// ReserveNewRequest is like CanTakeNewRequest but also reserves a
+// concurrent stream in cc. The reservation is decremented on the
+// next call to RoundTrip.
+func (cc *http2ClientConn) ReserveNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if st := cc.idleStateLocked(); !st.canTakeNewRequest {
+ return false
+ }
+ cc.streamsReserved++
+ return true
+}
+
+// ClientConnState describes the state of a ClientConn.
+type http2ClientConnState struct {
+ // Closed is whether the connection is closed.
+ Closed bool
+
+ // Closing is whether the connection is in the process of
+ // closing. It may be closing due to shutdown, being a
+ // single-use connection, being marked as DoNotReuse, or
+ // having received a GOAWAY frame.
+ Closing bool
+
+ // StreamsActive is how many streams are active.
+ StreamsActive int
+
+ // StreamsReserved is how many streams have been reserved via
+ // ClientConn.ReserveNewRequest.
+ StreamsReserved int
+
+ // StreamsPending is how many requests have been sent in excess
+ // of the peer's advertised MaxConcurrentStreams setting and
+ // are waiting for other streams to complete.
+ StreamsPending int
+
+ // MaxConcurrentStreams is how many concurrent streams the
+ // peer advertised as acceptable. Zero means no SETTINGS
+ // frame has been received yet.
+ MaxConcurrentStreams uint32
+
+ // LastIdle, if non-zero, is when the connection last
+ // transitioned to idle state.
+ LastIdle time.Time
+}
+
+// State returns a snapshot of cc's state.
+func (cc *http2ClientConn) State() http2ClientConnState {
+ cc.wmu.Lock()
+ maxConcurrent := cc.maxConcurrentStreams
+ if !cc.seenSettings {
+ maxConcurrent = 0
+ }
+ cc.wmu.Unlock()
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return http2ClientConnState{
+ Closed: cc.closed,
+ Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
+ StreamsActive: len(cc.streams),
+ StreamsReserved: cc.streamsReserved,
+ StreamsPending: cc.pendingRequests,
+ LastIdle: cc.lastIdle,
+ MaxConcurrentStreams: maxConcurrent,
+ }
+}
+
+// clientConnIdleState describes the suitability of a client
+// connection to initiate a new RoundTrip request.
+type http2clientConnIdleState struct {
+ canTakeNewRequest bool
+}
+
+func (cc *http2ClientConn) idleState() http2clientConnIdleState {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.idleStateLocked()
+}
+
+func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) {
+ if cc.singleUse && cc.nextStreamID > 1 {
+ return
+ }
+ var maxConcurrentOkay bool
+ if cc.t.StrictMaxConcurrentStreams {
+ // We'll tell the caller we can take a new request to
+ // prevent the caller from dialing a new TCP
+ // connection, but then we'll block later before
+ // writing it.
+ maxConcurrentOkay = true
+ } else {
+ maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
+ }
+
+ st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
+ !cc.doNotReuse &&
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
+ return
+}
+
+func (cc *http2ClientConn) canTakeNewRequestLocked() bool {
+ st := cc.idleStateLocked()
+ return st.canTakeNewRequest
+}
+
+// tooIdleLocked reports whether this connection has been sitting idle
+// for too much wall time.
+func (cc *http2ClientConn) tooIdleLocked() bool {
+	// The Round(0) strips the monotonic clock reading so the
+ // times are compared based on their wall time. We don't want
+ // to reuse a connection that's been sitting idle during
+ // VM/laptop suspend if monotonic time was also frozen.
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
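+
+// A minimal sketch of the Round(0) trick, using only the time package:
+// stripping the monotonic reading forces a wall-clock comparison, which
+// keeps advancing across a suspend even while monotonic time is frozen:
+//
+//	t := time.Now()   // carries wall + monotonic readings
+//	w := t.Round(0)   // wall clock only
+//	_ = time.Since(w) // compared against the wall clock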
+
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *http2ClientConn) onIdleTimeout() {
+ cc.closeIfIdle()
+}
+
+func (cc *http2ClientConn) closeConn() {
+ t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
+ defer t.Stop()
+ cc.tconn.Close()
+}
+
+// A tls.Conn.Close can hang for a long time if the peer is unresponsive.
+// Try to shut it down more aggressively.
+func (cc *http2ClientConn) forceCloseConn() {
+ tc, ok := cc.tconn.(*tls.Conn)
+ if !ok {
+ return
+ }
+ if nc := tc.NetConn(); nc != nil {
+ nc.Close()
+ }
+}
+
+func (cc *http2ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 || cc.streamsReserved > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ nextID := cc.nextStreamID
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+ }
+ cc.closeConn()
+}
+
+func (cc *http2ClientConn) isDoNotReuseAndIdle() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.doNotReuse && len(cc.streams) == 0
+}
+
+var http2shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *http2ClientConn) Shutdown(ctx context.Context) error {
+ if err := cc.sendGoAway(); err != nil {
+ return err
+ }
+ // Wait for all in-flight streams to complete or connection to close
+ done := make(chan struct{})
+ cancelled := false // guarded by cc.mu
+ go func() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if len(cc.streams) == 0 || cc.closed {
+ cc.closed = true
+ close(done)
+ break
+ }
+ if cancelled {
+ break
+ }
+ cc.cond.Wait()
+ }
+ }()
+ http2shutdownEnterWaitStateHook()
+ select {
+ case <-done:
+ cc.closeConn()
+ return nil
+ case <-ctx.Done():
+ cc.mu.Lock()
+ // Free the goroutine above
+ cancelled = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ return ctx.Err()
+ }
+}
+
+func (cc *http2ClientConn) sendGoAway() error {
+ cc.mu.Lock()
+ closing := cc.closing
+ cc.closing = true
+ maxStreamID := cc.nextStreamID
+ cc.mu.Unlock()
+ if closing {
+ // GOAWAY sent already
+ return nil
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+	// Send a graceful shutdown frame to the server
+ if err := cc.fr.WriteGoAway(maxStreamID, http2ErrCodeNo, nil); err != nil {
+ return err
+ }
+ if err := cc.bw.Flush(); err != nil {
+ return err
+ }
+ // Prevent new requests
+ return nil
+}
+
+// closes the client connection immediately. In-flight requests are interrupted.
+// err is sent to streams.
+func (cc *http2ClientConn) closeForError(err error) {
+ cc.mu.Lock()
+ cc.closed = true
+ for _, cs := range cc.streams {
+ cs.abortStreamLocked(err)
+ }
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ cc.closeConn()
+}
+
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *http2ClientConn) Close() error {
+ err := errors.New("http2: client connection force closed via ClientConn.Close")
+ cc.closeForError(err)
+ return nil
+}
+
+// closes the client connection immediately. In-flight requests are interrupted.
+func (cc *http2ClientConn) closeForLostPing() {
+ err := errors.New("http2: client connection lost")
+ if f := cc.t.CountError; f != nil {
+ f("conn_close_lost_ping")
+ }
+ cc.closeForError(err)
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var http2errRequestCanceled = errors.New("net/http: request canceled")
+
+func http2commaSeparatedTrailers(req *Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http2canonicalHeader(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", fmt.Errorf("invalid Trailer key %q", k)
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
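+
+// For illustration: a req.Trailer with keys "x-checksum" and "X-Sig" yields
+// "X-Checksum,X-Sig" (canonicalized, then sorted), while a Trailer that
+// names "Content-Length" is rejected with an error.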
+
+func (cc *http2ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+	// No way to do this (yet?) with just an http2.Transport. Probably
+	// no need; Request.Cancel is the new way. We only need to support
+	// this for compatibility with the old http.Transport fields when
+	// we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers,
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func http2checkConnHeaders(req *Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
+ }
+ if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
+ }
+ if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !http2asciiEqualFold(vv[0], "close") && !http2asciiEqualFold(vv[0], "keep-alive")) {
+ return fmt.Errorf("http2: invalid Connection request header: %q", vv)
+ }
+ return nil
+}
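+
+// Hedged examples of the checks above: "Connection: upgrade" or
+// "Transfer-Encoding: gzip" is rejected here before any frames are written,
+// while "Connection: close", "Connection: keep-alive", and
+// "Transfer-Encoding: chunked" pass the check (and are simply dropped later
+// when the headers are encoded).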
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func http2actualContentLength(req *Request) int64 {
+ if req.Body == nil || req.Body == NoBody {
+ return 0
+ }
+ if req.ContentLength != 0 {
+ return req.ContentLength
+ }
+ return -1
+}
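+
+// For illustration (b stands for any non-nil body):
+//
+//	http2actualContentLength(&Request{Body: nil})                  //  0
+//	http2actualContentLength(&Request{Body: b, ContentLength: 42}) // 42
+//	http2actualContentLength(&Request{Body: b, ContentLength: 0})  // -1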
+
+func (cc *http2ClientConn) decrStreamReservations() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.decrStreamReservationsLocked()
+}
+
+func (cc *http2ClientConn) decrStreamReservationsLocked() {
+ if cc.streamsReserved > 0 {
+ cc.streamsReserved--
+ }
+}
+
+func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
+ ctx := req.Context()
+ cs := &http2clientStream{
+ cc: cc,
+ ctx: ctx,
+ reqCancel: req.Cancel,
+ isHead: req.Method == "HEAD",
+ reqBody: req.Body,
+ reqBodyContentLength: http2actualContentLength(req),
+ trace: httptrace.ContextClientTrace(ctx),
+ peerClosed: make(chan struct{}),
+ abort: make(chan struct{}),
+ respHeaderRecv: make(chan struct{}),
+ donec: make(chan struct{}),
+ }
+ go cs.doRequest(req)
+
+ waitDone := func() error {
+ select {
+ case <-cs.donec:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ }
+ }
+
+ handleResponseHeaders := func() (*Response, error) {
+ res := cs.res
+ if res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ cs.abortRequestBodyWrite()
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ if res.Body == http2noBody && http2actualContentLength(req) == 0 {
+ // If there isn't a request or response body still being
+ // written, then wait for the stream to be closed before
+ // RoundTrip returns.
+ if err := waitDone(); err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+ }
+
+ cancelRequest := func(cs *http2clientStream, err error) error {
+ cs.cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed
+ cs.cc.mu.Unlock()
+ // Wait for the request body to be closed.
+ //
+ // If nothing closed the body before now, abortStreamLocked
+ // will have started a goroutine to close it.
+ //
+ // Closing the body before returning avoids a race condition
+ // with net/http checking its readTrackingBody to see if the
+ // body was read from or closed. See golang/go#60041.
+ //
+ // The body is closed in a separate goroutine without the
+ // connection mutex held, but dropping the mutex before waiting
+ // will keep us from holding it indefinitely if the body
+ // close is slow for some reason.
+ if bodyClosed != nil {
+ <-bodyClosed
+ }
+ return err
+ }
+
+ for {
+ select {
+ case <-cs.respHeaderRecv:
+ return handleResponseHeaders()
+ case <-cs.abort:
+ select {
+ case <-cs.respHeaderRecv:
+ // If both cs.respHeaderRecv and cs.abort are signaling,
+ // pick respHeaderRecv. The server probably wrote the
+ // response and immediately reset the stream.
+ // golang.org/issue/49645
+ return handleResponseHeaders()
+ default:
+ waitDone()
+ return nil, cs.abortErr
+ }
+ case <-ctx.Done():
+ err := ctx.Err()
+ cs.abortStream(err)
+ return nil, cancelRequest(cs, err)
+ case <-cs.reqCancel:
+ cs.abortStream(http2errRequestCanceled)
+ return nil, cancelRequest(cs, http2errRequestCanceled)
+ }
+ }
+}
+
+// doRequest runs for the duration of the request lifetime.
+//
+// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
+func (cs *http2clientStream) doRequest(req *Request) {
+ err := cs.writeRequest(req)
+ cs.cleanupWriteRequest(err)
+}
+
+// writeRequest sends a request.
+//
+// It returns nil after the request is written, the response read,
+// and the request stream is half-closed by the peer.
+//
+// It returns non-nil if the request ends otherwise.
+// If the returned error is StreamError, the error Code may be used in resetting the stream.
+func (cs *http2clientStream) writeRequest(req *Request) (err error) {
+ cc := cs.cc
+ ctx := cs.ctx
+
+ if err := http2checkConnHeaders(req); err != nil {
+ return err
+ }
+
+ // Acquire the new-request lock by writing to reqHeaderMu.
+ // This lock guards the critical section covering allocating a new stream ID
+ // (requires mu) and creating the stream (requires wmu).
+ if cc.reqHeaderMu == nil {
+ panic("RoundTrip on uninitialized ClientConn") // for tests
+ }
+ select {
+ case cc.reqHeaderMu <- struct{}{}:
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ cc.mu.Lock()
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ cc.decrStreamReservationsLocked()
+ if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil {
+ cc.mu.Unlock()
+ <-cc.reqHeaderMu
+ return err
+ }
+ cc.addStreamLocked(cs) // assigns stream ID
+ if http2isConnectionCloseRequest(req) {
+ cc.doNotReuse = true
+ }
+ cc.mu.Unlock()
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ !cs.isHead {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ cs.requestedGzip = true
+ }
+
+ continueTimeout := cc.t.expectContinueTimeout()
+ if continueTimeout != 0 {
+ if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") {
+ continueTimeout = 0
+ } else {
+ cs.on100 = make(chan struct{}, 1)
+ }
+ }
+
+ // Past this point (where we send request headers), it is possible for
+ // RoundTrip to return successfully. Since the RoundTrip contract permits
+ // the caller to "mutate or reuse" the Request after closing the Response's Body,
+ // we must take care when referencing the Request from here on.
+ err = cs.encodeAndWriteHeaders(req)
+ <-cc.reqHeaderMu
+ if err != nil {
+ return err
+ }
+
+ hasBody := cs.reqBodyContentLength != 0
+ if !hasBody {
+ cs.sentEndStream = true
+ } else {
+ if continueTimeout != 0 {
+ http2traceWait100Continue(cs.trace)
+ timer := time.NewTimer(continueTimeout)
+ select {
+ case <-timer.C:
+ err = nil
+ case <-cs.on100:
+ err = nil
+ case <-cs.abort:
+ err = cs.abortErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-cs.reqCancel:
+ err = http2errRequestCanceled
+ }
+ timer.Stop()
+ if err != nil {
+ http2traceWroteRequest(cs.trace, err)
+ return err
+ }
+ }
+
+ if err = cs.writeRequestBody(req); err != nil {
+ if err != http2errStopReqBodyWrite {
+ http2traceWroteRequest(cs.trace, err)
+ return err
+ }
+ } else {
+ cs.sentEndStream = true
+ }
+ }
+
+ http2traceWroteRequest(cs.trace, err)
+
+ var respHeaderTimer <-chan time.Time
+ var respHeaderRecv chan struct{}
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ respHeaderRecv = cs.respHeaderRecv
+ }
+ // Wait until the peer half-closes its end of the stream,
+ // or until the request is aborted (via context, error, or otherwise),
+ // whichever comes first.
+ for {
+ select {
+ case <-cs.peerClosed:
+ return nil
+ case <-respHeaderTimer:
+ return http2errTimeout
+ case <-respHeaderRecv:
+ respHeaderRecv = nil
+ respHeaderTimer = nil // keep waiting for END_STREAM
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ }
+ }
+}
+
+func (cs *http2clientStream) encodeAndWriteHeaders(req *Request) error {
+ cc := cs.cc
+ ctx := cs.ctx
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ // If the request was canceled while waiting for cc.mu, just quit.
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ default:
+ }
+
+ // Encode headers.
+ //
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ trailers, err := http2commaSeparatedTrailers(req)
+ if err != nil {
+ return err
+ }
+ hasTrailers := trailers != ""
+ contentLen := http2actualContentLength(req)
+ hasBody := contentLen != 0
+ hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
+ if err != nil {
+ return err
+ }
+
+ // Write the request.
+ endStream := !hasBody && !hasTrailers
+ cs.sentHeaders = true
+ err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
+ http2traceWroteHeaders(cs.trace)
+ return err
+}
+
+// cleanupWriteRequest performs post-request tasks.
+//
+// If err (the result of writeRequest) is non-nil and the stream is not closed,
+// cleanupWriteRequest will send a reset to the peer.
+func (cs *http2clientStream) cleanupWriteRequest(err error) {
+ cc := cs.cc
+
+ if cs.ID == 0 {
+ // We were canceled before creating the stream, so return our reservation.
+ cc.decrStreamReservations()
+ }
+
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cc.mu.Lock()
+ mustCloseBody := false
+ if cs.reqBody != nil && cs.reqBodyClosed == nil {
+ mustCloseBody = true
+ cs.reqBodyClosed = make(chan struct{})
+ }
+ bodyClosed := cs.reqBodyClosed
+ cc.mu.Unlock()
+ if mustCloseBody {
+ cs.reqBody.Close()
+ close(bodyClosed)
+ }
+ if bodyClosed != nil {
+ <-bodyClosed
+ }
+
+ if err != nil && cs.sentEndStream {
+ // If the connection is closed immediately after the response is read,
+ // we may be aborted before finishing up here. If the stream was closed
+ // cleanly on both sides, there is no error.
+ select {
+ case <-cs.peerClosed:
+ err = nil
+ default:
+ }
+ }
+ if err != nil {
+ cs.abortStream(err) // possibly redundant, but harmless
+ if cs.sentHeaders {
+ if se, ok := err.(http2StreamError); ok {
+ if se.Cause != http2errFromPeer {
+ cc.writeStreamReset(cs.ID, se.Code, err)
+ }
+ } else {
+ cc.writeStreamReset(cs.ID, http2ErrCodeCancel, err)
+ }
+ }
+ cs.bufPipe.CloseWithError(err) // no-op if already closed
+ } else {
+ if cs.sentHeaders && !cs.sentEndStream {
+ cc.writeStreamReset(cs.ID, http2ErrCodeNo, nil)
+ }
+ cs.bufPipe.CloseWithError(http2errRequestCanceled)
+ }
+ if cs.ID != 0 {
+ cc.forgetStreamID(cs.ID)
+ }
+
+ cc.wmu.Lock()
+ werr := cc.werr
+ cc.wmu.Unlock()
+ if werr != nil {
+ cc.Close()
+ }
+
+ close(cs.donec)
+}
+
+// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
+// Must hold cc.mu.
+func (cc *http2ClientConn) awaitOpenSlotForStreamLocked(cs *http2clientStream) error {
+ for {
+ cc.lastActive = time.Now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ return http2errClientConnUnusable
+ }
+ cc.lastIdle = time.Time{}
+ if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
+ return nil
+ }
+ cc.pendingRequests++
+ cc.cond.Wait()
+ cc.pendingRequests--
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ default:
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *http2ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > maxFrameSize {
+ chunk = chunk[:maxFrameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(http2HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ cc.bw.Flush()
+ return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ http2errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+	// abort request body write, but send a stream reset of CANCEL.
+ http2errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+ http2errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
+)
+
+// frameScratchBufferLen returns the length of a buffer to use for
+// outgoing request bodies to read/write to/from.
+//
+// It returns max(1, min(peer's advertised max frame size,
+// Request.ContentLength+1, 512KB)).
+func (cs *http2clientStream) frameScratchBufferLen(maxFrameSize int) int {
+ const max = 512 << 10
+ n := int64(maxFrameSize)
+ if n > max {
+ n = max
+ }
+ if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {
+ // Add an extra byte past the declared content-length to
+ // give the caller's Request.Body io.Reader a chance to
+ // give us more bytes than they declared, so we can catch it
+ // early.
+ n = cl + 1
+ }
+ if n < 1 {
+ return 1
+ }
+ return int(n) // doesn't truncate; max is 512K
+}
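+
+// Worked example, assuming the peer advertised a 1 MB max frame size: the
+// 512 KB cap wins, so a body of unknown length (-1) gets a 512 KB buffer,
+// while a declared 10-byte body gets an 11-byte buffer (content length + 1,
+// so an over-long body is caught on the extra byte).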
+
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// (0 KB, 16 KB], (16 KB, 32 KB], (32 KB, 64 KB], (64 KB, 128 KB], (128 KB, 256 KB],
+// (256 KB, 512 KB], (512 KB, infinity)
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var http2bufPools [7]sync.Pool // of *[]byte
+
+func http2bufPoolIndex(size int) int {
+ if size <= 16384 {
+ return 0
+ }
+ size -= 1
+ bits := bits.Len(uint(size))
+ index := bits - 14
+ if index >= len(http2bufPools) {
+ return len(http2bufPools) - 1
+ }
+ return index
+}
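+
+// For illustration, the mapping above sends size 16384 to pool 0,
+// 16385 to pool 1 (bits.Len(16384) == 15, minus 14), 65536 to pool 2,
+// 524288 to pool 5, and anything larger to pool 6, the "infinity" pool.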
+
+func (cs *http2clientStream) writeRequestBody(req *Request) (err error) {
+ cc := cs.cc
+ body := cs.reqBody
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+
+ hasTrailers := req.Trailer != nil
+ remainLen := cs.reqBodyContentLength
+ hasContentLen := remainLen != -1
+
+ cc.mu.Lock()
+ maxFrameSize := int(cc.maxFrameSize)
+ cc.mu.Unlock()
+
+ // Scratch buffer for reading into & writing from.
+ scratchLen := cs.frameScratchBufferLen(maxFrameSize)
+ var buf []byte
+ index := http2bufPoolIndex(scratchLen)
+ if bp, ok := http2bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+ defer http2bufPools[index].Put(bp)
+ buf = *bp
+ } else {
+ buf = make([]byte, scratchLen)
+ defer http2bufPools[index].Put(&buf)
+ }
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if hasContentLen {
+ remainLen -= int64(n)
+ if remainLen == 0 && err == nil {
+ // The request body's Content-Length was predeclared and
+ // we just finished reading it all, but the underlying io.Reader
+ // returned the final chunk with a nil error (which is one of
+ // the two valid things a Reader can do at EOF). Because we'd prefer
+ // to send the END_STREAM bit early, double-check that we're actually
+ // at EOF. Subsequent reads should return (0, EOF) at this point.
+ // If either value is different, we return an error in one of two ways below.
+ var scratch [1]byte
+ var n1 int
+ n1, err = body.Read(scratch[:])
+ remainLen -= int64(n1)
+ }
+ if remainLen < 0 {
+ err = http2errReqBodyTooLong
+ return err
+ }
+ }
+ if err != nil {
+ cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed != nil
+ cc.mu.Unlock()
+ switch {
+ case bodyClosed:
+ return http2errStopReqBodyWrite
+ case err == io.EOF:
+ sawEOF = true
+ err = nil
+ default:
+ return err
+ }
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ if err != nil {
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or
+		// opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ if sentEnd {
+ // Already sent END_STREAM (which implies we have no
+ // trailers) and flushed, because currently all
+ // WriteData frames above get a flush. So we're done.
+ return nil
+ }
+
+ // Since the RoundTrip contract permits the caller to "mutate or reuse"
+ // a request after the Response's Body is closed, verify that this hasn't
+ // happened before accessing the trailers.
+ cc.mu.Lock()
+ trailer := req.Trailer
+ err = cs.abortErr
+ cc.mu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ var trls []byte
+ if len(trailer) > 0 {
+ trls, err = cc.encodeTrailers(trailer)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Two ways to send END_STREAM: either with trailers, or
+ // with an empty DATA frame.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *http2clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+ cc := cs.cc
+ ctx := cs.ctx
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if cc.closed {
+ return 0, http2errClientConnClosed
+ }
+ if cs.reqBodyClosed != nil {
+ return 0, http2errStopReqBodyWrite
+ }
+ select {
+ case <-cs.abort:
+ return 0, cs.abortErr
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ case <-cs.reqCancel:
+ return 0, http2errRequestCanceled
+ default:
+ }
+ if a := cs.flow.available(); a > 0 {
+ take := a
+		if int(take) > maxBytes {
+			take = int32(maxBytes) // can't truncate int; take is int32
+		}
+ if take > int32(cc.maxFrameSize) {
+ take = int32(cc.maxFrameSize)
+ }
+ cs.flow.take(take)
+ return take, nil
+ }
+ cc.cond.Wait()
+ }
+}
+
+var http2errNilRequestURL = errors.New("http2: Request.URL is nil")
+
+// requires cc.wmu be held.
+func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
+ cc.hbuf.Reset()
+ if req.URL == nil {
+ return nil, http2errNilRequestURL
+ }
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+ host, err := httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return nil, err
+ }
+ if !httpguts.ValidHostHeader(host) {
+ return nil, errors.New("http2: invalid Host header")
+ }
+
+ var path string
+ if req.Method != "CONNECT" {
+ path = req.URL.RequestURI()
+ if !http2validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !http2validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return nil, fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
+
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return nil, fmt.Errorf("invalid HTTP header name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // Don't include the value in the error, because it may be sensitive.
+ return nil, fmt.Errorf("invalid HTTP header value for header %q", k)
+ }
+ }
+ }
+
+ enumerateHeaders := func(f func(name, value string)) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production, see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ f(":authority", host)
+ m := req.Method
+ if m == "" {
+ m = MethodGet
+ }
+ f(":method", m)
+ if req.Method != "CONNECT" {
+ f(":path", path)
+ f(":scheme", req.URL.Scheme)
+ }
+ if trailers != "" {
+ f("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ if http2asciiEqualFold(k, "host") || http2asciiEqualFold(k, "content-length") {
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ } else if http2asciiEqualFold(k, "connection") ||
+ http2asciiEqualFold(k, "proxy-connection") ||
+ http2asciiEqualFold(k, "transfer-encoding") ||
+ http2asciiEqualFold(k, "upgrade") ||
+ http2asciiEqualFold(k, "keep-alive") {
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ } else if http2asciiEqualFold(k, "user-agent") {
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ } else if http2asciiEqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
+ }
+
+ for _, v := range vv {
+ f(k, v)
+ }
+ }
+ if http2shouldSendReqContentLength(req.Method, contentLength) {
+ f("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ f("accept-encoding", "gzip")
+ }
+ if !didUA {
+ f("user-agent", http2defaultUserAgent)
+ }
+ }
+
+ // Do a first pass over the headers counting bytes to ensure
+ // we don't exceed cc.peerMaxHeaderListSize. This is done as a
+ // separate pass before encoding the headers to prevent
+ // modifying the hpack state.
+ hlSize := uint64(0)
+ enumerateHeaders(func(name, value string) {
+ hf := hpack.HeaderField{Name: name, Value: value}
+ hlSize += uint64(hf.Size())
+ })
+
+ if hlSize > cc.peerMaxHeaderListSize {
+ return nil, http2errRequestHeaderListSize
+ }
+
+ trace := httptrace.ContextClientTrace(req.Context())
+ traceHeaders := http2traceHasWroteHeaderField(trace)
+
+ // Header list size is ok. Write the headers.
+ enumerateHeaders(func(name, value string) {
+ name, ascii := http2lowerHeader(name)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ return
+ }
+ cc.writeHeader(name, value)
+ if traceHeaders {
+ http2traceWroteHeaderField(trace, name, value)
+ }
+ })
+
+ return cc.hbuf.Bytes(), nil
+}
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func http2shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+	// It also matters little for http2 either way, given END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
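+
+// For illustration: ("POST", 0) and ("PUT", 0) report true, so an explicit
+// "content-length: 0" is sent for those methods; ("GET", 0) reports false,
+// as does any method with an unknown length of -1.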
+
+// requires cc.wmu be held.
+func (cc *http2ClientConn) encodeTrailers(trailer Header) ([]byte, error) {
+ cc.hbuf.Reset()
+
+ hlSize := uint64(0)
+ for k, vv := range trailer {
+ for _, v := range vv {
+ hf := hpack.HeaderField{Name: k, Value: v}
+ hlSize += uint64(hf.Size())
+ }
+ }
+ if hlSize > cc.peerMaxHeaderListSize {
+ return nil, http2errRequestHeaderListSize
+ }
+
+ for k, vv := range trailer {
+ lowKey, ascii := http2lowerHeader(k)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ continue
+ }
+		// Transfer-Encoding, etc. have already been filtered at the
+ // start of RoundTrip
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes(), nil
+}
+
+func (cc *http2ClientConn) writeHeader(name, value string) {
+ if http2VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type http2resAndError struct {
+ _ http2incomparable
+ res *Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *http2ClientConn) addStreamLocked(cs *http2clientStream) {
+ cs.flow.add(int32(cc.initialWindowSize))
+ cs.flow.setConnFlow(&cc.flow)
+ cs.inflow.init(http2transportDefaultStreamFlow)
+ cs.ID = cc.nextStreamID
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ if cs.ID == 0 {
+ panic("assigned stream ID 0")
+ }
+}
+
+func (cc *http2ClientConn) forgetStreamID(id uint32) {
+ cc.mu.Lock()
+ slen := len(cc.streams)
+ delete(cc.streams, id)
+ if len(cc.streams) != slen-1 {
+ panic("forgetting unknown stream id")
+ }
+ cc.lastActive = time.Now()
+ if len(cc.streams) == 0 && cc.idleTimer != nil {
+ cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = time.Now()
+ }
+ // Wake up writeRequestBody via clientStream.awaitFlowControl and
+ // wake up RoundTrip if there is a pending request.
+ cc.cond.Broadcast()
+
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
+ if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
+ }
+ cc.closed = true
+ defer cc.closeConn()
+ }
+
+ cc.mu.Unlock()
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type http2clientConnReadLoop struct {
+ _ http2incomparable
+ cc *http2ClientConn
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *http2ClientConn) readLoop() {
+ rl := &http2clientConnReadLoop{cc: cc}
+ defer rl.cleanup()
+ cc.readerErr = rl.run()
+ if ce, ok := cc.readerErr.(http2ConnectionError); ok {
+ cc.wmu.Lock()
+ cc.fr.WriteGoAway(0, http2ErrCode(ce), nil)
+ cc.wmu.Unlock()
+ }
+}
+
+// GoAwayError is returned by the Transport when the server closes the
+// TCP connection after sending a GOAWAY frame.
+type http2GoAwayError struct {
+ LastStreamID uint32
+ ErrCode http2ErrCode
+ DebugData string
+}
+
+func (e http2GoAwayError) Error() string {
+ return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
+ e.LastStreamID, e.ErrCode, e.DebugData)
+}
+
+func http2isEOFOrNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ ne, ok := err.(*net.OpError)
+ return ok && ne.Op == "read"
+}
+
+func (rl *http2clientConnReadLoop) cleanup() {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ defer cc.closeConn()
+ defer close(cc.readerDone)
+
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ err := cc.readerErr
+ cc.mu.Lock()
+ if cc.goAway != nil && http2isEOFOrNetReadError(err) {
+ err = http2GoAwayError{
+ LastStreamID: cc.goAway.LastStreamID,
+ ErrCode: cc.goAway.ErrCode,
+ DebugData: cc.goAwayDebug,
+ }
+ } else if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ cc.closed = true
+
+ for _, cs := range cc.streams {
+ select {
+ case <-cs.peerClosed:
+ // The server closed the stream before closing the conn,
+ // so no need to interrupt it.
+ default:
+ cs.abortStreamLocked(err)
+ }
+ }
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+// countReadFrameError calls Transport.CountError with a string
+// representing err.
+func (cc *http2ClientConn) countReadFrameError(err error) {
+ f := cc.t.CountError
+ if f == nil || err == nil {
+ return
+ }
+ if ce, ok := err.(http2ConnectionError); ok {
+ errCode := http2ErrCode(ce)
+ f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
+ return
+ }
+ if errors.Is(err, io.EOF) {
+ f("read_frame_eof")
+ return
+ }
+ if errors.Is(err, io.ErrUnexpectedEOF) {
+ f("read_frame_unexpected_eof")
+ return
+ }
+ if errors.Is(err, http2ErrFrameTooLarge) {
+ f("read_frame_too_large")
+ return
+ }
+ f("read_frame_other")
+}
+
+func (rl *http2clientConnReadLoop) run() error {
+ cc := rl.cc
+ gotSettings := false
+ readIdleTimeout := cc.t.ReadIdleTimeout
+ var t *time.Timer
+ if readIdleTimeout != 0 {
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
+ defer t.Stop()
+ }
+ for {
+ f, err := cc.fr.ReadFrame()
+ if t != nil {
+ t.Reset(readIdleTimeout)
+ }
+ if err != nil {
+ cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
+ }
+ if se, ok := err.(http2StreamError); ok {
+ if cs := rl.streamByID(se.StreamID); cs != nil {
+ if se.Cause == nil {
+ se.Cause = cc.fr.errDetail
+ }
+ rl.endStreamError(cs, se)
+ }
+ continue
+ } else if err != nil {
+ cc.countReadFrameError(err)
+ return err
+ }
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport received %s", http2summarizeFrame(f))
+ }
+ if !gotSettings {
+ if _, ok := f.(*http2SettingsFrame); !ok {
+ cc.logf("protocol error: received %T before a SETTINGS frame", f)
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ gotSettings = true
+ }
+
+ switch f := f.(type) {
+ case *http2MetaHeadersFrame:
+ err = rl.processHeaders(f)
+ case *http2DataFrame:
+ err = rl.processData(f)
+ case *http2GoAwayFrame:
+ err = rl.processGoAway(f)
+ case *http2RSTStreamFrame:
+ err = rl.processResetStream(f)
+ case *http2SettingsFrame:
+ err = rl.processSettings(f)
+ case *http2PushPromiseFrame:
+ err = rl.processPushPromise(f)
+ case *http2WindowUpdateFrame:
+ err = rl.processWindowUpdate(f)
+ case *http2PingFrame:
+ err = rl.processPing(f)
+ default:
+ cc.logf("Transport: unhandled response frame type %T", f)
+ }
+ if err != nil {
+ if http2VerboseLogs {
+ cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, http2summarizeFrame(f), err)
+ }
+ return err
+ }
+ }
+}
+
+func (rl *http2clientConnReadLoop) processHeaders(f *http2MetaHeadersFrame) error {
+ cs := rl.streamByID(f.StreamID)
+ if cs == nil {
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
+ return nil
+ }
+ if cs.readClosed {
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ Cause: errors.New("protocol error: headers after END_STREAM"),
+ })
+ return nil
+ }
+ if !cs.firstByte {
+ if cs.trace != nil {
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
+ http2traceFirstResponseByte(cs.trace)
+ }
+ cs.firstByte = true
+ }
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ return rl.processTrailers(cs, f)
+ }
+
+ res, err := rl.handleResponse(cs, f)
+ if err != nil {
+ if _, ok := err.(http2ConnectionError); ok {
+ return err
+ }
+ // Any other error type is a stream error.
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ Cause: err,
+ })
+ return nil // return nil from process* funcs to keep conn alive
+ }
+ if res == nil {
+ // (nil, nil) special case. See handleResponse docs.
+ return nil
+ }
+ cs.resTrailer = &res.Trailer
+ cs.res = res
+ close(cs.respHeaderRecv)
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+// handleResponse may return a nil error or a ConnectionError. Any other
+// error value is treated as a StreamError with code ErrCodeProtocol, with
+// the returned error as the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 1xx responses).
+func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http2MetaHeadersFrame) (*Response, error) {
+ if f.Truncated {
+ return nil, http2errResponseHeaderListSize
+ }
+
+ status := f.PseudoValue("status")
+ if status == "" {
+ return nil, errors.New("malformed response from server: missing status pseudo header")
+ }
+ statusCode, err := strconv.Atoi(status)
+ if err != nil {
+ return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
+ }
+
+ regularFields := f.RegularFields()
+ strs := make([]string, len(regularFields))
+ header := make(Header, len(regularFields))
+ res := &Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: header,
+ StatusCode: statusCode,
+ Status: status + " " + StatusText(statusCode),
+ }
+ for _, hf := range regularFields {
+ key := http2canonicalHeader(hf.Name)
+ if key == "Trailer" {
+ t := res.Trailer
+ if t == nil {
+ t = make(Header)
+ res.Trailer = t
+ }
+ http2foreachHeaderElement(hf.Value, func(v string) {
+ t[http2canonicalHeader(v)] = nil
+ })
+ } else {
+ vv := header[key]
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
+ // Set the capacity on strs[0] to 1, so any future append
+ // won't extend the slice into the other strings.
+ vv, strs = strs[:1:1], strs[1:]
+ vv[0] = hf.Value
+ header[key] = vv
+ } else {
+ header[key] = append(vv, hf.Value)
+ }
+ }
+ }
+
+ if statusCode >= 100 && statusCode <= 199 {
+ if f.StreamEnded() {
+ return nil, errors.New("1xx informational response with END_STREAM flag")
+ }
+ cs.num1xx++
+ const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
+ if cs.num1xx > max1xxResponses {
+ return nil, errors.New("http2: too many 1xx informational responses")
+ }
+ if fn := cs.get1xxTraceFunc(); fn != nil {
+ if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
+ return nil, err
+ }
+ }
+ if statusCode == 100 {
+ http2traceGot100Continue(cs.trace)
+ select {
+ case cs.on100 <- struct{}{}:
+ default:
+ }
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
+ }
+
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
+ res.ContentLength = int64(cl)
+ } else {
+			// TODO: care? unlike http/1, it won't mess up our framing, so it's
+			// safer smuggling-wise to ignore.
+		}
+	} else if len(clens) > 1 {
+		// TODO: care? unlike http/1, it won't mess up our framing, so it's
+		// safer smuggling-wise to ignore.
+ } else if f.StreamEnded() && !cs.isHead {
+ res.ContentLength = 0
+ }
+
+ if cs.isHead {
+ res.Body = http2noBody
+ return res, nil
+ }
+
+ if f.StreamEnded() {
+ if res.ContentLength > 0 {
+ res.Body = http2missingBody{}
+ } else {
+ res.Body = http2noBody
+ }
+ return res, nil
+ }
+
+ cs.bufPipe.setBuffer(&http2dataBuffer{expected: res.ContentLength})
+ cs.bytesRemain = res.ContentLength
+ res.Body = http2transportResponseBody{cs}
+
+ if cs.requestedGzip && http2asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &http2gzipReader{body: res.Body}
+ res.Uncompressed = true
+ }
+ return res, nil
+}
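+
+// Illustrative examples of the body selection above (no new behavior): a
+// HEAD response always gets http2noBody; a response carrying
+// "Content-Length: 5" whose HEADERS frame has END_STREAM set gets
+// http2missingBody{}, so the first Read reports io.ErrUnexpectedEOF.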
+
+func (rl *http2clientConnReadLoop) processTrailers(cs *http2clientStream, f *http2MetaHeadersFrame) error {
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !f.StreamEnded() {
+		// We expect that any HEADERS frame carrying trailers
+		// also has END_STREAM set.
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ if len(f.PseudoFields()) > 0 {
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+
+ trailer := make(Header)
+ for _, hf := range f.RegularFields() {
+ key := http2canonicalHeader(hf.Name)
+ trailer[key] = append(trailer[key], hf.Value)
+ }
+ cs.trailer = trailer
+
+ rl.endStream(cs)
+ return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser.
+type http2transportResponseBody struct {
+ cs *http2clientStream
+}
+
+func (b http2transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
+ n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cs.abortStream(err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
+ if n == 0 {
+ // No flow control tokens to send back.
+ return
+ }
+
+ cc.mu.Lock()
+ connAdd := cc.inflow.add(n)
+ var streamAdd int32
+ if err == nil { // No need to refresh if the stream is over or failed.
+ streamAdd = cs.inflow.add(n)
+ }
+ cc.mu.Unlock()
+
+ if connAdd != 0 || streamAdd != 0 {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if connAdd != 0 {
+ cc.fr.WriteWindowUpdate(0, http2mustUint31(connAdd))
+ }
+ if streamAdd != 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, http2mustUint31(streamAdd))
+ }
+ cc.bw.Flush()
+ }
+ return
+}
+
+var http2errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b http2transportResponseBody) Close() error {
+ cs := b.cs
+ cc := cs.cc
+
+ cs.bufPipe.BreakWithError(http2errClosedResponseBody)
+ cs.abortStream(http2errClosedResponseBody)
+
+ unread := cs.bufPipe.Len()
+ if unread > 0 {
+ cc.mu.Lock()
+ // Return connection-level flow control.
+ connAdd := cc.inflow.add(unread)
+ cc.mu.Unlock()
+
+ // TODO(dneil): Acquiring this mutex can block indefinitely.
+ // Move flow control return to a goroutine?
+ cc.wmu.Lock()
+ // Return connection-level flow control.
+ if connAdd > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+
+ select {
+ case <-cs.donec:
+ case <-cs.ctx.Done():
+ // See golang/go#49366: The net/http package can cancel the
+ // request context after the response body is fully read.
+ // Don't treat this as an error.
+ return nil
+ case <-cs.reqCancel:
+ return http2errRequestCanceled
+ }
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
+ cc := rl.cc
+ cs := rl.streamByID(f.StreamID)
+ data := f.Data()
+ if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+
+ // But at least return their flow control:
+ if f.Length > 0 {
+ cc.mu.Lock()
+ ok := cc.inflow.take(f.Length)
+ connAdd := cc.inflow.add(int(f.Length))
+ cc.mu.Unlock()
+ if !ok {
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ if connAdd > 0 {
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+ }
+ return nil
+ }
+ if cs.readClosed {
+ cc.logf("protocol error: received DATA after END_STREAM")
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ })
+ return nil
+ }
+ if !cs.firstByte {
+ cc.logf("protocol error: received DATA before a HEADERS frame")
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ })
+ return nil
+ }
+ if f.Length > 0 {
+ if cs.isHead && len(data) > 0 {
+ cc.logf("protocol error: received DATA on a HEAD request")
+ rl.endStreamError(cs, http2StreamError{
+ StreamID: f.StreamID,
+ Code: http2ErrCodeProtocol,
+ })
+ return nil
+ }
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if !http2takeInflows(&cc.inflow, &cs.inflow, f.Length) {
+ cc.mu.Unlock()
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ var refund int
+ if pad := int(f.Length) - len(data); pad > 0 {
+ refund += pad
+ }
+
+ didReset := false
+ var err error
+ if len(data) > 0 {
+ if _, err = cs.bufPipe.Write(data); err != nil {
+ // Return len(data) now if the stream is already closed,
+ // since data will never be read.
+ didReset = true
+ refund += len(data)
+ }
+ }
+
+ sendConn := cc.inflow.add(refund)
+ var sendStream int32
+ if !didReset {
+ sendStream = cs.inflow.add(refund)
+ }
+ cc.mu.Unlock()
+
+ if sendConn > 0 || sendStream > 0 {
+ cc.wmu.Lock()
+ if sendConn > 0 {
+ cc.fr.WriteWindowUpdate(0, uint32(sendConn))
+ }
+ if sendStream > 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
+
+ if err != nil {
+ rl.endStreamError(cs, err)
+ return nil
+ }
+ }
+
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) endStream(cs *http2clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ if !cs.readClosed {
+ cs.readClosed = true
+ // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a
+ // race condition: The caller can read io.EOF from Response.Body
+ // and close the body before we close cs.peerClosed, causing
+ // cleanupWriteRequest to send a RST_STREAM.
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers)
+ close(cs.peerClosed)
+ }
+}
+
+func (rl *http2clientConnReadLoop) endStreamError(cs *http2clientStream, err error) {
+ cs.readAborted = true
+ cs.abortStream(err)
+}
+
+func (rl *http2clientConnReadLoop) streamByID(id uint32) *http2clientStream {
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs := rl.cc.streams[id]
+ if cs != nil && !cs.readAborted {
+ return cs
+ }
+ return nil
+}
+
+func (cs *http2clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
+func (rl *http2clientConnReadLoop) processGoAway(f *http2GoAwayFrame) error {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ if f.ErrCode != 0 {
+		// TODO: deal with GOAWAY more, particularly the error code.
+ cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ if fn := cc.t.CountError; fn != nil {
+ fn("recv_goaway_" + f.ErrCode.stringToken())
+ }
+ }
+ cc.setGoAway(f)
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processSettings(f *http2SettingsFrame) error {
+ cc := rl.cc
+ // Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
+ // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ if err := rl.processSettingsNoWrite(f); err != nil {
+ return err
+ }
+ if !f.IsAck() {
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ }
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processSettingsNoWrite(f *http2SettingsFrame) error {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ if f.IsAck() {
+ if cc.wantSettingsAck {
+ cc.wantSettingsAck = false
+ return nil
+ }
+ return http2ConnectionError(http2ErrCodeProtocol)
+ }
+
+ var seenMaxConcurrentStreams bool
+ err := f.ForeachSetting(func(s http2Setting) error {
+ switch s.ID {
+ case http2SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case http2SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ seenMaxConcurrentStreams = true
+ case http2SettingMaxHeaderListSize:
+ cc.peerMaxHeaderListSize = uint64(s.Val)
+ case http2SettingInitialWindowSize:
+ // Values above the maximum flow-control
+ // window size of 2^31-1 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ if s.Val > math.MaxInt32 {
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+
+ // Adjust flow control of currently-open
+ // frames by the difference of the old initial
+ // window size and this one.
+ delta := int32(s.Val) - int32(cc.initialWindowSize)
+ for _, cs := range cc.streams {
+ cs.flow.add(delta)
+ }
+ cc.cond.Broadcast()
+
+ cc.initialWindowSize = s.Val
+ case http2SettingHeaderTableSize:
+ cc.henc.SetMaxDynamicTableSize(s.Val)
+ cc.peerMaxHeaderTableSize = s.Val
+ default:
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if !cc.seenSettings {
+ if !seenMaxConcurrentStreams {
+			// This was the server's initial SETTINGS frame and it
+			// didn't contain a MAX_CONCURRENT_STREAMS field, so
+			// raise the number of concurrent streams this
+			// connection can establish to our default.
+ cc.maxConcurrentStreams = http2defaultMaxConcurrentStreams
+ }
+ cc.seenSettings = true
+ }
+
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processWindowUpdate(f *http2WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := rl.streamByID(f.StreamID)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return http2ConnectionError(http2ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *http2clientConnReadLoop) processResetStream(f *http2RSTStreamFrame) error {
+ cs := rl.streamByID(f.StreamID)
+ if cs == nil {
+ // TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ serr := http2streamError(cs.ID, f.ErrCode)
+ serr.Cause = http2errFromPeer
+ if f.ErrCode == http2ErrCodeProtocol {
+ rl.cc.SetDoNotReuse()
+ }
+ if fn := cs.cc.t.CountError; fn != nil {
+ fn("recv_rststream_" + f.ErrCode.stringToken())
+ }
+ cs.abortStream(serr)
+
+ cs.bufPipe.CloseWithError(serr)
+ return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *http2ClientConn) Ping(ctx context.Context) error {
+ c := make(chan struct{})
+ // Generate a random payload
+ var p [8]byte
+ for {
+ if _, err := rand.Read(p[:]); err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ // check for dup before insert
+ if _, found := cc.pings[p]; !found {
+ cc.pings[p] = c
+ cc.mu.Unlock()
+ break
+ }
+ cc.mu.Unlock()
+ }
+ errc := make(chan error, 1)
+ go func() {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(false, p); err != nil {
+ errc <- err
+ return
+ }
+ if err := cc.bw.Flush(); err != nil {
+ errc <- err
+ return
+ }
+ }()
+ select {
+ case <-c:
+ return nil
+ case err := <-errc:
+ return err
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.readerDone:
+ // connection closed
+ return cc.readerErr
+ }
+}
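+
+// A minimal usage sketch for Ping (illustrative only; the timeout and the
+// caller's reaction are assumptions, not part of Ping's contract):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := cc.Ping(ctx); err != nil {
+//		// Treat the connection as unhealthy and stop reusing it.
+//	}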
+
+func (rl *http2clientConnReadLoop) processPing(f *http2PingFrame) error {
+ if f.IsAck() {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ // If ack, notify listener if any
+ if c, ok := cc.pings[f.Data]; ok {
+ close(c)
+ delete(cc.pings, f.Data)
+ }
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *http2clientConnReadLoop) processPushPromise(f *http2PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return http2ConnectionError(http2ErrCodeProtocol)
+}
+
+func (cc *http2ClientConn) writeStreamReset(streamID uint32, code http2ErrCode, err error) {
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
+ cc.wmu.Lock()
+ cc.fr.WriteRSTStream(streamID, code)
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+}
+
+var (
+ http2errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ http2errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
+)
+
+func (cc *http2ClientConn) logf(format string, args ...interface{}) {
+ cc.t.logf(format, args...)
+}
+
+func (cc *http2ClientConn) vlogf(format string, args ...interface{}) {
+ cc.t.vlogf(format, args...)
+}
+
+func (t *http2Transport) vlogf(format string, args ...interface{}) {
+ if http2VerboseLogs {
+ t.logf(format, args...)
+ }
+}
+
+func (t *http2Transport) logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+var http2noBody io.ReadCloser = http2noBodyReader{}
+
+type http2noBodyReader struct{}
+
+func (http2noBodyReader) Close() error { return nil }
+
+func (http2noBodyReader) Read([]byte) (int, error) { return 0, io.EOF }
+
+type http2missingBody struct{}
+
+func (http2missingBody) Close() error { return nil }
+
+func (http2missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF }
+
+func http2strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type http2erringRoundTripper struct{ err error }
+
+func (rt http2erringRoundTripper) RoundTripErr() error { return rt.err }
+
+func (rt http2erringRoundTripper) RoundTrip(*Request) (*Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read.
+type http2gzipReader struct {
+ _ http2incomparable
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *http2gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *http2gzipReader) Close() error {
+ if err := gz.body.Close(); err != nil {
+ return err
+ }
+ gz.zerr = fs.ErrClosed
+ return nil
+}
+
+type http2errorReader struct{ err error }
+
+func (r http2errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func http2isConnectionCloseRequest(req *Request) bool {
+ return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol,
+// converting panics into errors.
+func http2registerHTTPSProtocol(t *Transport, rt http2noDialH2RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type http2noDialH2RoundTripper struct{ *http2Transport }
+
+func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) {
+ res, err := rt.http2Transport.RoundTrip(req)
+ if http2isNoCachedConnError(err) {
+ return nil, ErrSkipAltProtocol
+ }
+ return res, err
+}
+
+func (t *http2Transport) idleConnTimeout() time.Duration {
+ if t.t1 != nil {
+ return t.t1.IdleConnTimeout
+ }
+ return 0
+}
+
+func http2traceGetConn(req *Request, hostPort string) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GetConn == nil {
+ return
+ }
+ trace.GetConn(hostPort)
+}
+
+func http2traceGotConn(req *Request, cc *http2ClientConn, reused bool) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ ci.Reused = reused
+ cc.mu.Lock()
+ ci.WasIdle = len(cc.streams) == 0 && reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Since(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func http2traceWroteHeaders(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func http2traceGot100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func http2traceWait100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func http2traceWroteRequest(trace *httptrace.ClientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func http2traceFirstResponseByte(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+ return trace != nil && trace.WroteHeaderField != nil
+}
+
+func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(k, []string{v})
+ }
+}
+
+func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+ if trace != nil {
+ return trace.Got1xxResponse
+ }
+ return nil
+}
+
+// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
+// connection.
+func (t *http2Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
+ dialer := &tls.Dialer{
+ Config: cfg,
+ }
+ cn, err := dialer.DialContext(ctx, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
+ return tlsCn, nil
+}
+
+// writeFramer is implemented by any type that is used to write frames.
+type http2writeFramer interface {
+ writeFrame(http2writeContext) error
+
+	// staysWithinBuffer reports whether this writer promises that
+	// it will write no more than size bytes, and that it
+	// won't Flush the write context.
+ staysWithinBuffer(size int) bool
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type http2writeContext interface {
+ Framer() *http2Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// writeEndsStream reports whether w writes a frame that will transition
+// the stream to a half-closed local state. This returns false for RST_STREAM,
+// which closes the entire stream (not just the local half).
+func http2writeEndsStream(w http2writeFramer) bool {
+ switch v := w.(type) {
+ case *http2writeData:
+ return v.endStream
+ case *http2writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("writeEndsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type http2flushFrameWriter struct{}
+
+func (http2flushFrameWriter) writeFrame(ctx http2writeContext) error {
+ return ctx.Flush()
+}
+
+func (http2flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
+type http2writeSettings []http2Setting
+
+func (s http2writeSettings) staysWithinBuffer(max int) bool {
+ const settingSize = 6 // uint16 + uint32
+	return http2frameHeaderLen+settingSize*len(s) <= max
+}
+
+func (s http2writeSettings) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteSettings([]http2Setting(s)...)
+}
+
+type http2writeGoAway struct {
+ maxStreamID uint32
+ code http2ErrCode
+}
+
+func (p *http2writeGoAway) writeFrame(ctx http2writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ return err
+}
+
+func (*http2writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
+type http2writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *http2writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *http2writeData) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+func (w *http2writeData) staysWithinBuffer(max int) bool {
+ return http2frameHeaderLen+len(w.p) <= max
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type http2handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp http2handlerPanicRST) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, http2ErrCodeInternal)
+}
+
+func (hp http2handlerPanicRST) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
+
+func (se http2StreamError) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+func (se http2StreamError) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
+
+type http2writePingAck struct{ pf *http2PingFrame }
+
+func (w http2writePingAck) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+func (w http2writePingAck) staysWithinBuffer(max int) bool {
+ return http2frameHeaderLen+len(w.pf.Data) <= max
+}
+
+type http2writeSettingsAck struct{}
+
+func (http2writeSettingsAck) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+func (http2writeSettingsAck) staysWithinBuffer(max int) bool { return http2frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func http2splitHeaderBlock(ctx http2writeContext, headerBlock []byte, fn func(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+	// there's little point. Most headers are small anyway (so we
+	// generally won't have CONTINUATION frames), and each extra frame
+	// costs only 9 bytes.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+ return err
+ }
+ first = false
+ }
+ return nil
+}
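+
+// Worked example (illustrative): a 40000-byte header block is split into
+// fragments of 16384, 16384, and 7232 bytes, with fn invoked as
+// (firstFrag, lastFrag) = (true, false), (false, false), (false, true);
+// the writeHeaderBlock callbacks below turn this into one HEADERS frame
+// followed by two CONTINUATION frames.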
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type http2writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func http2encKV(enc *hpack.Encoder, k, v string) {
+ if http2VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *http2writeResHeaders) staysWithinBuffer(max int) bool {
+ // TODO: this is a common one. It'd be nice to return true
+ // here and get into the fast path if we could be clever and
+ // calculate the size fast enough, or at least a conservative
+ // upper bound that usually fires. (Maybe if w.h and
+ // w.trailers are nil, so we don't need to enumerate it.)
+ // Otherwise I'm afraid that just calculating the length to
+ // answer this question would be slower than the ~2µs benefit.
+ return false
+}
+
+func (w *http2writeResHeaders) writeFrame(ctx http2writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ http2encKV(enc, ":status", http2httpCodeString(w.httpResCode))
+ }
+
+ http2encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ http2encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ http2encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ http2encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *http2writeResHeaders) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type http2writePushPromise struct {
+ streamID uint32 // pusher stream
+ method string // for :method
+ url *url.URL // for :scheme, :authority, :path
+ h Header
+
+ // Creates an ID for a pushed stream. This runs on serveG just before
+ // the frame is written. The returned ID is copied to promisedID.
+ allocatePromisedID func() (uint32, error)
+ promisedID uint32
+}
+
+func (w *http2writePushPromise) staysWithinBuffer(max int) bool {
+ // TODO: see writeResHeaders.staysWithinBuffer
+ return false
+}
+
+func (w *http2writePushPromise) writeFrame(ctx http2writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ http2encKV(enc, ":method", w.method)
+ http2encKV(enc, ":scheme", w.url.Scheme)
+ http2encKV(enc, ":authority", w.url.Host)
+ http2encKV(enc, ":path", w.url.RequestURI())
+ http2encodeHeaders(enc, w.h, nil)
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 {
+ panic("unexpected empty hpack")
+ }
+
+ return http2splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *http2writePushPromise) writeHeaderBlock(ctx http2writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WritePushPromise(http2PushPromiseParam{
+ StreamID: w.streamID,
+ PromiseID: w.promisedID,
+ BlockFragment: frag,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+type http2write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w http2write100ContinueHeadersFrame) writeFrame(ctx http2writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ http2encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(http2HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+func (w http2write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+ // Sloppy but conservative:
+ return 9+2*(len(":status")+len("100")) <= max
+}
+
+type http2writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu http2writeWindowUpdate) staysWithinBuffer(max int) bool { return http2frameHeaderLen+4 <= max }
+
+func (wu http2writeWindowUpdate) writeFrame(ctx http2writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
+func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) {
+ if keys == nil {
+ sorter := http2sorterPool.Get().(*http2sorter)
+		// Using defer here, since the keys returned from the
+		// sorter.Keys method are only valid until the sorter
+		// is returned:
+ defer http2sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k, ascii := http2lowerHeader(k)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ continue
+ }
+ if !http2validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ http2encKV(enc, k, v)
+ }
+ }
+}
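+
+// Illustrative only: h = Header{"Foo-Bar": {"a", "b"}} encodes the two
+// fields "foo-bar: a" and "foo-bar: b", while a Transfer-Encoding value
+// other than "trailers" is silently dropped, per the connection-specific
+// header rules noted above.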
+
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type http2WriteScheduler interface {
+ // OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is
+ // already open -- the call may panic.
+ OpenStream(streamID uint32, options http2OpenStreamOptions)
+
+ // CloseStream closes a stream in the write scheduler. Any frames queued on
+ // this stream should be discarded. It is illegal to call this on a stream
+ // that is not open -- the call may panic.
+ CloseStream(streamID uint32)
+
+ // AdjustStream adjusts the priority of the given stream. This may be called
+ // on a stream that has not yet been opened or has been closed. Note that
+ // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+ // https://tools.ietf.org/html/rfc7540#section-5.1
+ AdjustStream(streamID uint32, priority http2PriorityParam)
+
+ // Push queues a frame in the scheduler. In most cases, this will not be
+ // called with wr.StreamID()!=0 unless that stream is currently open. The one
+ // exception is RST_STREAM frames, which may be sent on idle or closed streams.
+ Push(wr http2FrameWriteRequest)
+
+ // Pop dequeues the next frame to write. Returns false if no frames can
+ // be written. Frames with a given wr.StreamID() are Pop'd in the same
+ // order they are Push'd, except RST_STREAM frames. No frames should be
+ // discarded except by CloseStream.
+ Pop() (wr http2FrameWriteRequest, ok bool)
+}
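+
+// The intended call pattern, as a minimal sketch (the concrete scheduler
+// and the frame value wr are placeholders, not mandated by the interface):
+//
+//	ws := http2newRoundRobinWriteScheduler()
+//	ws.OpenStream(1, http2OpenStreamOptions{})
+//	ws.Push(wr) // wr is an http2FrameWriteRequest for stream 1
+//	if next, ok := ws.Pop(); ok {
+//		_ = next // hand next to the framer, then reply on next.done
+//	}
+//	ws.CloseStream(1)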
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type http2OpenStreamOptions struct {
+ // PusherID is zero if the stream was initiated by the client. Otherwise,
+ // PusherID names the stream that pushed the newly opened stream.
+ PusherID uint32
+}
+
+// FrameWriteRequest is a request to write a frame.
+type http2FrameWriteRequest struct {
+ // write is the interface value that does the writing, once the
+ // WriteScheduler has selected this frame to write. The write
+ // functions are all defined in write.go.
+ write http2writeFramer
+
+ // stream is the stream on which this frame will be written.
+ // nil for non-stream frames like PING and SETTINGS.
+ // nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
+ stream *http2stream
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr http2FrameWriteRequest) StreamID() uint32 {
+ if wr.stream == nil {
+ if se, ok := wr.write.(http2StreamError); ok {
+ // (*serverConn).resetStream doesn't set
+ // stream because it doesn't necessarily have
+ // one. So special case this type of write
+ // message.
+ return se.StreamID
+ }
+ return 0
+ }
+ return wr.stream.id
+}
+
+// isControl reports whether wr is a control frame for MaxQueuedControlFrames
+// purposes. That includes non-stream frames and RST_STREAM frames.
+func (wr http2FrameWriteRequest) isControl() bool {
+ return wr.stream == nil
+}
+
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr http2FrameWriteRequest) DataSize() int {
+ if wd, ok := wr.write.(*http2writeData); ok {
+ return len(wd.p)
+ }
+ return 0
+}
+
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr http2FrameWriteRequest) Consume(n int32) (http2FrameWriteRequest, http2FrameWriteRequest, int) {
+ var empty http2FrameWriteRequest
+
+ // Non-DATA frames are always consumed whole.
+ wd, ok := wr.write.(*http2writeData)
+ if !ok || len(wd.p) == 0 {
+ return wr, empty, 1
+ }
+
+ // Might need to split after applying limits.
+ allowed := wr.stream.flow.available()
+ if n < allowed {
+ allowed = n
+ }
+ if wr.stream.sc.maxFrameSize < allowed {
+ allowed = wr.stream.sc.maxFrameSize
+ }
+ if allowed <= 0 {
+ return empty, empty, 0
+ }
+ if len(wd.p) > int(allowed) {
+ wr.stream.flow.take(allowed)
+ consumed := http2FrameWriteRequest{
+ stream: wr.stream,
+ write: &http2writeData{
+ streamID: wd.streamID,
+ p: wd.p[:allowed],
+ // Even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false.
+ endStream: false,
+ },
+ // Our caller is blocking on the final DATA frame, not
+ // this intermediate frame, so no need to wait.
+ done: nil,
+ }
+ rest := http2FrameWriteRequest{
+ stream: wr.stream,
+ write: &http2writeData{
+ streamID: wd.streamID,
+ p: wd.p[allowed:],
+ endStream: wd.endStream,
+ },
+ done: wr.done,
+ }
+ return consumed, rest, 2
+ }
+
+ // The frame is consumed whole.
+ // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+ wr.stream.flow.take(int32(len(wd.p)))
+ return wr, empty, 1
+}
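+
+// Worked example (illustrative): with a queued 10000-byte DATA frame, a
+// stream flow-control window of 6000 bytes, and maxFrameSize 16384,
+// Consume(6000) returns (consumed, rest, 2), where consumed carries
+// p[:6000] and rest carries p[6000:]; 6000 bytes are taken from the
+// stream's flow budget.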
+
+// String is for debugging only.
+func (wr http2FrameWriteRequest) String() string {
+ var des string
+ if s, ok := wr.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wr.write)
+ }
+ return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
+}
+
+// replyToWriter sends err to wr.done and panics if the send must block.
+// This does nothing if wr.done is nil.
+func (wr *http2FrameWriteRequest) replyToWriter(err error) {
+ if wr.done == nil {
+ return
+ }
+ select {
+ case wr.done <- err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
+ }
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
+}
+
+// writeQueue is used by implementations of WriteScheduler.
+type http2writeQueue struct {
+ s []http2FrameWriteRequest
+ prev, next *http2writeQueue
+}
+
+func (q *http2writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *http2writeQueue) push(wr http2FrameWriteRequest) {
+ q.s = append(q.s, wr)
+}
+
+func (q *http2writeQueue) shift() http2FrameWriteRequest {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wr := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = http2FrameWriteRequest{}
+ q.s = q.s[:len(q.s)-1]
+ return wr
+}
+
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *http2writeQueue) consume(n int32) (http2FrameWriteRequest, bool) {
+ if len(q.s) == 0 {
+ return http2FrameWriteRequest{}, false
+ }
+ consumed, rest, numresult := q.s[0].Consume(n)
+ switch numresult {
+ case 0:
+ return http2FrameWriteRequest{}, false
+ case 1:
+ q.shift()
+ case 2:
+ q.s[0] = rest
+ }
+ return consumed, true
+}
+
+type http2writeQueuePool []*http2writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *http2writeQueuePool) put(q *http2writeQueue) {
+ for i := range q.s {
+ q.s[i] = http2FrameWriteRequest{}
+ }
+ q.s = q.s[:0]
+ *p = append(*p, q)
+}
+
+// get returns an empty writeQueue.
+func (p *http2writeQueuePool) get() *http2writeQueue {
+ ln := len(*p)
+ if ln == 0 {
+ return new(http2writeQueue)
+ }
+ x := ln - 1
+ q := (*p)[x]
+ (*p)[x] = nil
+ *p = (*p)[:x]
+ return q
+}
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const http2priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type http2PriorityWriteSchedulerConfig struct {
+ // MaxClosedNodesInTree controls the maximum number of closed streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // "It is possible for a stream to become closed while prioritization
+ // information ... is in transit. ... This potentially creates suboptimal
+ // prioritization, since the stream could be given a priority that is
+ // different from what is intended. To avoid these problems, an endpoint
+ // SHOULD retain stream prioritization state for a period after streams
+ // become closed. The longer state is retained, the lower the chance that
+ // streams are assigned incorrect or default priority values."
+ MaxClosedNodesInTree int
+
+ // MaxIdleNodesInTree controls the maximum number of idle streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // Similarly, streams that are in the "idle" state can be assigned
+ // priority or become a parent of other streams. This allows for the
+ // creation of a grouping node in the dependency tree, which enables
+ // more flexible expressions of priority. Idle streams begin with a
+ // default priority (Section 5.3.5).
+ MaxIdleNodesInTree int
+
+ // ThrottleOutOfOrderWrites enables write throttling to help ensure that
+ // data is delivered in priority order. This works around a race where
+ // stream B depends on stream A and both streams are about to call Write
+ // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+ // write as much data from B as possible, but this is suboptimal because A
+ // is a higher-priority stream. With throttling enabled, we write a small
+ // amount of data from B to minimize the amount of bandwidth that B can
+ // steal from A.
+ ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http2WriteScheduler {
+ if cfg == nil {
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+ cfg = &http2PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 10,
+ MaxIdleNodesInTree: 10,
+ ThrottleOutOfOrderWrites: false,
+ }
+ }
+
+ ws := &http2priorityWriteScheduler{
+ nodes: make(map[uint32]*http2priorityNode),
+ maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+ maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
+ enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
+ }
+ ws.nodes[0] = &ws.root
+ if cfg.ThrottleOutOfOrderWrites {
+ ws.writeThrottleLimit = 1024
+ } else {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ return ws
+}
+
+type http2priorityNodeState int
+
+const (
+ http2priorityNodeOpen http2priorityNodeState = iota
+ http2priorityNodeClosed
+ http2priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type http2priorityNode struct {
+ q http2writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state http2priorityNodeState // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+
+ // These links form the priority tree.
+ parent *http2priorityNode
+ kids *http2priorityNode // start of the kids list
+ prev, next *http2priorityNode // doubly-linked list of siblings
+}
+
+func (n *http2priorityNode) setParent(parent *http2priorityNode) {
+ if n == parent {
+ panic("setParent to self")
+ }
+ if n.parent == parent {
+ return
+ }
+ // Unlink from current parent.
+ if parent := n.parent; parent != nil {
+ if n.prev == nil {
+ parent.kids = n.next
+ } else {
+ n.prev.next = n.next
+ }
+ if n.next != nil {
+ n.next.prev = n.prev
+ }
+ }
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+ n.parent = parent
+ if parent == nil {
+ n.next = nil
+ n.prev = nil
+ } else {
+ n.next = parent.kids
+ n.prev = nil
+ if n.next != nil {
+ n.next.prev = n
+ }
+ parent.kids = n
+ }
+}
+
+func (n *http2priorityNode) addBytes(b int64) {
+ n.bytes += b
+ for ; n != nil; n = n.parent {
+ n.subtreeBytes += b
+ }
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNode, f func(*http2priorityNode, bool) bool) bool {
+ if !n.q.empty() && f(n, openParent) {
+ return true
+ }
+ if n.kids == nil {
+ return false
+ }
+
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
+ if n.id != 0 {
+ openParent = openParent || (n.state == http2priorityNodeOpen)
+ }
+
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
+ w := n.kids.weight
+ needSort := false
+ for k := n.kids.next; k != nil; k = k.next {
+ if k.weight != w {
+ needSort = true
+ break
+ }
+ }
+ if !needSort {
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
+ *tmp = (*tmp)[:0]
+ for n.kids != nil {
+ *tmp = append(*tmp, n.kids)
+ n.kids.setParent(nil)
+ }
+ sort.Sort(http2sortPriorityNodeSiblings(*tmp))
+ for i := len(*tmp) - 1; i >= 0; i-- {
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+ }
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+}
+
+type http2sortPriorityNodeSiblings []*http2priorityNode
+
+func (z http2sortPriorityNodeSiblings) Len() int { return len(z) }
+
+func (z http2sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+
+func (z http2sortPriorityNodeSiblings) Less(i, k int) bool {
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
+ wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ if bi == 0 && bk == 0 {
+ return wi >= wk
+ }
+ if bk == 0 {
+ return false
+ }
+ return bi/bk <= wi/wk
+}
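+
+// Worked example (illustrative): for siblings i and k with weights 15 and 7
+// (effective 16 and 8) that have sent 1000 and 600 subtree bytes, we get
+// bi/bk ~ 1.67 <= wi/wk = 2, so i sorts first and bandwidth stays roughly
+// proportional to weight.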
+
+type http2priorityWriteScheduler struct {
+ // root is the root of the priority tree, where root.id = 0.
+ // The root queues control frames that are not associated with any stream.
+ root http2priorityNode
+
+ // nodes maps stream ids to priority tree nodes.
+ nodes map[uint32]*http2priorityNode
+
+ // maxID is the maximum stream id in nodes.
+ maxID uint32
+
+ // lists of nodes that have been closed or are idle, but are kept in
+ // the tree for improved prioritization. When the lengths exceed either
+ // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+ closedNodes, idleNodes []*http2priorityNode
+
+ // From the config.
+ maxClosedNodesInTree int
+ maxIdleNodesInTree int
+ writeThrottleLimit int32
+ enableWriteThrottle bool
+
+ // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+ tmp []*http2priorityNode
+
+ // pool of empty queues for reuse.
+ queuePool http2writeQueuePool
+}
+
+func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
+	// The stream may currently be idle, but it must not already be open or closed.
+ if curr := ws.nodes[streamID]; curr != nil {
+ if curr.state != http2priorityNodeIdle {
+ panic(fmt.Sprintf("stream %d already opened", streamID))
+ }
+ curr.state = http2priorityNodeOpen
+ return
+ }
+
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
+ parent := ws.nodes[options.PusherID]
+ if parent == nil {
+ parent = &ws.root
+ }
+ n := &http2priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: http2priorityDefaultWeight,
+ state: http2priorityNodeOpen,
+ }
+ n.setParent(parent)
+ ws.nodes[streamID] = n
+ if streamID > ws.maxID {
+ ws.maxID = streamID
+ }
+}
+
+func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) {
+ if streamID == 0 {
+ panic("violation of WriteScheduler interface: cannot close stream 0")
+ }
+ if ws.nodes[streamID] == nil {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+ }
+ if ws.nodes[streamID].state != http2priorityNodeOpen {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+ }
+
+ n := ws.nodes[streamID]
+ n.state = http2priorityNodeClosed
+ n.addBytes(-n.bytes)
+
+ q := n.q
+ ws.queuePool.put(&q)
+ n.q.s = nil
+ if ws.maxClosedNodesInTree > 0 {
+ ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+ } else {
+ ws.removeNode(n)
+ }
+}
+
+func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
+ if streamID == 0 {
+ panic("adjustPriority on root")
+ }
+
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
+ n := ws.nodes[streamID]
+ if n == nil {
+ if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+ return
+ }
+ ws.maxID = streamID
+ n = &http2priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: http2priorityDefaultWeight,
+ state: http2priorityNodeIdle,
+ }
+ n.setParent(&ws.root)
+ ws.nodes[streamID] = n
+ ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+ }
+
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
+ parent := ws.nodes[priority.StreamDep]
+ if parent == nil {
+ n.setParent(&ws.root)
+ n.weight = http2priorityDefaultWeight
+ return
+ }
+
+ // Ignore if the client tries to make a node its own parent.
+ if n == parent {
+ return
+ }
+
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
+ for x := parent.parent; x != nil; x = x.parent {
+ if x == n {
+ parent.setParent(n.parent)
+ break
+ }
+ }
+
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
+ if priority.Exclusive {
+ k := parent.kids
+ for k != nil {
+ next := k.next
+ if k != n {
+ k.setParent(n)
+ }
+ k = next
+ }
+ }
+
+ n.setParent(parent)
+ n.weight = priority.Weight
+}
+
+func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) {
+ var n *http2priorityNode
+ if wr.isControl() {
+ n = &ws.root
+ } else {
+ id := wr.StreamID()
+ n = ws.nodes[id]
+ if n == nil {
+			// id is an idle or closed stream. wr should not be a HEADERS or
+			// DATA frame, but it may be an RST_STREAM. In that case, we push
+			// wr onto the root rather than creating a new priorityNode.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ n = &ws.root
+ }
+ }
+ n.q.push(wr)
+}
+
+func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNode, openParent bool) bool {
+ limit := int32(math.MaxInt32)
+ if openParent {
+ limit = ws.writeThrottleLimit
+ }
+ wr, ok = n.q.consume(limit)
+ if !ok {
+ return false
+ }
+ n.addBytes(int64(wr.DataSize()))
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
+ if openParent {
+ ws.writeThrottleLimit += 1024
+ if ws.writeThrottleLimit < 0 {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ } else if ws.enableWriteThrottle {
+ ws.writeThrottleLimit = 1024
+ }
+ return true
+ })
+ return wr, ok
+}
+
+func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorityNode, maxSize int, n *http2priorityNode) {
+ if maxSize == 0 {
+ return
+ }
+ if len(*list) == maxSize {
+ // Remove the oldest node, then shift left.
+ ws.removeNode((*list)[0])
+ x := (*list)[1:]
+ copy(*list, x)
+ *list = (*list)[:len(x)]
+ }
+ *list = append(*list, n)
+}
+
+func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) {
+ for k := n.kids; k != nil; k = k.next {
+ k.setParent(n.parent)
+ }
+ n.setParent(nil)
+ delete(ws.nodes, n.id)
+}
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func http2NewRandomWriteScheduler() http2WriteScheduler {
+ return &http2randomWriteScheduler{sq: make(map[uint32]*http2writeQueue)}
+}
+
+type http2randomWriteScheduler struct {
+ // zero are frames not associated with a specific stream.
+ zero http2writeQueue
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // When a stream is idle, closed, or emptied, it's deleted
+ // from the map.
+ sq map[uint32]*http2writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool http2writeQueuePool
+}
+
+func (ws *http2randomWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
+ // no-op: idle streams are not tracked
+}
+
+func (ws *http2randomWriteScheduler) CloseStream(streamID uint32) {
+ q, ok := ws.sq[streamID]
+ if !ok {
+ return
+ }
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *http2randomWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
+ // no-op: priorities are ignored
+}
+
+func (ws *http2randomWriteScheduler) Push(wr http2FrameWriteRequest) {
+ if wr.isControl() {
+ ws.zero.push(wr)
+ return
+ }
+ id := wr.StreamID()
+ q, ok := ws.sq[id]
+ if !ok {
+ q = ws.queuePool.get()
+ ws.sq[id] = q
+ }
+ q.push(wr)
+}
+
+func (ws *http2randomWriteScheduler) Pop() (http2FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ // Iterate over all non-idle streams until finding one that can be consumed.
+ for streamID, q := range ws.sq {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if q.empty() {
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+ }
+ return wr, true
+ }
+ }
+ return http2FrameWriteRequest{}, false
+}
+
+type http2roundRobinWriteScheduler struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control http2writeQueue
+
+ // streams maps stream ID to a queue.
+ streams map[uint32]*http2writeQueue
+
+ // stream queues are stored in a circular linked list.
+ // head is the next stream to write, or nil if there are no streams open.
+ head *http2writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool http2writeQueuePool
+}
+
+// newRoundRobinWriteScheduler constructs a new write scheduler.
+// The round-robin scheduler prioritizes control frames
+// like SETTINGS and PING over DATA frames.
+// When there are no control frames to send, it performs a round-robin
+// selection from the ready streams.
+func http2newRoundRobinWriteScheduler() http2WriteScheduler {
+ ws := &http2roundRobinWriteScheduler{
+ streams: make(map[uint32]*http2writeQueue),
+ }
+ return ws
+}
+
+func (ws *http2roundRobinWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
+ if ws.streams[streamID] != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = q
+ if ws.head == nil {
+ ws.head = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.head.prev
+ q.next = ws.head
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *http2roundRobinWriteScheduler) CloseStream(streamID uint32) {
+ q := ws.streams[streamID]
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.head = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.head == q {
+ ws.head = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *http2roundRobinWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {}
+
+func (ws *http2roundRobinWriteScheduler) Push(wr http2FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()]
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *http2roundRobinWriteScheduler) Pop() (http2FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+ if ws.head == nil {
+ return http2FrameWriteRequest{}, false
+ }
+ q := ws.head
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ ws.head = q.next
+ return wr, true
+ }
+ q = q.next
+ if q == ws.head {
+ break
+ }
+ }
+ return http2FrameWriteRequest{}, false
+}
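The ring manipulation above is the entire scheduling policy, so a standalone sketch may help. This is a minimal model, not the bundled code: a hypothetical node type stands in for http2writeQueue, and only the insert-before-head and advance-on-pop moves are shown.

package main

import "fmt"

// node stands in for http2writeQueue in this sketch.
type node struct {
	id         uint32
	prev, next *node
}

type ring struct{ head *node }

// open inserts n just before head, i.e. at the tail of the rotation,
// mirroring OpenStream above.
func (r *ring) open(n *node) {
	if r.head == nil {
		r.head = n
		n.next, n.prev = n, n
		return
	}
	n.prev = r.head.prev
	n.next = r.head
	n.prev.next = n
	n.next.prev = n
}

// pop returns the current head and advances it, so each stream
// gets a turn in order, mirroring Pop above.
func (r *ring) pop() *node {
	n := r.head
	if n != nil {
		r.head = n.next
	}
	return n
}

func main() {
	r := &ring{}
	for id := uint32(1); id <= 3; id++ {
		r.open(&node{id: id})
	}
	for i := 0; i < 6; i++ {
		fmt.Print(r.pop().id, " ") // 1 2 3 1 2 3
	}
	fmt.Println()
}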
diff --git a/contrib/go/_std_1.22/src/net/http/h2_error.go b/contrib/go/_std_1.22/src/net/http/h2_error.go
new file mode 100644
index 0000000000..2c0b21ec07
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/h2_error.go
@@ -0,0 +1,37 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !nethttpomithttp2
+
+package http
+
+import (
+ "reflect"
+)
+
+func (e http2StreamError) As(target any) bool {
+ dst := reflect.ValueOf(target).Elem()
+ dstType := dst.Type()
+ if dstType.Kind() != reflect.Struct {
+ return false
+ }
+ src := reflect.ValueOf(e)
+ srcType := src.Type()
+ numField := srcType.NumField()
+ if dstType.NumField() != numField {
+ return false
+ }
+ for i := 0; i < numField; i++ {
+ sf := srcType.Field(i)
+ df := dstType.Field(i)
+ if sf.Name != df.Name || !sf.Type.ConvertibleTo(df.Type) {
+ return false
+ }
+ }
+ for i := 0; i < numField; i++ {
+ df := dst.Field(i)
+ df.Set(src.Field(i).Convert(df.Type()))
+ }
+ return true
+}
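The reflective As above lets errors.As convert this bundled, prefixed error type into any structurally identical target, notably golang.org/x/net/http2.StreamError. A hedged client-side sketch; the URL is a placeholder, and the branch only fires when the peer actually resets the stream:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	resp, err := http.Get("https://example.com/") // placeholder endpoint
	if err == nil {
		resp.Body.Close()
		return
	}
	var se http2.StreamError
	if errors.As(err, &se) {
		// Populated field by field by the As method above.
		fmt.Printf("stream %d reset: %v\n", se.StreamID, se.Code)
	}
}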
diff --git a/contrib/go/_std_1.22/src/net/http/header.go b/contrib/go/_std_1.22/src/net/http/header.go
new file mode 100644
index 0000000000..9d0f3a125d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/header.go
@@ -0,0 +1,280 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "io"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// A Header represents the key-value pairs in an HTTP header.
+//
+// The keys should be in canonical form, as returned by
+// [CanonicalHeaderKey].
+type Header map[string][]string
+
+// Add adds the key, value pair to the header.
+// It appends to any existing values associated with key.
+// The key is case insensitive; it is canonicalized by
+// [CanonicalHeaderKey].
+func (h Header) Add(key, value string) {
+ textproto.MIMEHeader(h).Add(key, value)
+}
+
+// Set sets the header entries associated with key to the
+// single element value. It replaces any existing values
+// associated with key. The key is case insensitive; it is
+// canonicalized by [textproto.CanonicalMIMEHeaderKey].
+// To use non-canonical keys, assign to the map directly.
+func (h Header) Set(key, value string) {
+ textproto.MIMEHeader(h).Set(key, value)
+}
+
+// Get gets the first value associated with the given key. If
+// there are no values associated with the key, Get returns "".
+// It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is
+// used to canonicalize the provided key. Get assumes that all
+// keys are stored in canonical form. To use non-canonical keys,
+// access the map directly.
+func (h Header) Get(key string) string {
+ return textproto.MIMEHeader(h).Get(key)
+}
+
+// Values returns all values associated with the given key.
+// It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is
+// used to canonicalize the provided key. To use non-canonical
+// keys, access the map directly.
+// The returned slice is not a copy.
+func (h Header) Values(key string) []string {
+ return textproto.MIMEHeader(h).Values(key)
+}
+
+// get is like Get, but key must already be in CanonicalHeaderKey form.
+func (h Header) get(key string) string {
+ if v := h[key]; len(v) > 0 {
+ return v[0]
+ }
+ return ""
+}
+
+// has reports whether h has the provided key defined, even if it's
+// set to a 0-length slice.
+func (h Header) has(key string) bool {
+ _, ok := h[key]
+ return ok
+}
+
+// Del deletes the values associated with key.
+// The key is case insensitive; it is canonicalized by
+// [CanonicalHeaderKey].
+func (h Header) Del(key string) {
+ textproto.MIMEHeader(h).Del(key)
+}
+
+// Write writes a header in wire format.
+func (h Header) Write(w io.Writer) error {
+ return h.write(w, nil)
+}
+
+func (h Header) write(w io.Writer, trace *httptrace.ClientTrace) error {
+ return h.writeSubset(w, nil, trace)
+}
+
+// Clone returns a copy of h or nil if h is nil.
+func (h Header) Clone() Header {
+ if h == nil {
+ return nil
+ }
+
+ // Find total number of values.
+ nv := 0
+ for _, vv := range h {
+ nv += len(vv)
+ }
+ sv := make([]string, nv) // shared backing array for headers' values
+ h2 := make(Header, len(h))
+ for k, vv := range h {
+ if vv == nil {
+ // Preserve nil values. ReverseProxy distinguishes
+ // between nil and zero-length header values.
+ h2[k] = nil
+ continue
+ }
+ n := copy(sv, vv)
+ h2[k] = sv[:n:n]
+ sv = sv[n:]
+ }
+ return h2
+}
+
+var timeFormats = []string{
+ TimeFormat,
+ time.RFC850,
+ time.ANSIC,
+}
+
+// ParseTime parses a time header (such as the Date: header),
+// trying each of the three formats allowed by HTTP/1.1:
+// [TimeFormat], [time.RFC850], and [time.ANSIC].
+func ParseTime(text string) (t time.Time, err error) {
+ for _, layout := range timeFormats {
+ t, err = time.Parse(layout, text)
+ if err == nil {
+ return
+ }
+ }
+ return
+}
+
+var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ")
+
+// stringWriter implements WriteString on a Writer.
+type stringWriter struct {
+ w io.Writer
+}
+
+func (w stringWriter) WriteString(s string) (n int, err error) {
+ return w.w.Write([]byte(s))
+}
+
+type keyValues struct {
+ key string
+ values []string
+}
+
+// A headerSorter implements sort.Interface by sorting a []keyValues
+// by key. It's used as a pointer, so it can fit in a sort.Interface
+// interface value without allocation.
+type headerSorter struct {
+ kvs []keyValues
+}
+
+func (s *headerSorter) Len() int { return len(s.kvs) }
+func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] }
+func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key }
+
+var headerSorterPool = sync.Pool{
+ New: func() any { return new(headerSorter) },
+}
+
+// sortedKeyValues returns h's keys sorted in the returned kvs
+// slice. The headerSorter used to sort is also returned, for possible
+// return to headerSorterPool.
+func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) {
+ hs = headerSorterPool.Get().(*headerSorter)
+ if cap(hs.kvs) < len(h) {
+ hs.kvs = make([]keyValues, 0, len(h))
+ }
+ kvs = hs.kvs[:0]
+ for k, vv := range h {
+ if !exclude[k] {
+ kvs = append(kvs, keyValues{k, vv})
+ }
+ }
+ hs.kvs = kvs
+ sort.Sort(hs)
+ return kvs, hs
+}
+
+// WriteSubset writes a header in wire format.
+// If exclude is not nil, keys where exclude[key] == true are not written.
+// Keys are not canonicalized before checking the exclude map.
+func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
+ return h.writeSubset(w, exclude, nil)
+}
+
+func (h Header) writeSubset(w io.Writer, exclude map[string]bool, trace *httptrace.ClientTrace) error {
+ ws, ok := w.(io.StringWriter)
+ if !ok {
+ ws = stringWriter{w}
+ }
+ kvs, sorter := h.sortedKeyValues(exclude)
+ var formattedVals []string
+ for _, kv := range kvs {
+ if !httpguts.ValidHeaderFieldName(kv.key) {
+ // This could be an error. In the common case of
+ // writing response headers, however, we have no good
+ // way to provide the error back to the server
+ // handler, so just drop invalid headers instead.
+ continue
+ }
+ for _, v := range kv.values {
+ v = headerNewlineToSpace.Replace(v)
+ v = textproto.TrimString(v)
+ for _, s := range []string{kv.key, ": ", v, "\r\n"} {
+ if _, err := ws.WriteString(s); err != nil {
+ headerSorterPool.Put(sorter)
+ return err
+ }
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ formattedVals = append(formattedVals, v)
+ }
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(kv.key, formattedVals)
+ formattedVals = nil
+ }
+ }
+ headerSorterPool.Put(sorter)
+ return nil
+}
+
+// CanonicalHeaderKey returns the canonical format of the
+// header key s. The canonicalization converts the first
+// letter and any letter following a hyphen to upper case;
+// the rest are converted to lowercase. For example, the
+// canonical key for "accept-encoding" is "Accept-Encoding".
+// If s contains a space or invalid header field bytes, it is
+// returned without modifications.
+func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
+
+// hasToken reports whether token appears within v, ASCII
+// case-insensitive, with space or comma boundaries.
+// token must be all lowercase.
+// v may contain mixed case.
+func hasToken(v, token string) bool {
+ if len(token) > len(v) || token == "" {
+ return false
+ }
+ if v == token {
+ return true
+ }
+ for sp := 0; sp <= len(v)-len(token); sp++ {
+ // Check that first character is good.
+ // The token is ASCII, so checking only a single byte
+ // is sufficient. We skip this potential starting
+ // position if both the first byte and its potential
+ // ASCII uppercase equivalent (b|0x20) don't match.
+ // False positives ('^' => '~') are caught by EqualFold.
+ if b := v[sp]; b != token[0] && b|0x20 != token[0] {
+ continue
+ }
+ // Check that start pos is on a valid token boundary.
+ if sp > 0 && !isTokenBoundary(v[sp-1]) {
+ continue
+ }
+ // Check that end pos is on a valid token boundary.
+ if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) {
+ continue
+ }
+ if ascii.EqualFold(v[sp:sp+len(token)], token) {
+ return true
+ }
+ }
+ return false
+}
+
+func isTokenBoundary(b byte) bool {
+ return b == ' ' || b == ',' || b == '\t'
+}
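A short usage sketch for the exported Header API above; header names, values, and the timestamp are illustrative:

package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	h := http.Header{}
	h.Add("Accept-Encoding", "gzip")
	h.Add("Accept-Encoding", "br")     // appends a second value
	h.Set("cache-control", "no-cache") // key canonicalized to Cache-Control

	fmt.Println(h.Get("accept-encoding"))    // gzip (first value only)
	fmt.Println(h.Values("Accept-Encoding")) // [gzip br]

	// Clone copies the values into fresh storage, so changes to the
	// clone do not affect h.
	h2 := h.Clone()
	h2.Set("Accept-Encoding", "identity")
	fmt.Println(h.Get("Accept-Encoding")) // still gzip

	h.Write(os.Stdout) // wire format, keys sorted

	if t, err := http.ParseTime("Mon, 02 Jan 2006 15:04:05 GMT"); err == nil {
		fmt.Println(t.UTC())
	}
}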
diff --git a/contrib/go/_std_1.22/src/net/http/http.go b/contrib/go/_std_1.22/src/net/http/http.go
new file mode 100644
index 0000000000..6e2259adbf
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/http.go
@@ -0,0 +1,165 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate bundle -o=h2_bundle.go -prefix=http2 -tags=!nethttpomithttp2 golang.org/x/net/http2
+
+package http
+
+import (
+ "io"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
+
+// maxInt64 is the effective "infinite" value for the Server and
+// Transport's byte-limiting readers.
+const maxInt64 = 1<<63 - 1
+
+// aLongTimeAgo is a non-zero time, far in the past, used for
+// immediate cancellation of network operations.
+var aLongTimeAgo = time.Unix(1, 0)
+
+// omitBundledHTTP2 is set by omithttp2.go when the nethttpomithttp2
+// build tag is set. That means h2_bundle.go isn't compiled in and we
+// shouldn't try to use it.
+var omitBundledHTTP2 bool
+
+// TODO(bradfitz): move common stuff here. The other files have accumulated
+// generic http stuff in random places.
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string { return "net/http context value " + k.name }
+
+// hasPort reports whether s, a string of the form "host", "host:port",
+// or "[ipv6::address]:port", includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// removeEmptyPort strips the empty port in ":port" to ""
+// as mandated by RFC 3986 Section 6.2.3.
+func removeEmptyPort(host string) string {
+ if hasPort(host) {
+ return strings.TrimSuffix(host, ":")
+ }
+ return host
+}
+
+func isNotToken(r rune) bool {
+ return !httpguts.IsTokenRune(r)
+}
+
+// stringContainsCTLByte reports whether s contains any ASCII control character.
+func stringContainsCTLByte(s string) bool {
+ for i := 0; i < len(s); i++ {
+ b := s[i]
+ if b < ' ' || b == 0x7f {
+ return true
+ }
+ }
+ return false
+}
+
+func hexEscapeNonASCII(s string) string {
+ newLen := 0
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ newLen += 3
+ } else {
+ newLen++
+ }
+ }
+ if newLen == len(s) {
+ return s
+ }
+ b := make([]byte, 0, newLen)
+ var pos int
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ if pos < i {
+ b = append(b, s[pos:i]...)
+ }
+ b = append(b, '%')
+ b = strconv.AppendInt(b, int64(s[i]), 16)
+ pos = i + 1
+ }
+ }
+ if pos < len(s) {
+ b = append(b, s[pos:]...)
+ }
+ return string(b)
+}
+
+// NoBody is an [io.ReadCloser] with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set [Request.Body] to nil.
+var NoBody = noBody{}
+
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+var (
+ // verify that an io.Copy from NoBody won't require a buffer:
+ _ io.WriterTo = NoBody
+ _ io.ReadCloser = NoBody
+)
+
+// PushOptions describes options for [Pusher.Push].
+type PushOptions struct {
+ // Method specifies the HTTP method for the promised request.
+ // If set, it must be "GET" or "HEAD". Empty means "GET".
+ Method string
+
+ // Header specifies additional promised request headers. This cannot
+ // include HTTP/2 pseudo header fields like ":path" and ":scheme",
+ // which will be added automatically.
+ Header Header
+}
+
+// Pusher is the interface implemented by ResponseWriters that support
+// HTTP/2 server push. For more background, see
+// https://tools.ietf.org/html/rfc7540#section-8.2.
+type Pusher interface {
+ // Push initiates an HTTP/2 server push. This constructs a synthetic
+ // request using the given target and options, serializes that request
+ // into a PUSH_PROMISE frame, then dispatches that request using the
+ // server's request handler. If opts is nil, default options are used.
+ //
+ // The target must either be an absolute path (like "/path") or an absolute
+ // URL that contains a valid host and the same scheme as the parent request.
+ // If the target is a path, it will inherit the scheme and host of the
+ // parent request.
+ //
+ // The HTTP/2 spec disallows recursive pushes and cross-authority pushes.
+ // Push may or may not detect these invalid pushes; however, invalid
+ // pushes will be detected and canceled by conforming clients.
+ //
+ // Handlers that wish to push URL X should call Push before sending any
+ // data that may trigger a request for URL X. This avoids a race where the
+ // client issues requests for X before receiving the PUSH_PROMISE for X.
+ //
+ // Push will run in a separate goroutine, making the order of arrival
+ // non-deterministic. Any required synchronization needs to be implemented
+ // by the caller.
+ //
+ // Push returns ErrNotSupported if the client has disabled push or if push
+ // is not supported on the underlying connection.
+ Push(target string, opts *PushOptions) error
+}
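A hedged handler sketch for the Pusher interface above. The asset path and certificate files are placeholders, and push only applies over HTTP/2, so the server must be started with TLS:

package main

import (
	"fmt"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	if p, ok := w.(http.Pusher); ok {
		// Best effort: Push returns ErrNotSupported on HTTP/1.x
		// connections or when the client has disabled push.
		_ = p.Push("/static/app.css", &http.PushOptions{Method: "GET"})
	}
	fmt.Fprintln(w, `<link rel="stylesheet" href="/static/app.css">`)
}

func main() {
	http.HandleFunc("/", handler)
	// cert.pem and key.pem are placeholder paths.
	http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
}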
diff --git a/contrib/go/_std_1.22/src/net/http/httptest/httptest.go b/contrib/go/_std_1.22/src/net/http/httptest/httptest.go
new file mode 100644
index 0000000000..f0ca64362d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httptest/httptest.go
@@ -0,0 +1,90 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httptest provides utilities for HTTP testing.
+package httptest
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "io"
+ "net/http"
+ "strings"
+)
+
+// NewRequest returns a new incoming server Request, suitable
+// for passing to an [http.Handler] for testing.
+//
+// The target is the RFC 7230 "request-target": it may be either a
+// path or an absolute URL. If target is an absolute URL, the host name
+// from the URL is used. Otherwise, "example.com" is used.
+//
+// The TLS field is set to a non-nil dummy value if target has scheme
+// "https".
+//
+// The Request.Proto is always HTTP/1.1.
+//
+// An empty method means "GET".
+//
+// The provided body may be nil. If the body is of type *bytes.Reader,
+// *strings.Reader, or *bytes.Buffer, the Request.ContentLength is
+// set.
+//
+// NewRequest panics on error for ease of use in testing, where a
+// panic is acceptable.
+//
+// To generate a client HTTP request instead of a server request, see
+// the NewRequest function in the net/http package.
+func NewRequest(method, target string, body io.Reader) *http.Request {
+ if method == "" {
+ method = "GET"
+ }
+ req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(method + " " + target + " HTTP/1.0\r\n\r\n")))
+ if err != nil {
+ panic("invalid NewRequest arguments; " + err.Error())
+ }
+
+ // HTTP/1.0 was used above to avoid needing a Host field. Change it to 1.1 here.
+ req.Proto = "HTTP/1.1"
+ req.ProtoMinor = 1
+ req.Close = false
+
+ if body != nil {
+ switch v := body.(type) {
+ case *bytes.Buffer:
+ req.ContentLength = int64(v.Len())
+ case *bytes.Reader:
+ req.ContentLength = int64(v.Len())
+ case *strings.Reader:
+ req.ContentLength = int64(v.Len())
+ default:
+ req.ContentLength = -1
+ }
+ if rc, ok := body.(io.ReadCloser); ok {
+ req.Body = rc
+ } else {
+ req.Body = io.NopCloser(body)
+ }
+ }
+
+ // 192.0.2.0/24 is "TEST-NET" in RFC 5737 for use solely in
+ // documentation and example source code and should not be
+ // used publicly.
+ req.RemoteAddr = "192.0.2.1:1234"
+
+ if req.Host == "" {
+ req.Host = "example.com"
+ }
+
+ if strings.HasPrefix(target, "https://") {
+ req.TLS = &tls.ConnectionState{
+ Version: tls.VersionTLS12,
+ HandshakeComplete: true,
+ ServerName: req.Host,
+ }
+ }
+
+ return req
+}
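A usage sketch for NewRequest above; the host and JSON payload are illustrative:

package main

import (
	"fmt"
	"net/http/httptest"
	"strings"
)

func main() {
	body := strings.NewReader(`{"id":1}`)
	req := httptest.NewRequest("POST", "https://api.example.com/items", body)
	req.Header.Set("Content-Type", "application/json")

	fmt.Println(req.Method, req.URL.Path) // POST /items
	fmt.Println(req.Host)                 // api.example.com
	fmt.Println(req.TLS != nil)           // true, since the scheme is https
	fmt.Println(req.ContentLength)        // 8, taken from the *strings.Reader
	fmt.Println(req.RemoteAddr)           // 192.0.2.1:1234
}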
diff --git a/contrib/go/_std_1.22/src/net/http/httptest/recorder.go b/contrib/go/_std_1.22/src/net/http/httptest/recorder.go
new file mode 100644
index 0000000000..dd51901b0d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httptest/recorder.go
@@ -0,0 +1,255 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httptest
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// ResponseRecorder is an implementation of [http.ResponseWriter] that
+// records its mutations for later inspection in tests.
+type ResponseRecorder struct {
+ // Code is the HTTP response code set by WriteHeader.
+ //
+ // Note that if a Handler never calls WriteHeader or Write,
+ // this might end up being 0, rather than the implicit
+ // http.StatusOK. To get the implicit value, use the Result
+ // method.
+ Code int
+
+ // HeaderMap contains the headers explicitly set by the Handler.
+ // It is an internal detail.
+ //
+ // Deprecated: HeaderMap exists for historical compatibility
+ // and should not be used. To access the headers returned by a handler,
+ // use the Response.Header map as returned by the Result method.
+ HeaderMap http.Header
+
+ // Body is the buffer to which the Handler's Write calls are sent.
+ // If nil, the Writes are silently discarded.
+ Body *bytes.Buffer
+
+ // Flushed is whether the Handler called Flush.
+ Flushed bool
+
+ result *http.Response // cache of Result's return value
+ snapHeader http.Header // snapshot of HeaderMap at first Write
+ wroteHeader bool
+}
+
+// NewRecorder returns an initialized [ResponseRecorder].
+func NewRecorder() *ResponseRecorder {
+ return &ResponseRecorder{
+ HeaderMap: make(http.Header),
+ Body: new(bytes.Buffer),
+ Code: 200,
+ }
+}
+
+// DefaultRemoteAddr is the default remote address to return in RemoteAddr if
+// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder].
+const DefaultRemoteAddr = "1.2.3.4"
+
+// Header implements [http.ResponseWriter]. It returns the response
+// headers to mutate within a handler. To test the headers that were
+// written after a handler completes, use the [ResponseRecorder.Result] method and see
+// the returned Response value's Header.
+func (rw *ResponseRecorder) Header() http.Header {
+ m := rw.HeaderMap
+ if m == nil {
+ m = make(http.Header)
+ rw.HeaderMap = m
+ }
+ return m
+}
+
+// writeHeader writes a header if it was not written yet and
+// detects Content-Type if needed.
+//
+// bytes or str are the beginning of the response body.
+// We pass both to avoid unnecessarily generating garbage
+// in rw.WriteString, which was created for performance reasons.
+// Non-nil bytes win.
+func (rw *ResponseRecorder) writeHeader(b []byte, str string) {
+ if rw.wroteHeader {
+ return
+ }
+ if len(str) > 512 {
+ str = str[:512]
+ }
+
+ m := rw.Header()
+
+ _, hasType := m["Content-Type"]
+ hasTE := m.Get("Transfer-Encoding") != ""
+ if !hasType && !hasTE {
+ if b == nil {
+ b = []byte(str)
+ }
+ m.Set("Content-Type", http.DetectContentType(b))
+ }
+
+ rw.WriteHeader(200)
+}
+
+// Write implements http.ResponseWriter. The data in buf is written to
+// rw.Body, if not nil.
+func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
+ rw.writeHeader(buf, "")
+ if rw.Body != nil {
+ rw.Body.Write(buf)
+ }
+ return len(buf), nil
+}
+
+// WriteString implements [io.StringWriter]. The data in str is written
+// to rw.Body, if not nil.
+func (rw *ResponseRecorder) WriteString(str string) (int, error) {
+ rw.writeHeader(nil, str)
+ if rw.Body != nil {
+ rw.Body.WriteString(str)
+ }
+ return len(str), nil
+}
+
+func checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at https://httpwg.org/specs/rfc7231.html#status.codes)
+ // and we might block under 200 (once we have more mature 1xx support).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 {
+ panic(fmt.Sprintf("invalid WriteHeader code %v", code))
+ }
+}
+
+// WriteHeader implements [http.ResponseWriter].
+func (rw *ResponseRecorder) WriteHeader(code int) {
+ if rw.wroteHeader {
+ return
+ }
+
+ checkWriteHeaderCode(code)
+ rw.Code = code
+ rw.wroteHeader = true
+ if rw.HeaderMap == nil {
+ rw.HeaderMap = make(http.Header)
+ }
+ rw.snapHeader = rw.HeaderMap.Clone()
+}
+
+// Flush implements [http.Flusher]. To test whether Flush was
+// called, see rw.Flushed.
+func (rw *ResponseRecorder) Flush() {
+ if !rw.wroteHeader {
+ rw.WriteHeader(200)
+ }
+ rw.Flushed = true
+}
+
+// Result returns the response generated by the handler.
+//
+// The returned Response will have at least its StatusCode,
+// Header, Body, and optionally Trailer populated.
+// More fields may be populated in the future, so callers should
+// not DeepEqual the result in tests.
+//
+// The Response.Header is a snapshot of the headers at the time of the
+// first write call, or at the time of this call, if the handler never
+// did a write.
+//
+// The Response.Body is guaranteed to be non-nil, and calls to
+// Body.Read are guaranteed not to return any error other than [io.EOF].
+//
+// Result must only be called after the handler has finished running.
+func (rw *ResponseRecorder) Result() *http.Response {
+ if rw.result != nil {
+ return rw.result
+ }
+ if rw.snapHeader == nil {
+ rw.snapHeader = rw.HeaderMap.Clone()
+ }
+ res := &http.Response{
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ StatusCode: rw.Code,
+ Header: rw.snapHeader,
+ }
+ rw.result = res
+ if res.StatusCode == 0 {
+ res.StatusCode = 200
+ }
+ res.Status = fmt.Sprintf("%03d %s", res.StatusCode, http.StatusText(res.StatusCode))
+ if rw.Body != nil {
+ res.Body = io.NopCloser(bytes.NewReader(rw.Body.Bytes()))
+ } else {
+ res.Body = http.NoBody
+ }
+ res.ContentLength = parseContentLength(res.Header.Get("Content-Length"))
+
+ if trailers, ok := rw.snapHeader["Trailer"]; ok {
+ res.Trailer = make(http.Header, len(trailers))
+ for _, k := range trailers {
+ for _, k := range strings.Split(k, ",") {
+ k = http.CanonicalHeaderKey(textproto.TrimString(k))
+ if !httpguts.ValidTrailerHeader(k) {
+ // Ignore since forbidden by RFC 7230, section 4.1.2.
+ continue
+ }
+ vv, ok := rw.HeaderMap[k]
+ if !ok {
+ continue
+ }
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ res.Trailer[k] = vv2
+ }
+ }
+ }
+ for k, vv := range rw.HeaderMap {
+ if !strings.HasPrefix(k, http.TrailerPrefix) {
+ continue
+ }
+ if res.Trailer == nil {
+ res.Trailer = make(http.Header)
+ }
+ for _, v := range vv {
+ res.Trailer.Add(strings.TrimPrefix(k, http.TrailerPrefix), v)
+ }
+ }
+ return res
+}
+
+// parseContentLength trims whitespace from cl and returns -1 if no value
+// is set, or the value if it's >= 0.
+//
+// This is a modified version of the same function found in
+// net/http/transfer.go. This one just ignores an invalid header.
+func parseContentLength(cl string) int64 {
+ cl = textproto.TrimString(cl)
+ if cl == "" {
+ return -1
+ }
+ n, err := strconv.ParseUint(cl, 10, 63)
+ if err != nil {
+ return -1
+ }
+ return int64(n)
+}
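A typical handler test built on the recorder above; the handler body and header name are illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Request-Id", "42")
		io.WriteString(w, "<html><body>hello</body></html>")
	}

	rec := httptest.NewRecorder()
	handler(rec, httptest.NewRequest("GET", "/", nil))

	res := rec.Result()
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)

	fmt.Println(res.StatusCode)                 // 200, the implicit WriteHeader
	fmt.Println(res.Header.Get("Content-Type")) // text/html; charset=utf-8, sniffed
	fmt.Println(res.Header.Get("X-Request-Id")) // 42
	fmt.Println(string(body))
}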
diff --git a/contrib/go/_std_1.22/src/net/http/httptest/server.go b/contrib/go/_std_1.22/src/net/http/httptest/server.go
new file mode 100644
index 0000000000..5095b438ec
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httptest/server.go
@@ -0,0 +1,385 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implementation of Server
+
+package httptest
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "flag"
+ "fmt"
+ "log"
+ "net"
+ "net/http"
+ "net/http/internal/testcert"
+ "os"
+ "strings"
+ "sync"
+ "time"
+)
+
+// A Server is an HTTP server listening on a system-chosen port on the
+// local loopback interface, for use in end-to-end HTTP tests.
+type Server struct {
+ URL string // base URL of form http://ipaddr:port with no trailing slash
+ Listener net.Listener
+
+ // EnableHTTP2 controls whether HTTP/2 is enabled
+ // on the server. It must be set between calling
+ // NewUnstartedServer and calling Server.StartTLS.
+ EnableHTTP2 bool
+
+ // TLS is the optional TLS configuration, populated with a new config
+ // after TLS is started. If set on an unstarted server before StartTLS
+ // is called, existing fields are copied into the new config.
+ TLS *tls.Config
+
+ // Config may be changed after calling NewUnstartedServer and
+ // before Start or StartTLS.
+ Config *http.Server
+
+ // certificate is a parsed version of the TLS config certificate, if present.
+ certificate *x509.Certificate
+
+ // wg counts the number of outstanding HTTP requests on this server.
+ // Close blocks until all requests are finished.
+ wg sync.WaitGroup
+
+ mu sync.Mutex // guards closed and conns
+ closed bool
+ conns map[net.Conn]http.ConnState // except terminal states
+
+ // client is configured for use with the server.
+ // Its transport is automatically closed when Close is called.
+ client *http.Client
+}
+
+func newLocalListener() net.Listener {
+ if serveFlag != "" {
+ l, err := net.Listen("tcp", serveFlag)
+ if err != nil {
+ panic(fmt.Sprintf("httptest: failed to listen on %v: %v", serveFlag, err))
+ }
+ return l
+ }
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
+ panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err))
+ }
+ }
+ return l
+}
+
+// When debugging a particular http server-based test,
+// this flag lets you run
+//
+// go test -run='^BrokenTest$' -httptest.serve=127.0.0.1:8000
+//
+// to start the broken server so you can interact with it manually.
+// We only register this flag if it looks like the caller knows about it
+// and is trying to use it as we don't want to pollute flags and this
+// isn't really part of our API. Don't depend on this.
+var serveFlag string
+
+func init() {
+ if strSliceContainsPrefix(os.Args, "-httptest.serve=") || strSliceContainsPrefix(os.Args, "--httptest.serve=") {
+ flag.StringVar(&serveFlag, "httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks.")
+ }
+}
+
+func strSliceContainsPrefix(v []string, pre string) bool {
+ for _, s := range v {
+ if strings.HasPrefix(s, pre) {
+ return true
+ }
+ }
+ return false
+}
+
+// NewServer starts and returns a new [Server].
+// The caller should call Close when finished, to shut it down.
+func NewServer(handler http.Handler) *Server {
+ ts := NewUnstartedServer(handler)
+ ts.Start()
+ return ts
+}
+
+// NewUnstartedServer returns a new [Server] but doesn't start it.
+//
+// After changing its configuration, the caller should call Start or
+// StartTLS.
+//
+// The caller should call Close when finished, to shut it down.
+func NewUnstartedServer(handler http.Handler) *Server {
+ return &Server{
+ Listener: newLocalListener(),
+ Config: &http.Server{Handler: handler},
+ }
+}
+
+// Start starts a server from NewUnstartedServer.
+func (s *Server) Start() {
+ if s.URL != "" {
+ panic("Server already started")
+ }
+ if s.client == nil {
+ s.client = &http.Client{Transport: &http.Transport{}}
+ }
+ s.URL = "http://" + s.Listener.Addr().String()
+ s.wrap()
+ s.goServe()
+ if serveFlag != "" {
+ fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL)
+ select {}
+ }
+}
+
+// StartTLS starts TLS on a server from NewUnstartedServer.
+func (s *Server) StartTLS() {
+ if s.URL != "" {
+ panic("Server already started")
+ }
+ if s.client == nil {
+ s.client = &http.Client{}
+ }
+ cert, err := tls.X509KeyPair(testcert.LocalhostCert, testcert.LocalhostKey)
+ if err != nil {
+ panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
+ }
+
+ existingConfig := s.TLS
+ if existingConfig != nil {
+ s.TLS = existingConfig.Clone()
+ } else {
+ s.TLS = new(tls.Config)
+ }
+ if s.TLS.NextProtos == nil {
+ nextProtos := []string{"http/1.1"}
+ if s.EnableHTTP2 {
+ nextProtos = []string{"h2"}
+ }
+ s.TLS.NextProtos = nextProtos
+ }
+ if len(s.TLS.Certificates) == 0 {
+ s.TLS.Certificates = []tls.Certificate{cert}
+ }
+ s.certificate, err = x509.ParseCertificate(s.TLS.Certificates[0].Certificate[0])
+ if err != nil {
+ panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
+ }
+ certpool := x509.NewCertPool()
+ certpool.AddCert(s.certificate)
+ s.client.Transport = &http.Transport{
+ TLSClientConfig: &tls.Config{
+ RootCAs: certpool,
+ },
+ ForceAttemptHTTP2: s.EnableHTTP2,
+ }
+ s.Listener = tls.NewListener(s.Listener, s.TLS)
+ s.URL = "https://" + s.Listener.Addr().String()
+ s.wrap()
+ s.goServe()
+}
+
+// NewTLSServer starts and returns a new [Server] using TLS.
+// The caller should call Close when finished, to shut it down.
+func NewTLSServer(handler http.Handler) *Server {
+ ts := NewUnstartedServer(handler)
+ ts.StartTLS()
+ return ts
+}
+
+type closeIdleTransport interface {
+ CloseIdleConnections()
+}
+
+// Close shuts down the server and blocks until all outstanding
+// requests on this server have completed.
+func (s *Server) Close() {
+ s.mu.Lock()
+ if !s.closed {
+ s.closed = true
+ s.Listener.Close()
+ s.Config.SetKeepAlivesEnabled(false)
+ for c, st := range s.conns {
+ // Force-close any idle connections (those between
+ // requests) and new connections (those which connected
+ // but never sent a request). StateNew connections are
+ // super rare and have only been seen (in
+ // previously-flaky tests) in the case of
+ // socket-late-binding races from the http Client
+ // dialing this server and then getting an idle
+ // connection before the dial completed. There is thus
+ // a connected connection in StateNew with no
+ // associated Request. We only close StateIdle and
+ // StateNew because they're not doing anything. It's
+ // possible StateNew is about to do something in a few
+ // milliseconds, but a previous CL to check again in a
+ // few milliseconds wasn't liked (early versions of
+ // https://golang.org/cl/15151) so now we just
+ // forcefully close StateNew. The docs for Server.Close say
+ // we wait for "outstanding requests", so we don't close things
+ // in StateActive.
+ if st == http.StateIdle || st == http.StateNew {
+ s.closeConn(c)
+ }
+ }
+ // If this server doesn't shut down in 5 seconds, tell the user why.
+ t := time.AfterFunc(5*time.Second, s.logCloseHangDebugInfo)
+ defer t.Stop()
+ }
+ s.mu.Unlock()
+
+ // Not part of httptest.Server's correctness, but assume most
+ // users of httptest.Server will be using the standard
+ // transport, so help them out and close any idle connections for them.
+ if t, ok := http.DefaultTransport.(closeIdleTransport); ok {
+ t.CloseIdleConnections()
+ }
+
+ // Also close the client idle connections.
+ if s.client != nil {
+ if t, ok := s.client.Transport.(closeIdleTransport); ok {
+ t.CloseIdleConnections()
+ }
+ }
+
+ s.wg.Wait()
+}
+
+func (s *Server) logCloseHangDebugInfo() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ var buf strings.Builder
+ buf.WriteString("httptest.Server blocked in Close after 5 seconds, waiting for connections:\n")
+ for c, st := range s.conns {
+ fmt.Fprintf(&buf, " %T %p %v in state %v\n", c, c, c.RemoteAddr(), st)
+ }
+ log.Print(buf.String())
+}
+
+// CloseClientConnections closes any open HTTP connections to the test Server.
+func (s *Server) CloseClientConnections() {
+ s.mu.Lock()
+ nconn := len(s.conns)
+ ch := make(chan struct{}, nconn)
+ for c := range s.conns {
+ go s.closeConnChan(c, ch)
+ }
+ s.mu.Unlock()
+
+ // Wait for outstanding closes to finish.
+ //
+ // Out of paranoia for making a late change in Go 1.6, we
+ // bound how long this can wait, since golang.org/issue/14291
+ // isn't fully understood yet. At least this should only be used
+ // in tests.
+ timer := time.NewTimer(5 * time.Second)
+ defer timer.Stop()
+ for i := 0; i < nconn; i++ {
+ select {
+ case <-ch:
+ case <-timer.C:
+ // Too slow. Give up.
+ return
+ }
+ }
+}
+
+// Certificate returns the certificate used by the server, or nil if
+// the server doesn't use TLS.
+func (s *Server) Certificate() *x509.Certificate {
+ return s.certificate
+}
+
+// Client returns an HTTP client configured for making requests to the server.
+// It is configured to trust the server's TLS test certificate and will
+// close its idle connections on [Server.Close].
+func (s *Server) Client() *http.Client {
+ return s.client
+}
+
+func (s *Server) goServe() {
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ s.Config.Serve(s.Listener)
+ }()
+}
+
+// wrap installs the connection state-tracking hook to know which
+// connections are idle.
+func (s *Server) wrap() {
+ oldHook := s.Config.ConnState
+ s.Config.ConnState = func(c net.Conn, cs http.ConnState) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ switch cs {
+ case http.StateNew:
+ if _, exists := s.conns[c]; exists {
+ panic("invalid state transition")
+ }
+ if s.conns == nil {
+ s.conns = make(map[net.Conn]http.ConnState)
+ }
+ // Add c to the set of tracked conns and increment the
+ // waitgroup.
+ s.wg.Add(1)
+ s.conns[c] = cs
+ if s.closed {
+ // Probably just a socket-late-binding dial from
+ // the default transport that lost the race (and
+ // thus this connection is now idle and will
+ // never be used).
+ s.closeConn(c)
+ }
+ case http.StateActive:
+ if oldState, ok := s.conns[c]; ok {
+ if oldState != http.StateNew && oldState != http.StateIdle {
+ panic("invalid state transition")
+ }
+ s.conns[c] = cs
+ }
+ case http.StateIdle:
+ if oldState, ok := s.conns[c]; ok {
+ if oldState != http.StateActive {
+ panic("invalid state transition")
+ }
+ s.conns[c] = cs
+ }
+ if s.closed {
+ s.closeConn(c)
+ }
+ case http.StateHijacked, http.StateClosed:
+ // Remove c from the set of tracked conns and decrement the
+ // waitgroup, unless it was previously removed.
+ if _, ok := s.conns[c]; ok {
+ delete(s.conns, c)
+ // Keep Close from returning until the user's ConnState hook
+ // (if any) finishes.
+ defer s.wg.Done()
+ }
+ }
+ if oldHook != nil {
+ oldHook(c, cs)
+ }
+ }
+}
+
+// closeConn closes c.
+// s.mu must be held.
+func (s *Server) closeConn(c net.Conn) { s.closeConnChan(c, nil) }
+
+// closeConnChan is like closeConn, but takes an optional channel to receive a value
+// when the goroutine closing c is done.
+func (s *Server) closeConnChan(c net.Conn, done chan<- struct{}) {
+ c.Close()
+ if done != nil {
+ done <- struct{}{}
+ }
+}
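A minimal end-to-end sketch using the TLS variant of the server above:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from", r.Host)
	}))
	defer ts.Close()

	// ts.Client() already trusts the test certificate; a plain
	// http.Client would fail verification against ts.URL.
	res, err := ts.Client().Get(ts.URL)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Print(string(body))
}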
diff --git a/contrib/go/_std_1.22/src/net/http/httptest/ya.make b/contrib/go/_std_1.22/src/net/http/httptest/ya.make
new file mode 100644
index 0000000000..eb2e918667
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httptest/ya.make
@@ -0,0 +1,9 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ httptest.go
+ recorder.go
+ server.go
+ )
+ENDIF()
+END()
diff --git a/contrib/go/_std_1.22/src/net/http/httptrace/trace.go b/contrib/go/_std_1.22/src/net/http/httptrace/trace.go
new file mode 100644
index 0000000000..706a432957
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httptrace/trace.go
@@ -0,0 +1,255 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httptrace provides mechanisms to trace the events within
+// HTTP client requests.
+package httptrace
+
+import (
+ "context"
+ "crypto/tls"
+ "internal/nettrace"
+ "net"
+ "net/textproto"
+ "reflect"
+ "time"
+)
+
+// unique type to prevent assignment.
+type clientEventContextKey struct{}
+
+// ContextClientTrace returns the [ClientTrace] associated with the
+// provided context. If none, it returns nil.
+func ContextClientTrace(ctx context.Context) *ClientTrace {
+ trace, _ := ctx.Value(clientEventContextKey{}).(*ClientTrace)
+ return trace
+}
+
+// WithClientTrace returns a new context based on the provided parent
+// ctx. HTTP client requests made with the returned context will use
+// the provided trace hooks, in addition to any previous hooks
+// registered with ctx. Any hooks defined in the provided trace will
+// be called first.
+func WithClientTrace(ctx context.Context, trace *ClientTrace) context.Context {
+ if trace == nil {
+ panic("nil trace")
+ }
+ old := ContextClientTrace(ctx)
+ trace.compose(old)
+
+ ctx = context.WithValue(ctx, clientEventContextKey{}, trace)
+ if trace.hasNetHooks() {
+ nt := &nettrace.Trace{
+ ConnectStart: trace.ConnectStart,
+ ConnectDone: trace.ConnectDone,
+ }
+ if trace.DNSStart != nil {
+ nt.DNSStart = func(name string) {
+ trace.DNSStart(DNSStartInfo{Host: name})
+ }
+ }
+ if trace.DNSDone != nil {
+ nt.DNSDone = func(netIPs []any, coalesced bool, err error) {
+ addrs := make([]net.IPAddr, len(netIPs))
+ for i, ip := range netIPs {
+ addrs[i] = ip.(net.IPAddr)
+ }
+ trace.DNSDone(DNSDoneInfo{
+ Addrs: addrs,
+ Coalesced: coalesced,
+ Err: err,
+ })
+ }
+ }
+ ctx = context.WithValue(ctx, nettrace.TraceKey{}, nt)
+ }
+ return ctx
+}
+
+// ClientTrace is a set of hooks to run at various stages of an outgoing
+// HTTP request. Any particular hook may be nil. Functions may be
+// called concurrently from different goroutines and some may be called
+// after the request has completed or failed.
+//
+// ClientTrace currently traces a single HTTP request & response
+// during a single round trip and has no hooks that span a series
+// of redirected requests.
+//
+// See https://blog.golang.org/http-tracing for more.
+type ClientTrace struct {
+ // GetConn is called before a connection is created or
+ // retrieved from an idle pool. The hostPort is the
+ // "host:port" of the target or proxy. GetConn is called even
+ // if there's already an idle cached connection available.
+ GetConn func(hostPort string)
+
+ // GotConn is called after a successful connection is
+ // obtained. There is no hook for failure to obtain a
+ // connection; instead, use the error from
+ // Transport.RoundTrip.
+ GotConn func(GotConnInfo)
+
+ // PutIdleConn is called when the connection is returned to
+ // the idle pool. If err is nil, the connection was
+ // successfully returned to the idle pool. If err is non-nil,
+ // it describes why not. PutIdleConn is not called if
+ // connection reuse is disabled via Transport.DisableKeepAlives.
+ // PutIdleConn is called before the caller's Response.Body.Close
+ // call returns.
+ // For HTTP/2, this hook is not currently used.
+ PutIdleConn func(err error)
+
+ // GotFirstResponseByte is called when the first byte of the response
+ // headers is available.
+ GotFirstResponseByte func()
+
+ // Got100Continue is called if the server replies with a "100
+ // Continue" response.
+ Got100Continue func()
+
+ // Got1xxResponse is called for each 1xx informational response header
+ // returned before the final non-1xx response. Got1xxResponse is called
+ // for "100 Continue" responses, even if Got100Continue is also defined.
+ // If it returns an error, the client request is aborted with that error value.
+ Got1xxResponse func(code int, header textproto.MIMEHeader) error
+
+ // DNSStart is called when a DNS lookup begins.
+ DNSStart func(DNSStartInfo)
+
+ // DNSDone is called when a DNS lookup ends.
+ DNSDone func(DNSDoneInfo)
+
+ // ConnectStart is called when a new connection's Dial begins.
+ // If net.Dialer.DualStack (IPv6 "Happy Eyeballs") support is
+ // enabled, this may be called multiple times.
+ ConnectStart func(network, addr string)
+
+ // ConnectDone is called when a new connection's Dial
+ // completes. The provided err indicates whether the
+ // connection completed successfully.
+ // If net.Dialer.DualStack ("Happy Eyeballs") support is
+ // enabled, this may be called multiple times.
+ ConnectDone func(network, addr string, err error)
+
+ // TLSHandshakeStart is called when the TLS handshake is started. When
+ // connecting to an HTTPS site via an HTTP proxy, the handshake happens
+ // after the CONNECT request is processed by the proxy.
+ TLSHandshakeStart func()
+
+ // TLSHandshakeDone is called after the TLS handshake with either the
+ // successful handshake's connection state, or a non-nil error on handshake
+ // failure.
+ TLSHandshakeDone func(tls.ConnectionState, error)
+
+ // WroteHeaderField is called after the Transport has written
+ // each request header. At the time of this call the values
+ // might be buffered and not yet written to the network.
+ WroteHeaderField func(key string, value []string)
+
+ // WroteHeaders is called after the Transport has written
+ // all request headers.
+ WroteHeaders func()
+
+ // Wait100Continue is called if the Request specified
+ // "Expect: 100-continue" and the Transport has written the
+ // request headers but is waiting for "100 Continue" from the
+ // server before writing the request body.
+ Wait100Continue func()
+
+ // WroteRequest is called with the result of writing the
+ // request and any body. It may be called multiple times
+ // in the case of retried requests.
+ WroteRequest func(WroteRequestInfo)
+}
+
+// WroteRequestInfo contains information provided to the WroteRequest
+// hook.
+type WroteRequestInfo struct {
+ // Err is any error encountered while writing the Request.
+ Err error
+}
+
+// compose modifies t such that it respects the previously-registered hooks in old,
+// calling t's hook first and then old's for any hook defined in both.
+func (t *ClientTrace) compose(old *ClientTrace) {
+ if old == nil {
+ return
+ }
+ tv := reflect.ValueOf(t).Elem()
+ ov := reflect.ValueOf(old).Elem()
+ structType := tv.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ tf := tv.Field(i)
+ hookType := tf.Type()
+ if hookType.Kind() != reflect.Func {
+ continue
+ }
+ of := ov.Field(i)
+ if of.IsNil() {
+ continue
+ }
+ if tf.IsNil() {
+ tf.Set(of)
+ continue
+ }
+
+ // Make a copy of tf for tf to call. (Otherwise it
+ // creates a recursive call cycle and stack overflows)
+ tfCopy := reflect.ValueOf(tf.Interface())
+
+ // We need to call both tf and of in some order.
+ newFunc := reflect.MakeFunc(hookType, func(args []reflect.Value) []reflect.Value {
+ tfCopy.Call(args)
+ return of.Call(args)
+ })
+ tv.Field(i).Set(newFunc)
+ }
+}
+
+// DNSStartInfo contains information about a DNS request.
+type DNSStartInfo struct {
+ Host string
+}
+
+// DNSDoneInfo contains information about the results of a DNS lookup.
+type DNSDoneInfo struct {
+ // Addrs are the IPv4 and/or IPv6 addresses found in the DNS
+ // lookup. The contents of the slice should not be mutated.
+ Addrs []net.IPAddr
+
+ // Err is any error that occurred during the DNS lookup.
+ Err error
+
+ // Coalesced is whether the Addrs were shared with another
+ // caller who was doing the same DNS lookup concurrently.
+ Coalesced bool
+}
+
+func (t *ClientTrace) hasNetHooks() bool {
+ if t == nil {
+ return false
+ }
+ return t.DNSStart != nil || t.DNSDone != nil || t.ConnectStart != nil || t.ConnectDone != nil
+}
+
+// GotConnInfo is the argument to the [ClientTrace.GotConn] function and
+// contains information about the obtained connection.
+type GotConnInfo struct {
+ // Conn is the connection that was obtained. It is owned by
+ // the http.Transport and should not be read, written or
+ // closed by users of ClientTrace.
+ Conn net.Conn
+
+ // Reused is whether this connection has been previously
+ // used for another HTTP request.
+ Reused bool
+
+ // WasIdle is whether this connection was obtained from an
+ // idle pool.
+ WasIdle bool
+
+ // IdleTime reports how long the connection was previously
+ // idle, if WasIdle is true.
+ IdleTime time.Duration
+}
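A hedged usage sketch wiring a few of the hooks above into a request; the URL is a placeholder:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	trace := &httptrace.ClientTrace{
		DNSDone: func(info httptrace.DNSDoneInfo) {
			fmt.Println("dns:", info.Addrs, info.Err)
		},
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Println("conn reused:", info.Reused, "was idle:", info.WasIdle)
		},
		WroteRequest: func(info httptrace.WroteRequestInfo) {
			fmt.Println("wrote request, err:", info.Err)
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if res, err := http.DefaultTransport.RoundTrip(req); err == nil {
		res.Body.Close()
	}
}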
diff --git a/contrib/go/_std_1.22/src/net/http/httptrace/ya.make b/contrib/go/_std_1.22/src/net/http/httptrace/ya.make
new file mode 100644
index 0000000000..b7c2ebf180
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httptrace/ya.make
@@ -0,0 +1,7 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ trace.go
+ )
+ENDIF()
+END()
diff --git a/contrib/go/_std_1.22/src/net/http/httputil/dump.go b/contrib/go/_std_1.22/src/net/http/httputil/dump.go
new file mode 100644
index 0000000000..2edb9bc98d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httputil/dump.go
@@ -0,0 +1,337 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// drainBody reads all of b to memory and then returns two equivalent
+// ReadClosers yielding the same bytes.
+//
+// It returns an error if the initial slurp of all bytes fails. It does not attempt
+// to make the returned ReadClosers have identical error-matching behavior.
+func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
+ if b == nil || b == http.NoBody {
+ // No copying needed. Preserve the magic sentinel meaning of NoBody.
+ return http.NoBody, http.NoBody, nil
+ }
+ var buf bytes.Buffer
+ if _, err = buf.ReadFrom(b); err != nil {
+ return nil, b, err
+ }
+ if err = b.Close(); err != nil {
+ return nil, b, err
+ }
+ return io.NopCloser(&buf), io.NopCloser(bytes.NewReader(buf.Bytes())), nil
+}
+
+// dumpConn is a net.Conn which writes to Writer and reads from Reader
+type dumpConn struct {
+ io.Writer
+ io.Reader
+}
+
+func (c *dumpConn) Close() error { return nil }
+func (c *dumpConn) LocalAddr() net.Addr { return nil }
+func (c *dumpConn) RemoteAddr() net.Addr { return nil }
+func (c *dumpConn) SetDeadline(t time.Time) error { return nil }
+func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil }
+func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil }
+
+type neverEnding byte
+
+func (b neverEnding) Read(p []byte) (n int, err error) {
+ for i := range p {
+ p[i] = byte(b)
+ }
+ return len(p), nil
+}
+
+// outgoingLength is a copy of the unexported
+// (*http.Request).outgoingLength method.
+func outgoingLength(req *http.Request) int64 {
+ if req.Body == nil || req.Body == http.NoBody {
+ return 0
+ }
+ if req.ContentLength != 0 {
+ return req.ContentLength
+ }
+ return -1
+}
+
+// DumpRequestOut is like [DumpRequest] but for outgoing client requests. It
+// includes any headers that the standard [http.Transport] adds, such as
+// User-Agent.
+func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
+ save := req.Body
+ dummyBody := false
+ if !body {
+ contentLength := outgoingLength(req)
+ if contentLength != 0 {
+ req.Body = io.NopCloser(io.LimitReader(neverEnding('x'), contentLength))
+ dummyBody = true
+ }
+ } else {
+ var err error
+ save, req.Body, err = drainBody(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Since we're using the actual Transport code to write the request,
+ // switch to http so the Transport doesn't try to do an SSL
+ // negotiation with our dumpConn and its bytes.Buffer & pipe.
+// The wire format for https and http is the same, anyway.
+ reqSend := req
+ if req.URL.Scheme == "https" {
+ reqSend = new(http.Request)
+ *reqSend = *req
+ reqSend.URL = new(url.URL)
+ *reqSend.URL = *req.URL
+ reqSend.URL.Scheme = "http"
+ }
+
+ // Use the actual Transport code to record what we would send
+ // on the wire, but not using TCP. Use a Transport with a
+// custom dialer that returns a fake net.Conn that waits
+// for the full input (recording it as it goes), and then responds
+// with a dummy response.
+ var buf bytes.Buffer // records the output
+ pr, pw := io.Pipe()
+ defer pr.Close()
+ defer pw.Close()
+ dr := &delegateReader{c: make(chan io.Reader)}
+
+ t := &http.Transport{
+ Dial: func(net, addr string) (net.Conn, error) {
+ return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
+ },
+ }
+ defer t.CloseIdleConnections()
+
+ // We need this channel to ensure that the reader
+ // goroutine exits if t.RoundTrip returns an error.
+ // See golang.org/issue/32571.
+ quitReadCh := make(chan struct{})
+ // Wait for the request before replying with a dummy response:
+ go func() {
+ req, err := http.ReadRequest(bufio.NewReader(pr))
+ if err == nil {
+ // Ensure all the body is read; otherwise
+ // we'll get a partial dump.
+ io.Copy(io.Discard, req.Body)
+ req.Body.Close()
+ }
+ select {
+ case dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n"):
+ case <-quitReadCh:
+ // Ensure delegateReader.Read doesn't block forever if we get an error.
+ close(dr.c)
+ }
+ }()
+
+ _, err := t.RoundTrip(reqSend)
+
+ req.Body = save
+ if err != nil {
+ pw.Close()
+ dr.err = err
+ close(quitReadCh)
+ return nil, err
+ }
+ dump := buf.Bytes()
+
+ // If we used a dummy body above, remove it now.
+ // TODO: if the req.ContentLength is large, we allocate memory
+ // unnecessarily just to slice it off here. But this is just
+ // a debug function, so this is acceptable for now. We could
+ // discard the body earlier if this matters.
+ if dummyBody {
+ if i := bytes.Index(dump, []byte("\r\n\r\n")); i >= 0 {
+ dump = dump[:i+4]
+ }
+ }
+ return dump, nil
+}
+
+// delegateReader is a reader that delegates to another reader,
+// once it arrives on a channel.
+type delegateReader struct {
+ c chan io.Reader
+ err error // only used if r is nil and c is closed.
+ r io.Reader // nil until received from c
+}
+
+func (r *delegateReader) Read(p []byte) (int, error) {
+ if r.r == nil {
+ var ok bool
+ if r.r, ok = <-r.c; !ok {
+ return 0, r.err
+ }
+ }
+ return r.r.Read(p)
+}
+
+// valueOrDefault returns value if nonempty, def otherwise.
+func valueOrDefault(value, def string) string {
+ if value != "" {
+ return value
+ }
+ return def
+}
+
+var reqWriteExcludeHeaderDump = map[string]bool{
+ "Host": true, // not in Header map anyway
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// DumpRequest returns the given request in its HTTP/1.x wire
+// representation. It should only be used by servers to debug client
+// requests. The returned representation is an approximation only;
+// some details of the initial request are lost while parsing it into
+// an [http.Request]. In particular, the order and case of header field
+// names are lost. The order of values in multi-valued headers is kept
+// intact. HTTP/2 requests are dumped in HTTP/1.x form, not in their
+// original binary representations.
+//
+// If body is true, DumpRequest also returns the body. To do so, it
+// consumes req.Body and then replaces it with a new [io.ReadCloser]
+// that yields the same bytes. If DumpRequest returns an error,
+// the state of req is undefined.
+//
+// The documentation for [http.Request.Write] details which fields
+// of req are included in the dump.
+func DumpRequest(req *http.Request, body bool) ([]byte, error) {
+ var err error
+ save := req.Body
+ if !body || req.Body == nil {
+ req.Body = nil
+ } else {
+ save, req.Body, err = drainBody(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var b bytes.Buffer
+
+ // By default, print out the unmodified req.RequestURI, which
+ // is always set for incoming server requests. But because we
+ // previously used req.URL.RequestURI and the docs weren't
+ // always so clear about when to use DumpRequest vs
+ // DumpRequestOut, fall back to the old way if the caller
+ // provides a non-server Request.
+ reqURI := req.RequestURI
+ if reqURI == "" {
+ reqURI = req.URL.RequestURI()
+ }
+
+ fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
+ reqURI, req.ProtoMajor, req.ProtoMinor)
+
+ absRequestURI := strings.HasPrefix(req.RequestURI, "http://") || strings.HasPrefix(req.RequestURI, "https://")
+ if !absRequestURI {
+ host := req.Host
+ if host == "" && req.URL != nil {
+ host = req.URL.Host
+ }
+ if host != "" {
+ fmt.Fprintf(&b, "Host: %s\r\n", host)
+ }
+ }
+
+ chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
+ if len(req.TransferEncoding) > 0 {
+ fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
+ }
+
+ err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
+ if err != nil {
+ return nil, err
+ }
+
+ io.WriteString(&b, "\r\n")
+
+ if req.Body != nil {
+ var dest io.Writer = &b
+ if chunked {
+ dest = NewChunkedWriter(dest)
+ }
+ _, err = io.Copy(dest, req.Body)
+ if chunked {
+ dest.(io.Closer).Close()
+ io.WriteString(&b, "\r\n")
+ }
+ }
+
+ req.Body = save
+ if err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// errNoBody is a sentinel error value used by failureToReadBody so we
+// can detect that the lack of body was intentional.
+var errNoBody = errors.New("sentinel error value")
+
+// failureToReadBody is an io.ReadCloser that just returns errNoBody on
+// Read. It's swapped in when we don't actually want to consume
+// the body, but need a non-nil one, and want to distinguish the
+// error from reading the dummy body.
+type failureToReadBody struct{}
+
+func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody }
+func (failureToReadBody) Close() error { return nil }
+
+// emptyBody is an instance of an empty reader.
+var emptyBody = io.NopCloser(strings.NewReader(""))
+
+// DumpResponse is like DumpRequest but dumps a response.
+func DumpResponse(resp *http.Response, body bool) ([]byte, error) {
+ var b bytes.Buffer
+ var err error
+ save := resp.Body
+ savecl := resp.ContentLength
+
+ if !body {
+		// For a content length of zero, make sure the body is an empty
+		// reader instead of returning an error through failureToReadBody{}.
+ if resp.ContentLength == 0 {
+ resp.Body = emptyBody
+ } else {
+ resp.Body = failureToReadBody{}
+ }
+ } else if resp.Body == nil {
+ resp.Body = emptyBody
+ } else {
+ save, resp.Body, err = drainBody(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ err = resp.Write(&b)
+ if err == errNoBody {
+ err = nil
+ }
+ resp.Body = save
+ resp.ContentLength = savecl
+ if err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
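
A quick sketch of how the dump helpers above are typically used; the handler path and listen address here are placeholders, not anything this file defines:

	package main

	import (
		"log"
		"net/http"
		"net/http/httputil"
	)

	func main() {
		http.HandleFunc("/echo", func(w http.ResponseWriter, r *http.Request) {
			// body=true drains r.Body and replaces it, so the handler
			// could still read the request afterwards.
			dump, err := httputil.DumpRequest(r, true)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			w.Write(dump) // echo the wire-format request back
		})
		log.Fatal(http.ListenAndServe("localhost:8080", nil))
	}

DumpResponse is the mirror image on the client side: call it on an *http.Response before reading the body, and the body remains readable afterwards because drainBody swaps in a buffered copy.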
diff --git a/contrib/go/_std_1.22/src/net/http/httputil/httputil.go b/contrib/go/_std_1.22/src/net/http/httputil/httputil.go
new file mode 100644
index 0000000000..431930ea65
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httputil/httputil.go
@@ -0,0 +1,41 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httputil provides HTTP utility functions, complementing the
+// more common ones in the net/http package.
+package httputil
+
+import (
+ "io"
+ "net/http/internal"
+)
+
+// NewChunkedReader returns a new chunkedReader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The chunkedReader returns [io.EOF] when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewChunkedReader(r io.Reader) io.Reader {
+ return internal.NewChunkedReader(r)
+}
+
+// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned chunkedWriter
+// sends the final 0-length chunk that marks the end of the stream but does
+// not send the final CRLF that appears after trailers; trailers and the last
+// CRLF must be written separately.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// length, both of which are wrong.
+func NewChunkedWriter(w io.Writer) io.WriteCloser {
+ return internal.NewChunkedWriter(w)
+}
+
+// ErrLineTooLong is returned when reading malformed chunked data
+// with lines that are too long.
+var ErrLineTooLong = internal.ErrLineTooLong
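
Since the package doc above stresses that normal applications never need these, a round trip makes the wire behavior concrete. This is a minimal sketch, with the caller writing the final CRLF by hand as the NewChunkedWriter comment requires:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"net/http/httputil"
	)

	func main() {
		var buf bytes.Buffer
		w := httputil.NewChunkedWriter(&buf)
		io.WriteString(w, "hello, ")
		io.WriteString(w, "world")
		w.Close()                    // emits the terminating "0\r\n" chunk
		io.WriteString(&buf, "\r\n") // final CRLF after (empty) trailers

		body, err := io.ReadAll(httputil.NewChunkedReader(&buf))
		fmt.Printf("%s %v\n", body, err) // hello, world <nil>
	}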
diff --git a/contrib/go/_std_1.22/src/net/http/httputil/persist.go b/contrib/go/_std_1.22/src/net/http/httputil/persist.go
new file mode 100644
index 0000000000..0cbe3ebf10
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httputil/persist.go
@@ -0,0 +1,431 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "net/textproto"
+ "sync"
+)
+
+var (
+ // Deprecated: No longer used.
+ ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"}
+
+ // Deprecated: No longer used.
+ ErrClosed = &http.ProtocolError{ErrorString: "connection closed by user"}
+
+ // Deprecated: No longer used.
+ ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"}
+)
+
+// This is an API usage error: the local side is closed.
+// ErrPersistEOF (above) reports that the remote side is closed.
+var errClosed = errors.New("i/o operation on closed connection")
+
+// ServerConn is an artifact of Go's early HTTP implementation.
+// It is low-level, old, and unused by Go's current HTTP stack.
+// We should have deleted it before Go 1.
+//
+// Deprecated: Use the Server in package [net/http] instead.
+type ServerConn struct {
+ mu sync.Mutex // read-write protects the following fields
+ c net.Conn
+ r *bufio.Reader
+ re, we error // read/write errors
+ lastbody io.ReadCloser
+ nread, nwritten int
+ pipereq map[*http.Request]uint
+
+ pipe textproto.Pipeline
+}
+
+// NewServerConn is an artifact of Go's early HTTP implementation.
+// It is low-level, old, and unused by Go's current HTTP stack.
+// We should have deleted it before Go 1.
+//
+// Deprecated: Use the Server in package [net/http] instead.
+func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn {
+ if r == nil {
+ r = bufio.NewReader(c)
+ }
+ return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)}
+}
+
+// Hijack detaches the [ServerConn] and returns the underlying connection as well
+// as the read-side bufio which may have some left over data. Hijack may be
+// called before Read has signaled the end of the keep-alive logic. The user
+// should not call Hijack while [ServerConn.Read] or [ServerConn.Write] is in progress.
+func (sc *ServerConn) Hijack() (net.Conn, *bufio.Reader) {
+ sc.mu.Lock()
+ defer sc.mu.Unlock()
+ c := sc.c
+ r := sc.r
+ sc.c = nil
+ sc.r = nil
+ return c, r
+}
+
+// Close calls [ServerConn.Hijack] and then also closes the underlying connection.
+func (sc *ServerConn) Close() error {
+ c, _ := sc.Hijack()
+ if c != nil {
+ return c.Close()
+ }
+ return nil
+}
+
+// Read returns the next request on the wire. An [ErrPersistEOF] is returned if
+// it is gracefully determined that there are no more requests (e.g. after the
+// first request on an HTTP/1.0 connection, or after a Connection:close on an
+// HTTP/1.1 connection).
+func (sc *ServerConn) Read() (*http.Request, error) {
+ var req *http.Request
+ var err error
+
+ // Ensure ordered execution of Reads and Writes
+ id := sc.pipe.Next()
+ sc.pipe.StartRequest(id)
+ defer func() {
+ sc.pipe.EndRequest(id)
+ if req == nil {
+ sc.pipe.StartResponse(id)
+ sc.pipe.EndResponse(id)
+ } else {
+ // Remember the pipeline id of this request
+ sc.mu.Lock()
+ sc.pipereq[req] = id
+ sc.mu.Unlock()
+ }
+ }()
+
+ sc.mu.Lock()
+ if sc.we != nil { // no point receiving if write-side broken or closed
+ defer sc.mu.Unlock()
+ return nil, sc.we
+ }
+ if sc.re != nil {
+ defer sc.mu.Unlock()
+ return nil, sc.re
+ }
+ if sc.r == nil { // connection closed by user in the meantime
+ defer sc.mu.Unlock()
+ return nil, errClosed
+ }
+ r := sc.r
+ lastbody := sc.lastbody
+ sc.lastbody = nil
+ sc.mu.Unlock()
+
+ // Make sure body is fully consumed, even if user does not call body.Close
+ if lastbody != nil {
+ // body.Close is assumed to be idempotent and multiple calls to
+ // it should return the error that its first invocation
+ // returned.
+ err = lastbody.Close()
+ if err != nil {
+ sc.mu.Lock()
+ defer sc.mu.Unlock()
+ sc.re = err
+ return nil, err
+ }
+ }
+
+ req, err = http.ReadRequest(r)
+ sc.mu.Lock()
+ defer sc.mu.Unlock()
+ if err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // A close from the opposing client is treated as a
+ // graceful close, even if there was some unparse-able
+ // data before the close.
+ sc.re = ErrPersistEOF
+ return nil, sc.re
+ } else {
+ sc.re = err
+ return req, err
+ }
+ }
+ sc.lastbody = req.Body
+ sc.nread++
+ if req.Close {
+ sc.re = ErrPersistEOF
+ return req, sc.re
+ }
+ return req, err
+}
+
+// Pending returns the number of unanswered requests
+// that have been received on the connection.
+func (sc *ServerConn) Pending() int {
+ sc.mu.Lock()
+ defer sc.mu.Unlock()
+ return sc.nread - sc.nwritten
+}
+
+// Write writes resp in response to req. To close the connection gracefully, set the
+// Response.Close field to true. Write should be considered operational until
+// it returns an error, regardless of any errors returned on the [ServerConn.Read] side.
+func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
+
+ // Retrieve the pipeline ID of this request/response pair
+ sc.mu.Lock()
+ id, ok := sc.pipereq[req]
+ delete(sc.pipereq, req)
+ if !ok {
+ sc.mu.Unlock()
+ return ErrPipeline
+ }
+ sc.mu.Unlock()
+
+ // Ensure pipeline order
+ sc.pipe.StartResponse(id)
+ defer sc.pipe.EndResponse(id)
+
+ sc.mu.Lock()
+ if sc.we != nil {
+ defer sc.mu.Unlock()
+ return sc.we
+ }
+ if sc.c == nil { // connection closed by user in the meantime
+ defer sc.mu.Unlock()
+ return ErrClosed
+ }
+ c := sc.c
+ if sc.nread <= sc.nwritten {
+ defer sc.mu.Unlock()
+ return errors.New("persist server pipe count")
+ }
+ if resp.Close {
+ // After signaling a keep-alive close, any pipelined unread
+ // requests will be lost. It is up to the user to drain them
+ // before signaling.
+ sc.re = ErrPersistEOF
+ }
+ sc.mu.Unlock()
+
+ err := resp.Write(c)
+ sc.mu.Lock()
+ defer sc.mu.Unlock()
+ if err != nil {
+ sc.we = err
+ return err
+ }
+ sc.nwritten++
+
+ return nil
+}
+
+// ClientConn is an artifact of Go's early HTTP implementation.
+// It is low-level, old, and unused by Go's current HTTP stack.
+// We should have deleted it before Go 1.
+//
+// Deprecated: Use Client or Transport in package [net/http] instead.
+type ClientConn struct {
+ mu sync.Mutex // read-write protects the following fields
+ c net.Conn
+ r *bufio.Reader
+ re, we error // read/write errors
+ lastbody io.ReadCloser
+ nread, nwritten int
+ pipereq map[*http.Request]uint
+
+ pipe textproto.Pipeline
+ writeReq func(*http.Request, io.Writer) error
+}
+
+// NewClientConn is an artifact of Go's early HTTP implementation.
+// It is low-level, old, and unused by Go's current HTTP stack.
+// We should have deleted it before Go 1.
+//
+// Deprecated: Use the Client or Transport in package [net/http] instead.
+func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
+ if r == nil {
+ r = bufio.NewReader(c)
+ }
+ return &ClientConn{
+ c: c,
+ r: r,
+ pipereq: make(map[*http.Request]uint),
+ writeReq: (*http.Request).Write,
+ }
+}
+
+// NewProxyClientConn is an artifact of Go's early HTTP implementation.
+// It is low-level, old, and unused by Go's current HTTP stack.
+// We should have deleted it before Go 1.
+//
+// Deprecated: Use the Client or Transport in package [net/http] instead.
+func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
+ cc := NewClientConn(c, r)
+ cc.writeReq = (*http.Request).WriteProxy
+ return cc
+}
+
+// Hijack detaches the [ClientConn] and returns the underlying connection as well
+// as the read-side bufio which may have some left over data. Hijack may be
+// called before the user or Read have signaled the end of the keep-alive
+// logic. The user should not call Hijack while [ClientConn.Read] or ClientConn.Write is in progress.
+func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ c = cc.c
+ r = cc.r
+ cc.c = nil
+ cc.r = nil
+ return
+}
+
+// Close calls [ClientConn.Hijack] and then also closes the underlying connection.
+func (cc *ClientConn) Close() error {
+ c, _ := cc.Hijack()
+ if c != nil {
+ return c.Close()
+ }
+ return nil
+}
+
+// Write writes a request. An [ErrPersistEOF] error is returned if the connection
+// has been closed in an HTTP keep-alive sense. If req.Close equals true, the
+// keep-alive connection is logically closed after this request and the opposing
+// server is informed. An ErrUnexpectedEOF indicates the remote closed the
+// underlying TCP connection, which is usually considered a graceful close.
+func (cc *ClientConn) Write(req *http.Request) error {
+ var err error
+
+ // Ensure ordered execution of Writes
+ id := cc.pipe.Next()
+ cc.pipe.StartRequest(id)
+ defer func() {
+ cc.pipe.EndRequest(id)
+ if err != nil {
+ cc.pipe.StartResponse(id)
+ cc.pipe.EndResponse(id)
+ } else {
+ // Remember the pipeline id of this request
+ cc.mu.Lock()
+ cc.pipereq[req] = id
+ cc.mu.Unlock()
+ }
+ }()
+
+ cc.mu.Lock()
+ if cc.re != nil { // no point sending if read-side closed or broken
+ defer cc.mu.Unlock()
+ return cc.re
+ }
+ if cc.we != nil {
+ defer cc.mu.Unlock()
+ return cc.we
+ }
+ if cc.c == nil { // connection closed by user in the meantime
+ defer cc.mu.Unlock()
+ return errClosed
+ }
+ c := cc.c
+ if req.Close {
+ // We write the EOF to the write-side error, because there
+ // still might be some pipelined reads
+ cc.we = ErrPersistEOF
+ }
+ cc.mu.Unlock()
+
+ err = cc.writeReq(req, c)
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if err != nil {
+ cc.we = err
+ return err
+ }
+ cc.nwritten++
+
+ return nil
+}
+
+// Pending returns the number of unanswered requests
+// that have been sent on the connection.
+func (cc *ClientConn) Pending() int {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.nwritten - cc.nread
+}
+
+// Read reads the next response from the wire. A valid response might be
+// returned together with an [ErrPersistEOF], which means that the remote
+// requested that this be the last request serviced. Read can be called
+// concurrently with [ClientConn.Write], but not with another Read.
+func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) {
+ // Retrieve the pipeline ID of this request/response pair
+ cc.mu.Lock()
+ id, ok := cc.pipereq[req]
+ delete(cc.pipereq, req)
+ if !ok {
+ cc.mu.Unlock()
+ return nil, ErrPipeline
+ }
+ cc.mu.Unlock()
+
+ // Ensure pipeline order
+ cc.pipe.StartResponse(id)
+ defer cc.pipe.EndResponse(id)
+
+ cc.mu.Lock()
+ if cc.re != nil {
+ defer cc.mu.Unlock()
+ return nil, cc.re
+ }
+ if cc.r == nil { // connection closed by user in the meantime
+ defer cc.mu.Unlock()
+ return nil, errClosed
+ }
+ r := cc.r
+ lastbody := cc.lastbody
+ cc.lastbody = nil
+ cc.mu.Unlock()
+
+ // Make sure body is fully consumed, even if user does not call body.Close
+ if lastbody != nil {
+ // body.Close is assumed to be idempotent and multiple calls to
+ // it should return the error that its first invocation
+ // returned.
+ err = lastbody.Close()
+ if err != nil {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.re = err
+ return nil, err
+ }
+ }
+
+ resp, err = http.ReadResponse(r, req)
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if err != nil {
+ cc.re = err
+ return resp, err
+ }
+ cc.lastbody = resp.Body
+
+ cc.nread++
+
+ if resp.Close {
+ cc.re = ErrPersistEOF // don't send any more requests
+ return resp, cc.re
+ }
+ return resp, err
+}
+
+// Do is a convenience method that writes a request and reads a response.
+func (cc *ClientConn) Do(req *http.Request) (*http.Response, error) {
+ err := cc.Write(req)
+ if err != nil {
+ return nil, err
+ }
+ return cc.Read(req)
+}
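
Purely for illustration, since every entry point above is deprecated: a historical ClientConn round trip looked roughly like this (the host is a placeholder; new code should use http.Client or http.Transport instead):

	package main

	import (
		"fmt"
		"log"
		"net"
		"net/http"
		"net/http/httputil"
	)

	func main() {
		conn, err := net.Dial("tcp", "example.com:80")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		cc := httputil.NewClientConn(conn, nil) // Deprecated API
		req, err := http.NewRequest("GET", "http://example.com/", nil)
		if err != nil {
			log.Fatal(err)
		}
		resp, err := cc.Do(req) // Write then Read, in pipeline order
		if err != nil && err != httputil.ErrPersistEOF {
			log.Fatal(err)
		}
		if resp != nil {
			defer resp.Body.Close()
			fmt.Println(resp.Status)
		}
	}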
diff --git a/contrib/go/_std_1.22/src/net/http/httputil/reverseproxy.go b/contrib/go/_std_1.22/src/net/http/httputil/reverseproxy.go
new file mode 100644
index 0000000000..5c70f0d27b
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httputil/reverseproxy.go
@@ -0,0 +1,831 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP reverse proxy handler
+
+package httputil
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "mime"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// A ProxyRequest contains a request to be rewritten by a [ReverseProxy].
+type ProxyRequest struct {
+ // In is the request received by the proxy.
+ // The Rewrite function must not modify In.
+ In *http.Request
+
+ // Out is the request which will be sent by the proxy.
+ // The Rewrite function may modify or replace this request.
+ // Hop-by-hop headers are removed from this request
+ // before Rewrite is called.
+ Out *http.Request
+}
+
+// SetURL routes the outbound request to the scheme, host, and base path
+// provided in target. If the target's path is "/base" and the incoming
+// request was for "/dir", the target request will be for "/base/dir".
+//
+// SetURL rewrites the outbound Host header to match the target's host.
+// To preserve the inbound request's Host header (the default behavior
+// of [NewSingleHostReverseProxy]):
+//
+// rewriteFunc := func(r *httputil.ProxyRequest) {
+// r.SetURL(url)
+// r.Out.Host = r.In.Host
+// }
+func (r *ProxyRequest) SetURL(target *url.URL) {
+ rewriteRequestURL(r.Out, target)
+ r.Out.Host = ""
+}
+
+// SetXForwarded sets the X-Forwarded-For, X-Forwarded-Host, and
+// X-Forwarded-Proto headers of the outbound request.
+//
+// - The X-Forwarded-For header is set to the client IP address.
+// - The X-Forwarded-Host header is set to the host name requested
+// by the client.
+// - The X-Forwarded-Proto header is set to "http" or "https", depending
+// on whether the inbound request was made on a TLS-enabled connection.
+//
+// If the outbound request contains an existing X-Forwarded-For header,
+// SetXForwarded appends the client IP address to it. To append to the
+// inbound request's X-Forwarded-For header (the default behavior of
+// [ReverseProxy] when using a Director function), copy the header
+// from the inbound request before calling SetXForwarded:
+//
+// rewriteFunc := func(r *httputil.ProxyRequest) {
+// r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"]
+// r.SetXForwarded()
+// }
+func (r *ProxyRequest) SetXForwarded() {
+ clientIP, _, err := net.SplitHostPort(r.In.RemoteAddr)
+ if err == nil {
+ prior := r.Out.Header["X-Forwarded-For"]
+ if len(prior) > 0 {
+ clientIP = strings.Join(prior, ", ") + ", " + clientIP
+ }
+ r.Out.Header.Set("X-Forwarded-For", clientIP)
+ } else {
+ r.Out.Header.Del("X-Forwarded-For")
+ }
+ r.Out.Header.Set("X-Forwarded-Host", r.In.Host)
+ if r.In.TLS == nil {
+ r.Out.Header.Set("X-Forwarded-Proto", "http")
+ } else {
+ r.Out.Header.Set("X-Forwarded-Proto", "https")
+ }
+}
+
+// ReverseProxy is an HTTP Handler that takes an incoming request and
+// sends it to another server, proxying the response back to the
+// client.
+//
+// 1xx responses are forwarded to the client if the underlying
+// transport supports ClientTrace.Got1xxResponse.
+type ReverseProxy struct {
+ // Rewrite must be a function which modifies
+ // the request into a new request to be sent
+ // using Transport. Its response is then copied
+ // back to the original client unmodified.
+ // Rewrite must not access the provided ProxyRequest
+ // or its contents after returning.
+ //
+ // The Forwarded, X-Forwarded, X-Forwarded-Host,
+ // and X-Forwarded-Proto headers are removed from the
+ // outbound request before Rewrite is called. See also
+ // the ProxyRequest.SetXForwarded method.
+ //
+ // Unparsable query parameters are removed from the
+ // outbound request before Rewrite is called.
+ // The Rewrite function may copy the inbound URL's
+ // RawQuery to the outbound URL to preserve the original
+ // parameter string. Note that this can lead to security
+ // issues if the proxy's interpretation of query parameters
+ // does not match that of the downstream server.
+ //
+ // At most one of Rewrite or Director may be set.
+ Rewrite func(*ProxyRequest)
+
+ // Director is a function which modifies
+ // the request into a new request to be sent
+ // using Transport. Its response is then copied
+ // back to the original client unmodified.
+ // Director must not access the provided Request
+ // after returning.
+ //
+ // By default, the X-Forwarded-For header is set to the
+ // value of the client IP address. If an X-Forwarded-For
+ // header already exists, the client IP is appended to the
+ // existing values. As a special case, if the header
+ // exists in the Request.Header map but has a nil value
+ // (such as when set by the Director func), the X-Forwarded-For
+ // header is not modified.
+ //
+ // To prevent IP spoofing, be sure to delete any pre-existing
+ // X-Forwarded-For header coming from the client or
+ // an untrusted proxy.
+ //
+ // Hop-by-hop headers are removed from the request after
+ // Director returns, which can remove headers added by
+ // Director. Use a Rewrite function instead to ensure
+ // modifications to the request are preserved.
+ //
+ // Unparsable query parameters are removed from the outbound
+ // request if Request.Form is set after Director returns.
+ //
+ // At most one of Rewrite or Director may be set.
+ Director func(*http.Request)
+
+ // The transport used to perform proxy requests.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+
+ // FlushInterval specifies the flush interval
+ // to flush to the client while copying the
+ // response body.
+ // If zero, no periodic flushing is done.
+ // A negative value means to flush immediately
+ // after each write to the client.
+ // The FlushInterval is ignored when ReverseProxy
+ // recognizes a response as a streaming response, or
+ // if its ContentLength is -1; for such responses, writes
+ // are flushed to the client immediately.
+ FlushInterval time.Duration
+
+ // ErrorLog specifies an optional logger for errors
+ // that occur when attempting to proxy the request.
+ // If nil, logging is done via the log package's standard logger.
+ ErrorLog *log.Logger
+
+ // BufferPool optionally specifies a buffer pool to
+ // get byte slices for use by io.CopyBuffer when
+ // copying HTTP response bodies.
+ BufferPool BufferPool
+
+ // ModifyResponse is an optional function that modifies the
+ // Response from the backend. It is called if the backend
+ // returns a response at all, with any HTTP status code.
+ // If the backend is unreachable, the optional ErrorHandler is
+ // called without any call to ModifyResponse.
+ //
+ // If ModifyResponse returns an error, ErrorHandler is called
+ // with its error value. If ErrorHandler is nil, its default
+ // implementation is used.
+ ModifyResponse func(*http.Response) error
+
+ // ErrorHandler is an optional function that handles errors
+ // reaching the backend or errors from ModifyResponse.
+ //
+ // If nil, the default is to log the provided error and return
+ // a 502 Status Bad Gateway response.
+ ErrorHandler func(http.ResponseWriter, *http.Request, error)
+}
+
+// A BufferPool is an interface for getting and returning temporary
+// byte slices for use by [io.CopyBuffer].
+type BufferPool interface {
+ Get() []byte
+ Put([]byte)
+}
+
+func singleJoiningSlash(a, b string) string {
+ aslash := strings.HasSuffix(a, "/")
+ bslash := strings.HasPrefix(b, "/")
+ switch {
+ case aslash && bslash:
+ return a + b[1:]
+ case !aslash && !bslash:
+ return a + "/" + b
+ }
+ return a + b
+}
+
+func joinURLPath(a, b *url.URL) (path, rawpath string) {
+ if a.RawPath == "" && b.RawPath == "" {
+ return singleJoiningSlash(a.Path, b.Path), ""
+ }
+ // Same as singleJoiningSlash, but uses EscapedPath to determine
+ // whether a slash should be added
+ apath := a.EscapedPath()
+ bpath := b.EscapedPath()
+
+ aslash := strings.HasSuffix(apath, "/")
+ bslash := strings.HasPrefix(bpath, "/")
+
+ switch {
+ case aslash && bslash:
+ return a.Path + b.Path[1:], apath + bpath[1:]
+ case !aslash && !bslash:
+ return a.Path + "/" + b.Path, apath + "/" + bpath
+ }
+ return a.Path + b.Path, apath + bpath
+}
+
+// NewSingleHostReverseProxy returns a new [ReverseProxy] that routes
+// URLs to the scheme, host, and base path provided in target. If the
+// target's path is "/base" and the incoming request was for "/dir",
+// the target request will be for /base/dir.
+//
+// NewSingleHostReverseProxy does not rewrite the Host header.
+//
+// To customize the ReverseProxy behavior beyond what
+// NewSingleHostReverseProxy provides, use ReverseProxy directly
+// with a Rewrite function. The ProxyRequest SetURL method
+// may be used to route the outbound request. (Note that SetURL,
+// unlike NewSingleHostReverseProxy, rewrites the Host header
+// of the outbound request by default.)
+//
+// proxy := &ReverseProxy{
+// Rewrite: func(r *ProxyRequest) {
+// r.SetURL(target)
+// r.Out.Host = r.In.Host // if desired
+// },
+// }
+func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
+ director := func(req *http.Request) {
+ rewriteRequestURL(req, target)
+ }
+ return &ReverseProxy{Director: director}
+}
+
+func rewriteRequestURL(req *http.Request, target *url.URL) {
+ targetQuery := target.RawQuery
+ req.URL.Scheme = target.Scheme
+ req.URL.Host = target.Host
+ req.URL.Path, req.URL.RawPath = joinURLPath(target, req.URL)
+ if targetQuery == "" || req.URL.RawQuery == "" {
+ req.URL.RawQuery = targetQuery + req.URL.RawQuery
+ } else {
+ req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
+ }
+}
+
+func copyHeader(dst, src http.Header) {
+ for k, vv := range src {
+ for _, v := range vv {
+ dst.Add(k, v)
+ }
+ }
+}
+
+// Hop-by-hop headers. These are removed when sent to the backend.
+// As of RFC 7230, hop-by-hop headers are required to appear in the
+// Connection header field. These are the headers defined by the
+// obsoleted RFC 2616 (section 13.5.1) and are used for backward
+// compatibility.
+var hopHeaders = []string{
+ "Connection",
+ "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
+ "Keep-Alive",
+ "Proxy-Authenticate",
+ "Proxy-Authorization",
+ "Te", // canonicalized version of "TE"
+	"Trailer", // not Trailers; see https://www.rfc-editor.org/errata_search.php?eid=4522
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+func (p *ReverseProxy) defaultErrorHandler(rw http.ResponseWriter, req *http.Request, err error) {
+ p.logf("http: proxy error: %v", err)
+ rw.WriteHeader(http.StatusBadGateway)
+}
+
+func (p *ReverseProxy) getErrorHandler() func(http.ResponseWriter, *http.Request, error) {
+ if p.ErrorHandler != nil {
+ return p.ErrorHandler
+ }
+ return p.defaultErrorHandler
+}
+
+// modifyResponse conditionally runs the optional ModifyResponse hook
+// and reports whether the request should proceed.
+func (p *ReverseProxy) modifyResponse(rw http.ResponseWriter, res *http.Response, req *http.Request) bool {
+ if p.ModifyResponse == nil {
+ return true
+ }
+ if err := p.ModifyResponse(res); err != nil {
+ res.Body.Close()
+ p.getErrorHandler()(rw, req, err)
+ return false
+ }
+ return true
+}
+
+func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ transport := p.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ ctx := req.Context()
+ if ctx.Done() != nil {
+ // CloseNotifier predates context.Context, and has been
+ // entirely superseded by it. If the request contains
+ // a Context that carries a cancellation signal, don't
+ // bother spinning up a goroutine to watch the CloseNotify
+ // channel (if any).
+ //
+ // If the request Context has a nil Done channel (which
+ // means it is either context.Background, or a custom
+ // Context implementation with no cancellation signal),
+ // then consult the CloseNotifier if available.
+ } else if cn, ok := rw.(http.CloseNotifier); ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ notifyChan := cn.CloseNotify()
+ go func() {
+ select {
+ case <-notifyChan:
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+ }
+
+ outreq := req.Clone(ctx)
+ if req.ContentLength == 0 {
+ outreq.Body = nil // Issue 16036: nil Body for http.Transport retries
+ }
+ if outreq.Body != nil {
+ // Reading from the request body after returning from a handler is not
+ // allowed, and the RoundTrip goroutine that reads the Body can outlive
+ // this handler. This can lead to a crash if the handler panics (see
+ // Issue 46866). Although calling Close doesn't guarantee there isn't
+ // any Read in flight after the handle returns, in practice it's safe to
+ // read after closing it.
+ defer outreq.Body.Close()
+ }
+ if outreq.Header == nil {
+ outreq.Header = make(http.Header) // Issue 33142: historical behavior was to always allocate
+ }
+
+ if (p.Director != nil) == (p.Rewrite != nil) {
+ p.getErrorHandler()(rw, req, errors.New("ReverseProxy must have exactly one of Director or Rewrite set"))
+ return
+ }
+
+ if p.Director != nil {
+ p.Director(outreq)
+ if outreq.Form != nil {
+ outreq.URL.RawQuery = cleanQueryParams(outreq.URL.RawQuery)
+ }
+ }
+ outreq.Close = false
+
+ reqUpType := upgradeType(outreq.Header)
+ if !ascii.IsPrint(reqUpType) {
+ p.getErrorHandler()(rw, req, fmt.Errorf("client tried to switch to invalid protocol %q", reqUpType))
+ return
+ }
+ removeHopByHopHeaders(outreq.Header)
+
+ // Issue 21096: tell backend applications that care about trailer support
+ // that we support trailers. (We do, but we don't go out of our way to
+ // advertise that unless the incoming client request thought it was worth
+ // mentioning.) Note that we look at req.Header, not outreq.Header, since
+ // the latter has passed through removeHopByHopHeaders.
+ if httpguts.HeaderValuesContainsToken(req.Header["Te"], "trailers") {
+ outreq.Header.Set("Te", "trailers")
+ }
+
+ // After stripping all the hop-by-hop connection headers above, add back any
+ // necessary for protocol upgrades, such as for websockets.
+ if reqUpType != "" {
+ outreq.Header.Set("Connection", "Upgrade")
+ outreq.Header.Set("Upgrade", reqUpType)
+ }
+
+ if p.Rewrite != nil {
+ // Strip client-provided forwarding headers.
+ // The Rewrite func may use SetXForwarded to set new values
+ // for these or copy the previous values from the inbound request.
+ outreq.Header.Del("Forwarded")
+ outreq.Header.Del("X-Forwarded-For")
+ outreq.Header.Del("X-Forwarded-Host")
+ outreq.Header.Del("X-Forwarded-Proto")
+
+ // Remove unparsable query parameters from the outbound request.
+ outreq.URL.RawQuery = cleanQueryParams(outreq.URL.RawQuery)
+
+ pr := &ProxyRequest{
+ In: req,
+ Out: outreq,
+ }
+ p.Rewrite(pr)
+ outreq = pr.Out
+ } else {
+ if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
+ // If we aren't the first proxy retain prior
+ // X-Forwarded-For information as a comma+space
+ // separated list and fold multiple headers into one.
+ prior, ok := outreq.Header["X-Forwarded-For"]
+ omit := ok && prior == nil // Issue 38079: nil now means don't populate the header
+ if len(prior) > 0 {
+ clientIP = strings.Join(prior, ", ") + ", " + clientIP
+ }
+ if !omit {
+ outreq.Header.Set("X-Forwarded-For", clientIP)
+ }
+ }
+ }
+
+ if _, ok := outreq.Header["User-Agent"]; !ok {
+ // If the outbound request doesn't have a User-Agent header set,
+ // don't send the default Go HTTP client User-Agent.
+ outreq.Header.Set("User-Agent", "")
+ }
+
+ trace := &httptrace.ClientTrace{
+ Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
+ h := rw.Header()
+ copyHeader(h, http.Header(header))
+ rw.WriteHeader(code)
+
+			// Clear headers; ResponseWriter.WriteHeader does not do this automatically for 1xx responses.
+ clear(h)
+ return nil
+ },
+ }
+ outreq = outreq.WithContext(httptrace.WithClientTrace(outreq.Context(), trace))
+
+ res, err := transport.RoundTrip(outreq)
+ if err != nil {
+ p.getErrorHandler()(rw, outreq, err)
+ return
+ }
+
+ // Deal with 101 Switching Protocols responses: (WebSocket, h2c, etc)
+ if res.StatusCode == http.StatusSwitchingProtocols {
+ if !p.modifyResponse(rw, res, outreq) {
+ return
+ }
+ p.handleUpgradeResponse(rw, outreq, res)
+ return
+ }
+
+ removeHopByHopHeaders(res.Header)
+
+ if !p.modifyResponse(rw, res, outreq) {
+ return
+ }
+
+ copyHeader(rw.Header(), res.Header)
+
+ // The "Trailer" header isn't included in the Transport's response,
+ // at least for *http.Transport. Build it up from Trailer.
+ announcedTrailers := len(res.Trailer)
+ if announcedTrailers > 0 {
+ trailerKeys := make([]string, 0, len(res.Trailer))
+ for k := range res.Trailer {
+ trailerKeys = append(trailerKeys, k)
+ }
+ rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
+ }
+
+ rw.WriteHeader(res.StatusCode)
+
+ err = p.copyResponse(rw, res.Body, p.flushInterval(res))
+ if err != nil {
+ defer res.Body.Close()
+ // Since we're streaming the response, if we run into an error all we can do
+ // is abort the request. Issue 23643: ReverseProxy should use ErrAbortHandler
+ // on read error while copying body.
+ if !shouldPanicOnCopyError(req) {
+ p.logf("suppressing panic for copyResponse error in test; copy error: %v", err)
+ return
+ }
+ panic(http.ErrAbortHandler)
+ }
+ res.Body.Close() // close now, instead of defer, to populate res.Trailer
+
+ if len(res.Trailer) > 0 {
+ // Force chunking if we saw a response trailer.
+ // This prevents net/http from calculating the length for short
+ // bodies and adding a Content-Length.
+ http.NewResponseController(rw).Flush()
+ }
+
+ if len(res.Trailer) == announcedTrailers {
+ copyHeader(rw.Header(), res.Trailer)
+ return
+ }
+
+ for k, vv := range res.Trailer {
+ k = http.TrailerPrefix + k
+ for _, v := range vv {
+ rw.Header().Add(k, v)
+ }
+ }
+}
+
+var inOurTests bool // whether we're in our own tests
+
+// shouldPanicOnCopyError reports whether the reverse proxy should
+// panic with http.ErrAbortHandler. This is the right thing to do by
+// default, but Go 1.10 and earlier did not, so existing unit tests
+// weren't expecting panics. Only panic in our own tests, or when
+// running under the HTTP server.
+func shouldPanicOnCopyError(req *http.Request) bool {
+ if inOurTests {
+ // Our tests know to handle this panic.
+ return true
+ }
+ if req.Context().Value(http.ServerContextKey) != nil {
+ // We seem to be running under an HTTP server, so
+ // it'll recover the panic.
+ return true
+ }
+ // Otherwise act like Go 1.10 and earlier to not break
+ // existing tests.
+ return false
+}
+
+// removeHopByHopHeaders removes hop-by-hop headers.
+func removeHopByHopHeaders(h http.Header) {
+ // RFC 7230, section 6.1: Remove headers listed in the "Connection" header.
+ for _, f := range h["Connection"] {
+ for _, sf := range strings.Split(f, ",") {
+ if sf = textproto.TrimString(sf); sf != "" {
+ h.Del(sf)
+ }
+ }
+ }
+ // RFC 2616, section 13.5.1: Remove a set of known hop-by-hop headers.
+ // This behavior is superseded by the RFC 7230 Connection header, but
+ // preserve it for backwards compatibility.
+ for _, f := range hopHeaders {
+ h.Del(f)
+ }
+}
+
+// flushInterval returns the p.FlushInterval value, conditionally
+// overriding its value for a specific request/response.
+func (p *ReverseProxy) flushInterval(res *http.Response) time.Duration {
+ resCT := res.Header.Get("Content-Type")
+
+ // For Server-Sent Events responses, flush immediately.
+ // The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream
+ if baseCT, _, _ := mime.ParseMediaType(resCT); baseCT == "text/event-stream" {
+ return -1 // negative means immediately
+ }
+
+	// Streaming responses may leave Content-Length unset (-1); flush immediately.
+ if res.ContentLength == -1 {
+ return -1
+ }
+
+ return p.FlushInterval
+}
+
+func (p *ReverseProxy) copyResponse(dst http.ResponseWriter, src io.Reader, flushInterval time.Duration) error {
+ var w io.Writer = dst
+
+ if flushInterval != 0 {
+ mlw := &maxLatencyWriter{
+ dst: dst,
+ flush: http.NewResponseController(dst).Flush,
+ latency: flushInterval,
+ }
+ defer mlw.stop()
+
+ // set up initial timer so headers get flushed even if body writes are delayed
+ mlw.flushPending = true
+ mlw.t = time.AfterFunc(flushInterval, mlw.delayedFlush)
+
+ w = mlw
+ }
+
+ var buf []byte
+ if p.BufferPool != nil {
+ buf = p.BufferPool.Get()
+ defer p.BufferPool.Put(buf)
+ }
+ _, err := p.copyBuffer(w, src, buf)
+ return err
+}
+
+// copyBuffer returns any write errors or non-EOF read errors, and the amount
+// of bytes written.
+func (p *ReverseProxy) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, error) {
+ if len(buf) == 0 {
+ buf = make([]byte, 32*1024)
+ }
+ var written int64
+ for {
+ nr, rerr := src.Read(buf)
+ if rerr != nil && rerr != io.EOF && rerr != context.Canceled {
+ p.logf("httputil: ReverseProxy read error during body copy: %v", rerr)
+ }
+ if nr > 0 {
+ nw, werr := dst.Write(buf[:nr])
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if werr != nil {
+ return written, werr
+ }
+ if nr != nw {
+ return written, io.ErrShortWrite
+ }
+ }
+ if rerr != nil {
+ if rerr == io.EOF {
+ rerr = nil
+ }
+ return written, rerr
+ }
+ }
+}
+
+func (p *ReverseProxy) logf(format string, args ...any) {
+ if p.ErrorLog != nil {
+ p.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+type maxLatencyWriter struct {
+ dst io.Writer
+ flush func() error
+ latency time.Duration // non-zero; negative means to flush immediately
+
+ mu sync.Mutex // protects t, flushPending, and dst.Flush
+ t *time.Timer
+ flushPending bool
+}
+
+func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ n, err = m.dst.Write(p)
+ if m.latency < 0 {
+ m.flush()
+ return
+ }
+ if m.flushPending {
+ return
+ }
+ if m.t == nil {
+ m.t = time.AfterFunc(m.latency, m.delayedFlush)
+ } else {
+ m.t.Reset(m.latency)
+ }
+ m.flushPending = true
+ return
+}
+
+func (m *maxLatencyWriter) delayedFlush() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if !m.flushPending { // if stop was called but AfterFunc already started this goroutine
+ return
+ }
+ m.flush()
+ m.flushPending = false
+}
+
+func (m *maxLatencyWriter) stop() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.flushPending = false
+ if m.t != nil {
+ m.t.Stop()
+ }
+}
+
+func upgradeType(h http.Header) string {
+ if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") {
+ return ""
+ }
+ return h.Get("Upgrade")
+}
+
+func (p *ReverseProxy) handleUpgradeResponse(rw http.ResponseWriter, req *http.Request, res *http.Response) {
+ reqUpType := upgradeType(req.Header)
+ resUpType := upgradeType(res.Header)
+	if !ascii.IsPrint(resUpType) { // We know reqUpType is ASCII, it's checked by the caller.
+		p.getErrorHandler()(rw, req, fmt.Errorf("backend tried to switch to invalid protocol %q", resUpType))
+		return
+	}
+ if !ascii.EqualFold(reqUpType, resUpType) {
+ p.getErrorHandler()(rw, req, fmt.Errorf("backend tried to switch protocol %q when %q was requested", resUpType, reqUpType))
+ return
+ }
+
+ backConn, ok := res.Body.(io.ReadWriteCloser)
+ if !ok {
+ p.getErrorHandler()(rw, req, fmt.Errorf("internal error: 101 switching protocols response with non-writable body"))
+ return
+ }
+
+ rc := http.NewResponseController(rw)
+ conn, brw, hijackErr := rc.Hijack()
+ if errors.Is(hijackErr, http.ErrNotSupported) {
+ p.getErrorHandler()(rw, req, fmt.Errorf("can't switch protocols using non-Hijacker ResponseWriter type %T", rw))
+ return
+ }
+
+ backConnCloseCh := make(chan bool)
+ go func() {
+ // Ensure that the cancellation of a request closes the backend.
+ // See issue https://golang.org/issue/35559.
+ select {
+ case <-req.Context().Done():
+ case <-backConnCloseCh:
+ }
+ backConn.Close()
+ }()
+ defer close(backConnCloseCh)
+
+ if hijackErr != nil {
+ p.getErrorHandler()(rw, req, fmt.Errorf("Hijack failed on protocol switch: %v", hijackErr))
+ return
+ }
+ defer conn.Close()
+
+ copyHeader(rw.Header(), res.Header)
+
+ res.Header = rw.Header()
+ res.Body = nil // so res.Write only writes the headers; we have res.Body in backConn above
+ if err := res.Write(brw); err != nil {
+ p.getErrorHandler()(rw, req, fmt.Errorf("response write: %v", err))
+ return
+ }
+ if err := brw.Flush(); err != nil {
+ p.getErrorHandler()(rw, req, fmt.Errorf("response flush: %v", err))
+ return
+ }
+ errc := make(chan error, 1)
+ spc := switchProtocolCopier{user: conn, backend: backConn}
+ go spc.copyToBackend(errc)
+ go spc.copyFromBackend(errc)
+ <-errc
+}
+
+// switchProtocolCopier exists so goroutines proxying data back and
+// forth have nice names in stacks.
+type switchProtocolCopier struct {
+ user, backend io.ReadWriter
+}
+
+func (c switchProtocolCopier) copyFromBackend(errc chan<- error) {
+ _, err := io.Copy(c.user, c.backend)
+ errc <- err
+}
+
+func (c switchProtocolCopier) copyToBackend(errc chan<- error) {
+ _, err := io.Copy(c.backend, c.user)
+ errc <- err
+}
+
+func cleanQueryParams(s string) string {
+ reencode := func(s string) string {
+ v, _ := url.ParseQuery(s)
+ return v.Encode()
+ }
+ for i := 0; i < len(s); {
+ switch s[i] {
+ case ';':
+ return reencode(s)
+ case '%':
+ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+ return reencode(s)
+ }
+ i += 3
+ default:
+ i++
+ }
+ }
+ return s
+}
+
+func ishex(c byte) bool {
+ switch {
+ case '0' <= c && c <= '9':
+ return true
+ case 'a' <= c && c <= 'f':
+ return true
+ case 'A' <= c && c <= 'F':
+ return true
+ }
+ return false
+}
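
Tying the file together: a minimal proxy in the Rewrite style that the NewSingleHostReverseProxy docs point new code toward. The backend URL and listen address are placeholders:

	package main

	import (
		"log"
		"net/http"
		"net/http/httputil"
		"net/url"
	)

	func main() {
		target, err := url.Parse("http://127.0.0.1:9000")
		if err != nil {
			log.Fatal(err)
		}
		proxy := &httputil.ReverseProxy{
			Rewrite: func(r *httputil.ProxyRequest) {
				r.SetURL(target)  // route to the backend, rewriting Host
				r.SetXForwarded() // set X-Forwarded-For/Host/Proto
			},
		}
		log.Fatal(http.ListenAndServe("localhost:8080", proxy))
	}

Because Rewrite is set, ServeHTTP strips inbound Forwarded/X-Forwarded-* headers and re-encodes unparsable query parameters before the function runs, exactly as the field docs above describe.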
diff --git a/contrib/go/_std_1.22/src/net/http/httputil/ya.make b/contrib/go/_std_1.22/src/net/http/httputil/ya.make
new file mode 100644
index 0000000000..782432467c
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/httputil/ya.make
@@ -0,0 +1,10 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ dump.go
+ httputil.go
+ persist.go
+ reverseproxy.go
+ )
+ENDIF()
+END()
diff --git a/contrib/go/_std_1.22/src/net/http/internal/ascii/print.go b/contrib/go/_std_1.22/src/net/http/internal/ascii/print.go
new file mode 100644
index 0000000000..98dbf4e3d2
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/internal/ascii/print.go
@@ -0,0 +1,61 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ascii
+
+import (
+ "strings"
+ "unicode"
+)
+
+// EqualFold is [strings.EqualFold], ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func EqualFold(s, t string) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if lower(s[i]) != lower(t[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func lower(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// IsPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func IsPrint(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] < ' ' || s[i] > '~' {
+ return false
+ }
+ }
+ return true
+}
+
+// Is returns whether s is ASCII.
+func Is(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] > unicode.MaxASCII {
+ return false
+ }
+ }
+ return true
+}
+
+// ToLower returns the lowercase version of s if s is ASCII and printable.
+func ToLower(s string) (lower string, ok bool) {
+ if !IsPrint(s) {
+ return "", false
+ }
+ return strings.ToLower(s), true
+}
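
The reason for an ASCII-only EqualFold is that header comparison must not apply Unicode case folding: strings.EqualFold treats the Kelvin sign U+212A as equal to 'k', while the byte-wise version above does not. A sketch of the difference, using a local copy named asciiEqualFold since this package is internal and cannot be imported:

	package main

	import (
		"fmt"
		"strings"
	)

	// asciiEqualFold mirrors EqualFold above: same length,
	// byte-wise comparison with ASCII-only lowercasing.
	func asciiEqualFold(s, t string) bool {
		if len(s) != len(t) {
			return false
		}
		for i := 0; i < len(s); i++ {
			if lower(s[i]) != lower(t[i]) {
				return false
			}
		}
		return true
	}

	func lower(b byte) byte {
		if 'A' <= b && b <= 'Z' {
			return b + ('a' - 'A')
		}
		return b
	}

	func main() {
		fmt.Println(strings.EqualFold("k", "\u212A")) // true: Unicode folding
		fmt.Println(asciiEqualFold("k", "\u212A"))    // false: ASCII only
	}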
diff --git a/contrib/go/_std_1.22/src/net/http/internal/ascii/ya.make b/contrib/go/_std_1.22/src/net/http/internal/ascii/ya.make
new file mode 100644
index 0000000000..0d6dbf08b8
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/internal/ascii/ya.make
@@ -0,0 +1,7 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ print.go
+ )
+ENDIF()
+END()
diff --git a/contrib/go/_std_1.22/src/net/http/internal/chunked.go b/contrib/go/_std_1.22/src/net/http/internal/chunked.go
new file mode 100644
index 0000000000..196b5d8925
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/internal/chunked.go
@@ -0,0 +1,287 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The wire protocol for HTTP's "chunked" Transfer-Encoding.
+
+// Package internal contains HTTP internals shared by net/http and
+// net/http/httputil.
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+const maxLineLength = 4096 // assumed <= bufio.defaultBufSize
+
+var ErrLineTooLong = errors.New("header line too long")
+
+// NewChunkedReader returns a new chunkedReader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The chunkedReader returns [io.EOF] when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewChunkedReader(r io.Reader) io.Reader {
+ br, ok := r.(*bufio.Reader)
+ if !ok {
+ br = bufio.NewReader(r)
+ }
+ return &chunkedReader{r: br}
+}
+
+type chunkedReader struct {
+ r *bufio.Reader
+ n uint64 // unread bytes in chunk
+ err error
+ buf [2]byte
+ checkEnd bool // whether need to check for \r\n chunk footer
+ excess int64 // "excessive" chunk overhead, for malicious sender detection
+}
+
+func (cr *chunkedReader) beginChunk() {
+ // chunk-size CRLF
+ var line []byte
+ line, cr.err = readChunkLine(cr.r)
+ if cr.err != nil {
+ return
+ }
+ cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data
+ line = trimTrailingWhitespace(line)
+ line, cr.err = removeChunkExtension(line)
+ if cr.err != nil {
+ return
+ }
+ cr.n, cr.err = parseHexUint(line)
+ if cr.err != nil {
+ return
+ }
+ // A sender who sends one byte per chunk will send 5 bytes of overhead
+ // for every byte of data. ("1\r\nX\r\n" to send "X".)
+ // We want to allow this, since streaming a byte at a time can be legitimate.
+ //
+ // A sender can use chunk extensions to add arbitrary amounts of additional
+ // data per byte read. ("1;very long extension\r\nX\r\n" to send "X".)
+ // We don't want to disallow extensions (although we discard them),
+ // but we also don't want to allow a sender to reduce the signal/noise ratio
+ // arbitrarily.
+ //
+ // We track the amount of excess overhead read,
+ // and produce an error if it grows too large.
+ //
+ // Currently, we say that we're willing to accept 16 bytes of overhead per chunk,
+ // plus twice the amount of real data in the chunk.
+ cr.excess -= 16 + (2 * int64(cr.n))
+ cr.excess = max(cr.excess, 0)
+ if cr.excess > 16*1024 {
+ cr.err = errors.New("chunked encoding contains too much non-data")
+ }
+ if cr.n == 0 {
+ cr.err = io.EOF
+ }
+}
+
+func (cr *chunkedReader) chunkHeaderAvailable() bool {
+ n := cr.r.Buffered()
+ if n > 0 {
+ peek, _ := cr.r.Peek(n)
+ return bytes.IndexByte(peek, '\n') >= 0
+ }
+ return false
+}
+
+func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
+ for cr.err == nil {
+ if cr.checkEnd {
+ if n > 0 && cr.r.Buffered() < 2 {
+ // We have some data. Return early (per the io.Reader
+ // contract) instead of potentially blocking while
+ // reading more.
+ break
+ }
+ if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
+ if string(cr.buf[:]) != "\r\n" {
+ cr.err = errors.New("malformed chunked encoding")
+ break
+ }
+ } else {
+ if cr.err == io.EOF {
+ cr.err = io.ErrUnexpectedEOF
+ }
+ break
+ }
+ cr.checkEnd = false
+ }
+ if cr.n == 0 {
+ if n > 0 && !cr.chunkHeaderAvailable() {
+ // We've read enough. Don't potentially block
+ // reading a new chunk header.
+ break
+ }
+ cr.beginChunk()
+ continue
+ }
+ if len(b) == 0 {
+ break
+ }
+ rbuf := b
+ if uint64(len(rbuf)) > cr.n {
+ rbuf = rbuf[:cr.n]
+ }
+ var n0 int
+ n0, cr.err = cr.r.Read(rbuf)
+ n += n0
+ b = b[n0:]
+ cr.n -= uint64(n0)
+ // If we're at the end of a chunk, read the next two
+ // bytes to verify they are "\r\n".
+ if cr.n == 0 && cr.err == nil {
+ cr.checkEnd = true
+ } else if cr.err == io.EOF {
+ cr.err = io.ErrUnexpectedEOF
+ }
+ }
+ return n, cr.err
+}
+
+// Read a line of bytes (up to \n) from b.
+// Give up if the line exceeds maxLineLength.
+// The returned bytes are owned by the bufio.Reader
+// so they are only valid until the next bufio read.
+func readChunkLine(b *bufio.Reader) ([]byte, error) {
+ p, err := b.ReadSlice('\n')
+ if err != nil {
+ // We always know when EOF is coming.
+ // If the caller asked for a line, there should be a line.
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else if err == bufio.ErrBufferFull {
+ err = ErrLineTooLong
+ }
+ return nil, err
+ }
+ if len(p) >= maxLineLength {
+ return nil, ErrLineTooLong
+ }
+ return p, nil
+}
+
+func trimTrailingWhitespace(b []byte) []byte {
+ for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
+ b = b[:len(b)-1]
+ }
+ return b
+}
+
+func isASCIISpace(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
+
+var semi = []byte(";")
+
+// removeChunkExtension removes any chunk-extension from p.
+// For example,
+//
+// "0" => "0"
+// "0;token" => "0"
+// "0;token=val" => "0"
+// `0;token="quoted string"` => "0"
+func removeChunkExtension(p []byte) ([]byte, error) {
+ p, _, _ = bytes.Cut(p, semi)
+ // TODO: care about exact syntax of chunk extensions? We're
+ // ignoring and stripping them anyway. For now just never
+ // return an error.
+ return p, nil
+}
+
+// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned chunkedWriter
+// sends the final 0-length chunk that marks the end of the stream but does
+// not send the final CRLF that appears after trailers; trailers and the last
+// CRLF must be written separately.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// length, both of which are wrong.
+func NewChunkedWriter(w io.Writer) io.WriteCloser {
+ return &chunkedWriter{w}
+}
+
+// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire chunkedWriter.
+type chunkedWriter struct {
+ Wire io.Writer
+}
+
+// Write the contents of data as one chunk to Wire.
+// NOTE: the corresponding chunk-writing procedure in Conn.Write has a bug,
+// since it does not check for the success of [io.WriteString].
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+
+ // Don't send 0-length data. It looks like EOF for chunked encoding.
+ if len(data) == 0 {
+ return 0, nil
+ }
+
+ if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
+ return 0, err
+ }
+ if n, err = cw.Wire.Write(data); err != nil {
+ return
+ }
+ if n != len(data) {
+ err = io.ErrShortWrite
+ return
+ }
+ if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
+ return
+ }
+ if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
+ err = bw.Flush()
+ }
+ return
+}
+
+func (cw *chunkedWriter) Close() error {
+ _, err := io.WriteString(cw.Wire, "0\r\n")
+ return err
+}
+
+// FlushAfterChunkWriter signals from the caller of [NewChunkedWriter]
+// that each chunk should be followed by a flush. It is used by the
+// [net/http.Transport] code to keep the buffering behavior for headers and
+// trailers, but flush out chunks aggressively in the middle for
+// request bodies which may be generated slowly. See Issue 6574.
+type FlushAfterChunkWriter struct {
+ *bufio.Writer
+}
+
+func parseHexUint(v []byte) (n uint64, err error) {
+ if len(v) == 0 {
+ return 0, errors.New("empty hex number for chunk length")
+ }
+ for i, b := range v {
+ switch {
+ case '0' <= b && b <= '9':
+ b = b - '0'
+ case 'a' <= b && b <= 'f':
+ b = b - 'a' + 10
+ case 'A' <= b && b <= 'F':
+ b = b - 'A' + 10
+ default:
+ return 0, errors.New("invalid byte in chunk length")
+ }
+ if i == 16 {
+ return 0, errors.New("http chunk length too large")
+ }
+ n <<= 4
+ n |= uint64(b)
+ }
+ return
+}
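
The overhead accounting in beginChunk is easier to see with concrete bytes: sending one data byte costs five bytes of framing ("1\r\nX\r\n"), which is why the reader budgets 16 bytes of slack per chunk plus twice the data size. A sketch via the public wrapper in net/http/httputil, since this internal package itself cannot be imported from user code:

	package main

	import (
		"bytes"
		"fmt"
		"net/http/httputil"
	)

	func main() {
		var buf bytes.Buffer
		w := httputil.NewChunkedWriter(&buf)
		w.Write([]byte("X"))
		w.Close()                        // terminal zero-length chunk
		fmt.Printf("%q\n", buf.String()) // "1\r\nX\r\n0\r\n"
	}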
diff --git a/contrib/go/_std_1.22/src/net/http/internal/testcert/testcert.go b/contrib/go/_std_1.22/src/net/http/internal/testcert/testcert.go
new file mode 100644
index 0000000000..d510e791d6
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/internal/testcert/testcert.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testcert contains a test-only localhost certificate.
+package testcert
+
+import "strings"
+
+// LocalhostCert is a PEM-encoded TLS cert with SAN IPs
+// "127.0.0.1" and "[::1]", expiring at Jan 29 16:00:00 2084 GMT.
+// generated from src/crypto/tls:
+// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
+var LocalhostCert = []byte(`-----BEGIN CERTIFICATE-----
+MIIDOTCCAiGgAwIBAgIQSRJrEpBGFc7tNb1fb5pKFzANBgkqhkiG9w0BAQsFADAS
+MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
+MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEA6Gba5tHV1dAKouAaXO3/ebDUU4rvwCUg/CNaJ2PT5xLD4N1Vcb8r
+bFSW2HXKq+MPfVdwIKR/1DczEoAGf/JWQTW7EgzlXrCd3rlajEX2D73faWJekD0U
+aUgz5vtrTXZ90BQL7WvRICd7FlEZ6FPOcPlumiyNmzUqtwGhO+9ad1W5BqJaRI6P
+YfouNkwR6Na4TzSj5BrqUfP0FwDizKSJ0XXmh8g8G9mtwxOSN3Ru1QFc61Xyeluk
+POGKBV/q6RBNklTNe0gI8usUMlYyoC7ytppNMW7X2vodAelSu25jgx2anj9fDVZu
+h7AXF5+4nJS4AAt0n1lNY7nGSsdZas8PbQIDAQABo4GIMIGFMA4GA1UdDwEB/wQE
+AwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBStsdjh3/JCXXYlQryOrL4Sh7BW5TAuBgNVHREEJzAlggtleGFtcGxlLmNv
+bYcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG9w0BAQsFAAOCAQEAxWGI
+5NhpF3nwwy/4yB4i/CwwSpLrWUa70NyhvprUBC50PxiXav1TeDzwzLx/o5HyNwsv
+cxv3HdkLW59i/0SlJSrNnWdfZ19oTcS+6PtLoVyISgtyN6DpkKpdG1cOkW3Cy2P2
++tK/tKHRP1Y/Ra0RiDpOAmqn0gCOFGz8+lqDIor/T7MTpibL3IxqWfPrvfVRHL3B
+grw/ZQTTIVjjh4JBSW3WyWgNo/ikC1lrVxzl4iPUGptxT36Cr7Zk2Bsg0XqwbOvK
+5d+NTDREkSnUbie4GeutujmX3Dsx88UiV6UY/4lHJa6I5leHUNOHahRbpbWeOfs/
+WkBKOclmOV2xlTVuPw==
+-----END CERTIFICATE-----`)
+
+// LocalhostKey is the private key for LocalhostCert.
+var LocalhostKey = []byte(testingKey(`-----BEGIN RSA TESTING KEY-----
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDoZtrm0dXV0Aqi
+4Bpc7f95sNRTiu/AJSD8I1onY9PnEsPg3VVxvytsVJbYdcqr4w99V3AgpH/UNzMS
+gAZ/8lZBNbsSDOVesJ3euVqMRfYPvd9pYl6QPRRpSDPm+2tNdn3QFAvta9EgJ3sW
+URnoU85w+W6aLI2bNSq3AaE771p3VbkGolpEjo9h+i42TBHo1rhPNKPkGupR8/QX
+AOLMpInRdeaHyDwb2a3DE5I3dG7VAVzrVfJ6W6Q84YoFX+rpEE2SVM17SAjy6xQy
+VjKgLvK2mk0xbtfa+h0B6VK7bmODHZqeP18NVm6HsBcXn7iclLgAC3SfWU1jucZK
+x1lqzw9tAgMBAAECggEABWzxS1Y2wckblnXY57Z+sl6YdmLV+gxj2r8Qib7g4ZIk
+lIlWR1OJNfw7kU4eryib4fc6nOh6O4AWZyYqAK6tqNQSS/eVG0LQTLTTEldHyVJL
+dvBe+MsUQOj4nTndZW+QvFzbcm2D8lY5n2nBSxU5ypVoKZ1EqQzytFcLZpTN7d89
+EPj0qDyrV4NZlWAwL1AygCwnlwhMQjXEalVF1ylXwU3QzyZ/6MgvF6d3SSUlh+sq
+XefuyigXw484cQQgbzopv6niMOmGP3of+yV4JQqUSb3IDmmT68XjGd2Dkxl4iPki
+6ZwXf3CCi+c+i/zVEcufgZ3SLf8D99kUGE7v7fZ6AQKBgQD1ZX3RAla9hIhxCf+O
+3D+I1j2LMrdjAh0ZKKqwMR4JnHX3mjQI6LwqIctPWTU8wYFECSh9klEclSdCa64s
+uI/GNpcqPXejd0cAAdqHEEeG5sHMDt0oFSurL4lyud0GtZvwlzLuwEweuDtvT9cJ
+Wfvl86uyO36IW8JdvUprYDctrQKBgQDycZ697qutBieZlGkHpnYWUAeImVA878sJ
+w44NuXHvMxBPz+lbJGAg8Cn8fcxNAPqHIraK+kx3po8cZGQywKHUWsxi23ozHoxo
++bGqeQb9U661TnfdDspIXia+xilZt3mm5BPzOUuRqlh4Y9SOBpSWRmEhyw76w4ZP
+OPxjWYAgwQKBgA/FehSYxeJgRjSdo+MWnK66tjHgDJE8bYpUZsP0JC4R9DL5oiaA
+brd2fI6Y+SbyeNBallObt8LSgzdtnEAbjIH8uDJqyOmknNePRvAvR6mP4xyuR+Bv
+m+Lgp0DMWTw5J9CKpydZDItc49T/mJ5tPhdFVd+am0NAQnmr1MCZ6nHxAoGABS3Y
+LkaC9FdFUUqSU8+Chkd/YbOkuyiENdkvl6t2e52jo5DVc1T7mLiIrRQi4SI8N9bN
+/3oJWCT+uaSLX2ouCtNFunblzWHBrhxnZzTeqVq4SLc8aESAnbslKL4i8/+vYZlN
+s8xtiNcSvL+lMsOBORSXzpj/4Ot8WwTkn1qyGgECgYBKNTypzAHeLE6yVadFp3nQ
+Ckq9yzvP/ib05rvgbvrne00YeOxqJ9gtTrzgh7koqJyX1L4NwdkEza4ilDWpucn0
+xiUZS4SoaJq6ZvcBYS62Yr1t8n09iG47YL8ibgtmH3L+svaotvpVxVK+d7BLevA/
+ZboOWVe3icTy64BT3OQhmg==
+-----END RSA TESTING KEY-----`))
+
+func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") }
diff --git a/contrib/go/_std_1.22/src/net/http/internal/testcert/ya.make b/contrib/go/_std_1.22/src/net/http/internal/testcert/ya.make
new file mode 100644
index 0000000000..adb7784e5f
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/internal/testcert/ya.make
@@ -0,0 +1,7 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ testcert.go
+ )
+ENDIF()
+END()
diff --git a/contrib/go/_std_1.22/src/net/http/internal/ya.make b/contrib/go/_std_1.22/src/net/http/internal/ya.make
new file mode 100644
index 0000000000..0a61ad1124
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/internal/ya.make
@@ -0,0 +1,7 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ chunked.go
+ )
+ENDIF()
+END()
diff --git a/contrib/go/_std_1.22/src/net/http/jar.go b/contrib/go/_std_1.22/src/net/http/jar.go
new file mode 100644
index 0000000000..5c3de0dad2
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/jar.go
@@ -0,0 +1,27 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "net/url"
+)
+
+// A CookieJar manages storage and use of cookies in HTTP requests.
+//
+// Implementations of CookieJar must be safe for concurrent use by multiple
+// goroutines.
+//
+// The net/http/cookiejar package provides a CookieJar implementation.
+type CookieJar interface {
+ // SetCookies handles the receipt of the cookies in a reply for the
+ // given URL. It may or may not choose to save the cookies, depending
+ // on the jar's policy and implementation.
+ SetCookies(u *url.URL, cookies []*Cookie)
+
+ // Cookies returns the cookies to send in a request for the given URL.
+ // It is up to the implementation to honor the standard cookie use
+ // restrictions such as in RFC 6265.
+ Cookies(u *url.URL) []*Cookie
+}
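CookieJar is rarely implemented by hand; a minimal sketch wiring the standard net/http/cookiejar implementation into a client (the function name newClientWithJar is hypothetical):

    import (
        "net/http"
        "net/http/cookiejar"
    )

    func newClientWithJar() (*http.Client, error) {
        jar, err := cookiejar.New(nil) // a nil *Options is valid
        if err != nil {
            return nil, err
        }
        // The client now saves cookies from responses and attaches
        // them to later requests for matching URLs.
        return &http.Client{Jar: jar}, nil
    }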
diff --git a/contrib/go/_std_1.22/src/net/http/mapping.go b/contrib/go/_std_1.22/src/net/http/mapping.go
new file mode 100644
index 0000000000..87e6d5ae5d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/mapping.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// A mapping is a collection of key-value pairs where the keys are unique.
+// A zero mapping is empty and ready to use.
+// A mapping tries to pick a representation that makes [mapping.find] most efficient.
+type mapping[K comparable, V any] struct {
+ s []entry[K, V] // for few pairs
+ m map[K]V // for many pairs
+}
+
+type entry[K comparable, V any] struct {
+ key K
+ value V
+}
+
+// maxSlice is the maximum number of pairs for which a slice is used.
+// It is a variable for benchmarking.
+var maxSlice int = 8
+
+// add adds a key-value pair to the mapping.
+func (h *mapping[K, V]) add(k K, v V) {
+ if h.m == nil && len(h.s) < maxSlice {
+ h.s = append(h.s, entry[K, V]{k, v})
+ } else {
+ if h.m == nil {
+ h.m = map[K]V{}
+ for _, e := range h.s {
+ h.m[e.key] = e.value
+ }
+ h.s = nil
+ }
+ h.m[k] = v
+ }
+}
+
+// find returns the value corresponding to the given key.
+// The second return value is false if there is no value
+// with that key.
+func (h *mapping[K, V]) find(k K) (v V, found bool) {
+ if h == nil {
+ return v, false
+ }
+ if h.m != nil {
+ v, found = h.m[k]
+ return v, found
+ }
+ for _, e := range h.s {
+ if e.key == k {
+ return e.value, true
+ }
+ }
+ return v, false
+}
+
+// eachPair calls f for each pair in the mapping.
+// If f returns false, eachPair returns immediately.
+func (h *mapping[K, V]) eachPair(f func(k K, v V) bool) {
+ if h == nil {
+ return
+ }
+ if h.m != nil {
+ for k, v := range h.m {
+ if !f(k, v) {
+ return
+ }
+ }
+ } else {
+ for _, e := range h.s {
+ if !f(e.key, e.value) {
+ return
+ }
+ }
+ }
+}
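A hypothetical in-package sketch of the slice-to-map switchover described above (assumes strconv is imported):

    var m mapping[string, int]
    for i := 0; i < maxSlice; i++ {
        m.add(strconv.Itoa(i), i) // the first maxSlice pairs live in the slice m.s
    }
    m.add("extra", 99) // the next add migrates every pair into the map m.m
    if v, ok := m.find("3"); ok {
        _ = v // 3, now served by the map lookup path in find
    }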
diff --git a/contrib/go/_std_1.22/src/net/http/method.go b/contrib/go/_std_1.22/src/net/http/method.go
new file mode 100644
index 0000000000..6f46155069
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/method.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// Common HTTP methods.
+//
+// Unless otherwise noted, these are defined in RFC 7231 section 4.3.
+const (
+ MethodGet = "GET"
+ MethodHead = "HEAD"
+ MethodPost = "POST"
+ MethodPut = "PUT"
+ MethodPatch = "PATCH" // RFC 5789
+ MethodDelete = "DELETE"
+ MethodConnect = "CONNECT"
+ MethodOptions = "OPTIONS"
+ MethodTrace = "TRACE"
+)
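These constants exist so the compiler can catch misspelled methods that a raw string would hide; a short sketch (the URL is a placeholder):

    import (
        "net/http"
        "strings"
    )

    func newUpload() (*http.Request, error) {
        // A typo in a raw string such as "PSOT" fails only at runtime;
        // a misspelled http.MethodPost fails to compile.
        return http.NewRequest(http.MethodPost, "https://example.com/upload",
            strings.NewReader("payload"))
    }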
diff --git a/contrib/go/_std_1.22/src/net/http/pattern.go b/contrib/go/_std_1.22/src/net/http/pattern.go
new file mode 100644
index 0000000000..f6af19b0f4
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/pattern.go
@@ -0,0 +1,529 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Patterns for ServeMux routing.
+
+package http
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+ "unicode"
+)
+
+// A pattern is something that can be matched against an HTTP request.
+// It has an optional method, an optional host, and a path.
+type pattern struct {
+ str string // original string
+ method string
+ host string
+ // The representation of a path differs from the surface syntax, which
+ // simplifies most algorithms.
+ //
+ // Paths ending in '/' are represented with an anonymous "..." wildcard.
+ // For example, the path "a/" is represented as a literal segment "a" followed
+ // by a segment with multi==true.
+ //
+ // Paths ending in "{$}" are represented with the literal segment "/".
+ // For example, the path "a/{$}" is represented as a literal segment "a" followed
+ // by a literal segment "/".
+ segments []segment
+ loc string // source location of registering call, for helpful messages
+}
+
+func (p *pattern) String() string { return p.str }
+
+func (p *pattern) lastSegment() segment {
+ return p.segments[len(p.segments)-1]
+}
+
+// A segment is a pattern piece that matches one or more path segments, or
+// a trailing slash.
+//
+// If wild is false, it matches a literal segment, or, if s == "/", a trailing slash.
+// Examples:
+//
+// "a" => segment{s: "a"}
+// "/{$}" => segment{s: "/"}
+//
+// If wild is true and multi is false, it matches a single path segment.
+// Example:
+//
+// "{x}" => segment{s: "x", wild: true}
+//
+// If both wild and multi are true, it matches all remaining path segments.
+// Example:
+//
+// "{rest...}" => segment{s: "rest", wild: true, multi: true}
+type segment struct {
+ s string // literal or wildcard name or "/" for "/{$}".
+ wild bool
+ multi bool // "..." wildcard
+}
+
+// parsePattern parses a string into a Pattern.
+// The string's syntax is
+//
+// [METHOD] [HOST]/[PATH]
+//
+// where:
+// - METHOD is an HTTP method
+// - HOST is a hostname
+// - PATH consists of slash-separated segments, where each segment is either
+// a literal or a wildcard of the form "{name}", "{name...}", or "{$}".
+//
+// METHOD, HOST and PATH are all optional; that is, the string can be "/".
+// If METHOD is present, it must be followed by a single space.
+// Wildcard names must be valid Go identifiers.
+// The "{$}" and "{name...}" wildcard must occur at the end of PATH.
+// PATH may end with a '/'.
+// Wildcard names in a path must be distinct.
+func parsePattern(s string) (_ *pattern, err error) {
+ if len(s) == 0 {
+ return nil, errors.New("empty pattern")
+ }
+ off := 0 // offset into string
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("at offset %d: %w", off, err)
+ }
+ }()
+
+ method, rest, found := strings.Cut(s, " ")
+ if !found {
+ rest = method
+ method = ""
+ }
+ if method != "" && !validMethod(method) {
+ return nil, fmt.Errorf("invalid method %q", method)
+ }
+ p := &pattern{str: s, method: method}
+
+ if found {
+ off = len(method) + 1
+ }
+ i := strings.IndexByte(rest, '/')
+ if i < 0 {
+ return nil, errors.New("host/path missing /")
+ }
+ p.host = rest[:i]
+ rest = rest[i:]
+ if j := strings.IndexByte(p.host, '{'); j >= 0 {
+ off += j
+ return nil, errors.New("host contains '{' (missing initial '/'?)")
+ }
+ // At this point, rest is the path.
+ off += i
+
+ // An unclean path with a method that is not CONNECT can never match,
+ // because paths are cleaned before matching.
+ if method != "" && method != "CONNECT" && rest != cleanPath(rest) {
+ return nil, errors.New("non-CONNECT pattern with unclean path can never match")
+ }
+
+ seenNames := map[string]bool{} // remember wildcard names to catch dups
+ for len(rest) > 0 {
+ // Invariant: rest[0] == '/'.
+ rest = rest[1:]
+ off = len(s) - len(rest)
+ if len(rest) == 0 {
+ // Trailing slash.
+ p.segments = append(p.segments, segment{wild: true, multi: true})
+ break
+ }
+ i := strings.IndexByte(rest, '/')
+ if i < 0 {
+ i = len(rest)
+ }
+ var seg string
+ seg, rest = rest[:i], rest[i:]
+ if i := strings.IndexByte(seg, '{'); i < 0 {
+ // Literal.
+ seg = pathUnescape(seg)
+ p.segments = append(p.segments, segment{s: seg})
+ } else {
+ // Wildcard.
+ if i != 0 {
+ return nil, errors.New("bad wildcard segment (must start with '{')")
+ }
+ if seg[len(seg)-1] != '}' {
+ return nil, errors.New("bad wildcard segment (must end with '}')")
+ }
+ name := seg[1 : len(seg)-1]
+ if name == "$" {
+ if len(rest) != 0 {
+ return nil, errors.New("{$} not at end")
+ }
+ p.segments = append(p.segments, segment{s: "/"})
+ break
+ }
+ name, multi := strings.CutSuffix(name, "...")
+ if multi && len(rest) != 0 {
+ return nil, errors.New("{...} wildcard not at end")
+ }
+ if name == "" {
+ return nil, errors.New("empty wildcard")
+ }
+ if !isValidWildcardName(name) {
+ return nil, fmt.Errorf("bad wildcard name %q", name)
+ }
+ if seenNames[name] {
+ return nil, fmt.Errorf("duplicate wildcard name %q", name)
+ }
+ seenNames[name] = true
+ p.segments = append(p.segments, segment{s: name, wild: true, multi: multi})
+ }
+ }
+ return p, nil
+}
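To make the representation concrete, a hypothetical in-package call (the resulting values follow from the rules implemented above):

    p, err := parsePattern("GET example.com/items/{id}")
    // err == nil
    // p.method   == "GET"
    // p.host     == "example.com"
    // p.segments == []segment{{s: "items"}, {s: "id", wild: true}}
    //
    // Trailing-slash and {$} forms, per the comments on type pattern:
    //   "/a/"    => [{s: "a"}, {wild: true, multi: true}]
    //   "/a/{$}" => [{s: "a"}, {s: "/"}]
    _, _ = p, err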
+
+func isValidWildcardName(s string) bool {
+ if s == "" {
+ return false
+ }
+ // Valid Go identifier.
+ for i, c := range s {
+ if !unicode.IsLetter(c) && c != '_' && (i == 0 || !unicode.IsDigit(c)) {
+ return false
+ }
+ }
+ return true
+}
+
+func pathUnescape(path string) string {
+ u, err := url.PathUnescape(path)
+ if err != nil {
+ // Invalidly escaped path; use the original
+ return path
+ }
+ return u
+}
+
+// relationship is a relationship between two patterns, p1 and p2.
+type relationship string
+
+const (
+ equivalent relationship = "equivalent" // both match the same requests
+ moreGeneral relationship = "moreGeneral" // p1 matches everything p2 does & more
+ moreSpecific relationship = "moreSpecific" // p2 matches everything p1 does & more
+ disjoint relationship = "disjoint" // there is no request that both match
+ overlaps relationship = "overlaps" // there is a request that both match, but neither is more specific
+)
+
+// conflictsWith reports whether p1 conflicts with p2, that is, whether
+// there is a request that both match but where neither is higher precedence
+// than the other.
+//
+// Precedence is defined by two rules:
+// 1. Patterns with a host win over patterns without a host.
+// 2. Patterns whose method and path is more specific win. One pattern is more
+// specific than another if the second matches all the (method, path) pairs
+// of the first and more.
+//
+// If rule 1 doesn't apply, then two patterns conflict if their relationship
+// is either equivalence (they match the same set of requests) or overlap
+// (they both match some requests, but neither is more specific than the other).
+func (p1 *pattern) conflictsWith(p2 *pattern) bool {
+ if p1.host != p2.host {
+ // Either one host is empty and the other isn't, in which case the
+ // one with the host wins by rule 1, or neither host is empty
+ // and they differ, so they won't match the same paths.
+ return false
+ }
+ rel := p1.comparePathsAndMethods(p2)
+ return rel == equivalent || rel == overlaps
+}
+
+func (p1 *pattern) comparePathsAndMethods(p2 *pattern) relationship {
+ mrel := p1.compareMethods(p2)
+ // Optimization: avoid a call to comparePaths.
+ if mrel == disjoint {
+ return disjoint
+ }
+ prel := p1.comparePaths(p2)
+ return combineRelationships(mrel, prel)
+}
+
+// compareMethods determines the relationship between the method
+// part of patterns p1 and p2.
+//
+// A method can either be empty, "GET", or something else.
+// The empty string matches any method, so it is the most general.
+// "GET" matches both GET and HEAD.
+// Anything else matches only itself.
+func (p1 *pattern) compareMethods(p2 *pattern) relationship {
+ if p1.method == p2.method {
+ return equivalent
+ }
+ if p1.method == "" {
+ // p1 matches any method, but p2 does not, so p1 is more general.
+ return moreGeneral
+ }
+ if p2.method == "" {
+ return moreSpecific
+ }
+ if p1.method == "GET" && p2.method == "HEAD" {
+ // p1 matches GET and HEAD; p2 matches only HEAD.
+ return moreGeneral
+ }
+ if p2.method == "GET" && p1.method == "HEAD" {
+ return moreSpecific
+ }
+ return disjoint
+}
+
+// comparePaths determines the relationship between the path
+// part of two patterns.
+func (p1 *pattern) comparePaths(p2 *pattern) relationship {
+ // Optimization: if a path pattern doesn't end in a multi ("...") wildcard, then it
+ // can only match paths with the same number of segments.
+ if len(p1.segments) != len(p2.segments) && !p1.lastSegment().multi && !p2.lastSegment().multi {
+ return disjoint
+ }
+
+ // Consider corresponding segments in the two path patterns.
+ var segs1, segs2 []segment
+ rel := equivalent
+ for segs1, segs2 = p1.segments, p2.segments; len(segs1) > 0 && len(segs2) > 0; segs1, segs2 = segs1[1:], segs2[1:] {
+ rel = combineRelationships(rel, compareSegments(segs1[0], segs2[0]))
+ if rel == disjoint {
+ return rel
+ }
+ }
+ // We've reached the end of the corresponding segments of the patterns.
+ // If they have the same number of segments, then we've already determined
+ // their relationship.
+ if len(segs1) == 0 && len(segs2) == 0 {
+ return rel
+ }
+ // Otherwise, the only way they could fail to be disjoint is if the shorter
+ // pattern ends in a multi. In that case, that multi is more general
+ // than the remainder of the longer pattern, so combine those two relationships.
+ if len(segs1) < len(segs2) && p1.lastSegment().multi {
+ return combineRelationships(rel, moreGeneral)
+ }
+ if len(segs2) < len(segs1) && p2.lastSegment().multi {
+ return combineRelationships(rel, moreSpecific)
+ }
+ return disjoint
+}
+
+// compareSegments determines the relationship between two segments.
+func compareSegments(s1, s2 segment) relationship {
+ if s1.multi && s2.multi {
+ return equivalent
+ }
+ if s1.multi {
+ return moreGeneral
+ }
+ if s2.multi {
+ return moreSpecific
+ }
+ if s1.wild && s2.wild {
+ return equivalent
+ }
+ if s1.wild {
+ if s2.s == "/" {
+ // A single wildcard doesn't match a trailing slash.
+ return disjoint
+ }
+ return moreGeneral
+ }
+ if s2.wild {
+ if s1.s == "/" {
+ return disjoint
+ }
+ return moreSpecific
+ }
+ // Both literals.
+ if s1.s == s2.s {
+ return equivalent
+ }
+ return disjoint
+}
+
+// combineRelationships determines the overall relationship of two patterns
+// given the relationships of a partition of the patterns into two parts.
+//
+// For example, if p1 is more general than p2 in one way but equivalent
+// in the other, then it is more general overall.
+//
+// Or if p1 is more general in one way and more specific in the other, then
+// they overlap.
+func combineRelationships(r1, r2 relationship) relationship {
+ switch r1 {
+ case equivalent:
+ return r2
+ case disjoint:
+ return disjoint
+ case overlaps:
+ if r2 == disjoint {
+ return disjoint
+ }
+ return overlaps
+ case moreGeneral, moreSpecific:
+ switch r2 {
+ case equivalent:
+ return r1
+ case inverseRelationship(r1):
+ return overlaps
+ default:
+ return r2
+ }
+ default:
+ panic(fmt.Sprintf("unknown relationship %q", r1))
+ }
+}
+
+// If p1 has relationship `r` to p2, then
+// p2 has inverseRelationship(r) to p1.
+func inverseRelationship(r relationship) relationship {
+ switch r {
+ case moreSpecific:
+ return moreGeneral
+ case moreGeneral:
+ return moreSpecific
+ default:
+ return r
+ }
+}
+
+// isLitOrSingle reports whether the segment is a non-dollar literal or a single wildcard.
+func isLitOrSingle(seg segment) bool {
+ if seg.wild {
+ return !seg.multi
+ }
+ return seg.s != "/"
+}
+
+// describeConflict returns an explanation of why two patterns conflict.
+func describeConflict(p1, p2 *pattern) string {
+ mrel := p1.compareMethods(p2)
+ prel := p1.comparePaths(p2)
+ rel := combineRelationships(mrel, prel)
+ if rel == equivalent {
+ return fmt.Sprintf("%s matches the same requests as %s", p1, p2)
+ }
+ if rel != overlaps {
+ panic("describeConflict called with non-conflicting patterns")
+ }
+ if prel == overlaps {
+ return fmt.Sprintf(`%[1]s and %[2]s both match some paths, like %[3]q.
+But neither is more specific than the other.
+%[1]s matches %[4]q, but %[2]s doesn't.
+%[2]s matches %[5]q, but %[1]s doesn't.`,
+ p1, p2, commonPath(p1, p2), differencePath(p1, p2), differencePath(p2, p1))
+ }
+ if mrel == moreGeneral && prel == moreSpecific {
+ return fmt.Sprintf("%s matches more methods than %s, but has a more specific path pattern", p1, p2)
+ }
+ if mrel == moreSpecific && prel == moreGeneral {
+ return fmt.Sprintf("%s matches fewer methods than %s, but has a more general path pattern", p1, p2)
+ }
+ return fmt.Sprintf("bug: unexpected way for two patterns %s and %s to conflict: methods %s, paths %s", p1, p2, mrel, prel)
+}
+
+// writeMatchingPath writes to b a path that matches the segments.
+func writeMatchingPath(b *strings.Builder, segs []segment) {
+ for _, s := range segs {
+ writeSegment(b, s)
+ }
+}
+
+func writeSegment(b *strings.Builder, s segment) {
+ b.WriteByte('/')
+ if !s.multi && s.s != "/" {
+ b.WriteString(s.s)
+ }
+}
+
+// commonPath returns a path that both p1 and p2 match.
+// It assumes there is such a path.
+func commonPath(p1, p2 *pattern) string {
+ var b strings.Builder
+ var segs1, segs2 []segment
+ for segs1, segs2 = p1.segments, p2.segments; len(segs1) > 0 && len(segs2) > 0; segs1, segs2 = segs1[1:], segs2[1:] {
+ if s1 := segs1[0]; s1.wild {
+ writeSegment(&b, segs2[0])
+ } else {
+ writeSegment(&b, s1)
+ }
+ }
+ if len(segs1) > 0 {
+ writeMatchingPath(&b, segs1)
+ } else if len(segs2) > 0 {
+ writeMatchingPath(&b, segs2)
+ }
+ return b.String()
+}
+
+// differencePath returns a path that p1 matches and p2 doesn't.
+// It assumes there is such a path.
+func differencePath(p1, p2 *pattern) string {
+ var b strings.Builder
+
+ var segs1, segs2 []segment
+ for segs1, segs2 = p1.segments, p2.segments; len(segs1) > 0 && len(segs2) > 0; segs1, segs2 = segs1[1:], segs2[1:] {
+ s1 := segs1[0]
+ s2 := segs2[0]
+ if s1.multi && s2.multi {
+ // From here the patterns match the same paths, so we must have found a difference earlier.
+ b.WriteByte('/')
+ return b.String()
+ }
+ if s1.multi && !s2.multi {
+ // s1 ends in a "..." wildcard but s2 does not.
+ // A trailing slash will distinguish them, unless s2 ends in "{$}",
+ // in which case any segment will do; prefer the wildcard name if
+ // it has one.
+ b.WriteByte('/')
+ if s2.s == "/" {
+ if s1.s != "" {
+ b.WriteString(s1.s)
+ } else {
+ b.WriteString("x")
+ }
+ }
+ return b.String()
+ }
+ if !s1.multi && s2.multi {
+ writeSegment(&b, s1)
+ } else if s1.wild && s2.wild {
+ // Both patterns will match whatever we put here; use
+ // the first wildcard name.
+ writeSegment(&b, s1)
+ } else if s1.wild && !s2.wild {
+ // s1 is a wildcard, s2 is a literal.
+ // Any segment other than s2.s will work.
+ // Prefer the wildcard name, but if it's the same as the literal,
+ // tweak the literal.
+ if s1.s != s2.s {
+ writeSegment(&b, s1)
+ } else {
+ b.WriteByte('/')
+ b.WriteString(s2.s + "x")
+ }
+ } else if !s1.wild && s2.wild {
+ writeSegment(&b, s1)
+ } else {
+ // Both are literals. A precondition of this function is that the
+ // patterns overlap, so they must be the same literal. Use it.
+ if s1.s != s2.s {
+ panic(fmt.Sprintf("literals differ: %q and %q", s1.s, s2.s))
+ }
+ writeSegment(&b, s1)
+ }
+ }
+ if len(segs1) > 0 {
+ // p1 is longer than p2, and p2 does not end in a multi.
+ // Anything that matches the rest of p1 will do.
+ writeMatchingPath(&b, segs1)
+ } else if len(segs2) > 0 {
+ writeMatchingPath(&b, segs2)
+ }
+ return b.String()
+}
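In Go 1.22's ServeMux these relationships surface at registration time; a sketch of two overlapping patterns (handlers are stubs):

    mux := http.NewServeMux()
    mux.HandleFunc("/items/{id}", func(w http.ResponseWriter, r *http.Request) {})
    // Both patterns match "/items/latest", yet "/items/{id}" alone matches
    // "/items/id" and "/{category}/latest" alone matches "/books/latest",
    // so neither is more specific: the second registration panics with a
    // message built by describeConflict.
    mux.HandleFunc("/{category}/latest", func(w http.ResponseWriter, r *http.Request) {})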
diff --git a/contrib/go/_std_1.22/src/net/http/pprof/pprof.go b/contrib/go/_std_1.22/src/net/http/pprof/pprof.go
new file mode 100644
index 0000000000..bc48f11834
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/pprof/pprof.go
@@ -0,0 +1,464 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pprof serves via its HTTP server runtime profiling data
+// in the format expected by the pprof visualization tool.
+//
+// The package is typically only imported for the side effect of
+// registering its HTTP handlers.
+// The handled paths all begin with /debug/pprof/.
+//
+// To use pprof, link this package into your program:
+//
+// import _ "net/http/pprof"
+//
+// If your application is not already running an http server, you
+// need to start one. Add "net/http" and "log" to your imports and
+// the following code to your main function:
+//
+// go func() {
+// log.Println(http.ListenAndServe("localhost:6060", nil))
+// }()
+//
+// By default, all the profiles listed in [runtime/pprof.Profile] are
+// available (via [Handler]), in addition to the [Cmdline], [Profile], [Symbol],
+// and [Trace] profiles defined in this package.
+// If you are not using DefaultServeMux, you will have to register handlers
+// with the mux you are using.
+//
+// # Parameters
+//
+// Parameters can be passed via GET query params:
+//
+// - debug=N (all profiles): response format: N = 0: binary (default), N > 0: plaintext
+// - gc=N (heap profile): N > 0: run a garbage collection cycle before profiling
+// - seconds=N (allocs, block, goroutine, heap, mutex, threadcreate profiles): return a delta profile
+// - seconds=N (cpu (profile), trace profiles): profile for the given duration
+//
+// # Usage examples
+//
+// Use the pprof tool to look at the heap profile:
+//
+// go tool pprof http://localhost:6060/debug/pprof/heap
+//
+// Or to look at a 30-second CPU profile:
+//
+// go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30
+//
+// Or to look at the goroutine blocking profile, after calling
+// [runtime.SetBlockProfileRate] in your program:
+//
+// go tool pprof http://localhost:6060/debug/pprof/block
+//
+// Or to look at the holders of contended mutexes, after calling
+// [runtime.SetMutexProfileFraction] in your program:
+//
+// go tool pprof http://localhost:6060/debug/pprof/mutex
+//
+// The package also exports a handler that serves execution trace data
+// for the "go tool trace" command. To collect a 5-second execution trace:
+//
+// curl -o trace.out http://localhost:6060/debug/pprof/trace?seconds=5
+// go tool trace trace.out
+//
+// To view all available profiles, open http://localhost:6060/debug/pprof/
+// in your browser.
+//
+// For a study of the facility in action, visit
+// https://blog.golang.org/2011/06/profiling-go-programs.html.
+package pprof
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "html"
+ "internal/profile"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "runtime/trace"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+func init() {
+ http.HandleFunc("/debug/pprof/", Index)
+ http.HandleFunc("/debug/pprof/cmdline", Cmdline)
+ http.HandleFunc("/debug/pprof/profile", Profile)
+ http.HandleFunc("/debug/pprof/symbol", Symbol)
+ http.HandleFunc("/debug/pprof/trace", Trace)
+}
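When DefaultServeMux is not in use, the same handlers can be attached to any mux from an importing program, mirroring this init function (the address is a placeholder):

    mux := http.NewServeMux()
    mux.HandleFunc("/debug/pprof/", pprof.Index)
    mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
    mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
    mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
    mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
    log.Fatal(http.ListenAndServe("localhost:6060", mux))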
+
+// Cmdline responds with the running program's
+// command line, with arguments separated by NUL bytes.
+// The package initialization registers it as /debug/pprof/cmdline.
+func Cmdline(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprint(w, strings.Join(os.Args, "\x00"))
+}
+
+func sleep(r *http.Request, d time.Duration) {
+ select {
+ case <-time.After(d):
+ case <-r.Context().Done():
+ }
+}
+
+func durationExceedsWriteTimeout(r *http.Request, seconds float64) bool {
+ srv, ok := r.Context().Value(http.ServerContextKey).(*http.Server)
+ return ok && srv.WriteTimeout != 0 && seconds >= srv.WriteTimeout.Seconds()
+}
+
+func serveError(w http.ResponseWriter, status int, txt string) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Go-Pprof", "1")
+ w.Header().Del("Content-Disposition")
+ w.WriteHeader(status)
+ fmt.Fprintln(w, txt)
+}
+
+// Profile responds with the pprof-formatted CPU profile.
+// Profiling lasts for the duration specified in the "seconds" GET parameter, or for 30 seconds if not specified.
+// The package initialization registers it as /debug/pprof/profile.
+func Profile(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ sec, err := strconv.ParseInt(r.FormValue("seconds"), 10, 64)
+ if sec <= 0 || err != nil {
+ sec = 30
+ }
+
+ if durationExceedsWriteTimeout(r, float64(sec)) {
+ serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
+ return
+ }
+
+ // Set Content Type assuming StartCPUProfile will work,
+ // because if it does it starts writing.
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", `attachment; filename="profile"`)
+ if err := pprof.StartCPUProfile(w); err != nil {
+ // StartCPUProfile failed, so no writes yet.
+ serveError(w, http.StatusInternalServerError,
+ fmt.Sprintf("Could not enable CPU profiling: %s", err))
+ return
+ }
+ sleep(r, time.Duration(sec)*time.Second)
+ pprof.StopCPUProfile()
+}
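The durationExceedsWriteTimeout guard above means a server must be configured to permit long profiles; a sketch (values are placeholders):

    srv := &http.Server{
        Addr:         "localhost:6060",
        Handler:      mux,              // a mux with the pprof handlers attached
        WriteTimeout: 31 * time.Second, // must exceed the largest ?seconds= value
    }
    log.Fatal(srv.ListenAndServe())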
+
+// Trace responds with the execution trace in binary form.
+// Tracing lasts for the duration specified in the "seconds" GET parameter, or for 1 second if not specified.
+// The package initialization registers it as /debug/pprof/trace.
+func Trace(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ sec, err := strconv.ParseFloat(r.FormValue("seconds"), 64)
+ if sec <= 0 || err != nil {
+ sec = 1
+ }
+
+ if durationExceedsWriteTimeout(r, sec) {
+ serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
+ return
+ }
+
+ // Set Content Type assuming trace.Start will work,
+ // because if it does it starts writing.
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", `attachment; filename="trace"`)
+ if err := trace.Start(w); err != nil {
+ // trace.Start failed, so no writes yet.
+ serveError(w, http.StatusInternalServerError,
+ fmt.Sprintf("Could not enable tracing: %s", err))
+ return
+ }
+ sleep(r, time.Duration(sec*float64(time.Second)))
+ trace.Stop()
+}
+
+// Symbol looks up the program counters listed in the request,
+// responding with a table mapping program counters to function names.
+// The package initialization registers it as /debug/pprof/symbol.
+func Symbol(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+
+ // We have to read the whole POST body before
+ // writing any output. Buffer the output here.
+ var buf bytes.Buffer
+
+ // We don't know how many symbols we have, but we
+ // do have symbol information. Pprof only cares whether
+ // this number is 0 (no symbols available) or > 0.
+ fmt.Fprintf(&buf, "num_symbols: 1\n")
+
+ var b *bufio.Reader
+ if r.Method == "POST" {
+ b = bufio.NewReader(r.Body)
+ } else {
+ b = bufio.NewReader(strings.NewReader(r.URL.RawQuery))
+ }
+
+ for {
+ word, err := b.ReadSlice('+')
+ if err == nil {
+ word = word[0 : len(word)-1] // trim +
+ }
+ pc, _ := strconv.ParseUint(string(word), 0, 64)
+ if pc != 0 {
+ f := runtime.FuncForPC(uintptr(pc))
+ if f != nil {
+ fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name())
+ }
+ }
+
+ // Wait until here to check for err; the last
+ // symbol will have an err because it doesn't end in +.
+ if err != nil {
+ if err != io.EOF {
+ fmt.Fprintf(&buf, "reading request: %v\n", err)
+ }
+ break
+ }
+ }
+
+ w.Write(buf.Bytes())
+}
+
+// Handler returns an HTTP handler that serves the named profile.
+// Available profiles can be found in [runtime/pprof.Profile].
+func Handler(name string) http.Handler {
+ return handler(name)
+}
+
+type handler string
+
+func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ p := pprof.Lookup(string(name))
+ if p == nil {
+ serveError(w, http.StatusNotFound, "Unknown profile")
+ return
+ }
+ if sec := r.FormValue("seconds"); sec != "" {
+ name.serveDeltaProfile(w, r, p, sec)
+ return
+ }
+ gc, _ := strconv.Atoi(r.FormValue("gc"))
+ if name == "heap" && gc > 0 {
+ runtime.GC()
+ }
+ debug, _ := strconv.Atoi(r.FormValue("debug"))
+ if debug != 0 {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ } else {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, name))
+ }
+ p.WriteTo(w, debug)
+}
+
+func (name handler) serveDeltaProfile(w http.ResponseWriter, r *http.Request, p *pprof.Profile, secStr string) {
+ sec, err := strconv.ParseInt(secStr, 10, 64)
+ if err != nil || sec <= 0 {
+ serveError(w, http.StatusBadRequest, `invalid value for "seconds" - must be a positive integer`)
+ return
+ }
+ if !profileSupportsDelta[name] {
+ serveError(w, http.StatusBadRequest, `"seconds" parameter is not supported for this profile type`)
+ return
+ }
+ // 'name' should be a key in profileSupportsDelta.
+ if durationExceedsWriteTimeout(r, float64(sec)) {
+ serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout")
+ return
+ }
+ debug, _ := strconv.Atoi(r.FormValue("debug"))
+ if debug != 0 {
+ serveError(w, http.StatusBadRequest, "seconds and debug params are incompatible")
+ return
+ }
+ p0, err := collectProfile(p)
+ if err != nil {
+ serveError(w, http.StatusInternalServerError, "failed to collect profile")
+ return
+ }
+
+ t := time.NewTimer(time.Duration(sec) * time.Second)
+ defer t.Stop()
+
+ select {
+ case <-r.Context().Done():
+ err := r.Context().Err()
+ if err == context.DeadlineExceeded {
+ serveError(w, http.StatusRequestTimeout, err.Error())
+ } else { // TODO: what's a good status code for canceled requests? 400?
+ serveError(w, http.StatusInternalServerError, err.Error())
+ }
+ return
+ case <-t.C:
+ }
+
+ p1, err := collectProfile(p)
+ if err != nil {
+ serveError(w, http.StatusInternalServerError, "failed to collect profile")
+ return
+ }
+ ts := p1.TimeNanos
+ dur := p1.TimeNanos - p0.TimeNanos
+
+ p0.Scale(-1)
+
+ p1, err = profile.Merge([]*profile.Profile{p0, p1})
+ if err != nil {
+ serveError(w, http.StatusInternalServerError, "failed to compute delta")
+ return
+ }
+
+ p1.TimeNanos = ts // set since we don't know what profile.Merge set for TimeNanos.
+ p1.DurationNanos = dur
+
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s-delta"`, name))
+ p1.Write(w)
+}
+
+func collectProfile(p *pprof.Profile) (*profile.Profile, error) {
+ var buf bytes.Buffer
+ if err := p.WriteTo(&buf, 0); err != nil {
+ return nil, err
+ }
+ ts := time.Now().UnixNano()
+ p0, err := profile.Parse(&buf)
+ if err != nil {
+ return nil, err
+ }
+ p0.TimeNanos = ts
+ return p0, nil
+}
+
+var profileSupportsDelta = map[handler]bool{
+ "allocs": true,
+ "block": true,
+ "goroutine": true,
+ "heap": true,
+ "mutex": true,
+ "threadcreate": true,
+}
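A delta profile is requested by adding ?seconds=N to one of the profile types in this map; a sketch using a plain GET against a local server (the address is a placeholder):

    // Two mutex profiles are collected 10 seconds apart server-side;
    // the response body is their pprof-encoded difference.
    resp, err := http.Get("http://localhost:6060/debug/pprof/mutex?seconds=10")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    // Save resp.Body to a file and inspect it with `go tool pprof`.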
+
+var profileDescriptions = map[string]string{
+ "allocs": "A sampling of all past memory allocations",
+ "block": "Stack traces that led to blocking on synchronization primitives",
+ "cmdline": "The command line invocation of the current program",
+ "goroutine": "Stack traces of all current goroutines. Use debug=2 as a query parameter to export in the same format as an unrecovered panic.",
+ "heap": "A sampling of memory allocations of live objects. You can specify the gc GET parameter to run GC before taking the heap sample.",
+ "mutex": "Stack traces of holders of contended mutexes",
+ "profile": "CPU profile. You can specify the duration in the seconds GET parameter. After you get the profile file, use the go tool pprof command to investigate the profile.",
+ "threadcreate": "Stack traces that led to the creation of new OS threads",
+ "trace": "A trace of execution of the current program. You can specify the duration in the seconds GET parameter. After you get the trace file, use the go tool trace command to investigate the trace.",
+}
+
+type profileEntry struct {
+ Name string
+ Href string
+ Desc string
+ Count int
+}
+
+// Index responds with the pprof-formatted profile named by the request.
+// For example, "/debug/pprof/heap" serves the "heap" profile.
+// Index responds to a request for "/debug/pprof/" with an HTML page
+// listing the available profiles.
+func Index(w http.ResponseWriter, r *http.Request) {
+ if name, found := strings.CutPrefix(r.URL.Path, "/debug/pprof/"); found {
+ if name != "" {
+ handler(name).ServeHTTP(w, r)
+ return
+ }
+ }
+
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+
+ var profiles []profileEntry
+ for _, p := range pprof.Profiles() {
+ profiles = append(profiles, profileEntry{
+ Name: p.Name(),
+ Href: p.Name(),
+ Desc: profileDescriptions[p.Name()],
+ Count: p.Count(),
+ })
+ }
+
+ // Adding other profiles exposed from within this package
+ for _, p := range []string{"cmdline", "profile", "trace"} {
+ profiles = append(profiles, profileEntry{
+ Name: p,
+ Href: p,
+ Desc: profileDescriptions[p],
+ })
+ }
+
+ sort.Slice(profiles, func(i, j int) bool {
+ return profiles[i].Name < profiles[j].Name
+ })
+
+ if err := indexTmplExecute(w, profiles); err != nil {
+ log.Print(err)
+ }
+}
+
+func indexTmplExecute(w io.Writer, profiles []profileEntry) error {
+ var b bytes.Buffer
+ b.WriteString(`<html>
+<head>
+<title>/debug/pprof/</title>
+<style>
+.profile-name{
+ display:inline-block;
+ width:6rem;
+}
+</style>
+</head>
+<body>
+/debug/pprof/
+<br>
+<p>Set debug=1 as a query parameter to export in legacy text format</p>
+<br>
+Types of profiles available:
+<table>
+<thead><td>Count</td><td>Profile</td></thead>
+`)
+
+ for _, profile := range profiles {
+ link := &url.URL{Path: profile.Href, RawQuery: "debug=1"}
+ fmt.Fprintf(&b, "<tr><td>%d</td><td><a href='%s'>%s</a></td></tr>\n", profile.Count, link, html.EscapeString(profile.Name))
+ }
+
+ b.WriteString(`</table>
+<a href="goroutine?debug=2">full goroutine stack dump</a>
+<br>
+<p>
+Profile Descriptions:
+<ul>
+`)
+ for _, profile := range profiles {
+ fmt.Fprintf(&b, "<li><div class=profile-name>%s: </div> %s</li>\n", html.EscapeString(profile.Name), html.EscapeString(profile.Desc))
+ }
+ b.WriteString(`</ul>
+</p>
+</body>
+</html>`)
+
+ _, err := w.Write(b.Bytes())
+ return err
+}
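Handler is the piece to reach for when exposing a single named profile without the index page; a short sketch:

    mux := http.NewServeMux()
    // Serve only the heap profile, e.g. on an internal-only listener.
    mux.Handle("/internal/heap", pprof.Handler("heap"))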
diff --git a/contrib/go/_std_1.22/src/net/http/pprof/ya.make b/contrib/go/_std_1.22/src/net/http/pprof/ya.make
new file mode 100644
index 0000000000..808cb83687
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/pprof/ya.make
@@ -0,0 +1,7 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ pprof.go
+ )
+ENDIF()
+END()
diff --git a/contrib/go/_std_1.22/src/net/http/request.go b/contrib/go/_std_1.22/src/net/http/request.go
new file mode 100644
index 0000000000..99fdebcf9b
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/request.go
@@ -0,0 +1,1554 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Request reading and parsing.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "mime/multipart"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "net/url"
+ urlpkg "net/url"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/idna"
+)
+
+const (
+ defaultMaxMemory = 32 << 20 // 32 MB
+)
+
+// ErrMissingFile is returned by FormFile when the provided file field name
+// is either not present in the request or not a file field.
+var ErrMissingFile = errors.New("http: no such file")
+
+// ProtocolError represents an HTTP protocol error.
+//
+// Deprecated: Not all errors in the http package related to protocol errors
+// are of type ProtocolError.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (pe *ProtocolError) Error() string { return pe.ErrorString }
+
+// Is lets http.ErrNotSupported match errors.ErrUnsupported.
+func (pe *ProtocolError) Is(err error) bool {
+ return pe == ErrNotSupported && err == errors.ErrUnsupported
+}
+
+var (
+ // ErrNotSupported indicates that a feature is not supported.
+ //
+ // It is returned by ResponseController methods to indicate that
+ // the handler does not support the method, and by the Push method
+ // of Pusher implementations to indicate that HTTP/2 Push support
+ // is not available.
+ ErrNotSupported = &ProtocolError{"feature not supported"}
+
+ // Deprecated: ErrUnexpectedTrailer is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"}
+
+ // ErrMissingBoundary is returned by Request.MultipartReader when the
+ // request's Content-Type does not include a "boundary" parameter.
+ ErrMissingBoundary = &ProtocolError{"no multipart boundary param in Content-Type"}
+
+ // ErrNotMultipart is returned by Request.MultipartReader when the
+ // request's Content-Type is not multipart/form-data.
+ ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"}
+
+ // Deprecated: ErrHeaderTooLong is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrHeaderTooLong = &ProtocolError{"header too long"}
+
+ // Deprecated: ErrShortBody is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrShortBody = &ProtocolError{"entity body too short"}
+
+ // Deprecated: ErrMissingContentLength is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"}
+)
+
+func badStringError(what, val string) error { return fmt.Errorf("%s %q", what, val) }
+
+// Headers that Request.Write handles itself and should be skipped.
+var reqWriteExcludeHeader = map[string]bool{
+ "Host": true, // not in Header map anyway
+ "User-Agent": true,
+ "Content-Length": true,
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// A Request represents an HTTP request received by a server
+// or to be sent by a client.
+//
+// The field semantics differ slightly between client and server
+// usage. In addition to the notes on the fields below, see the
+// documentation for [Request.Write] and [RoundTripper].
+type Request struct {
+ // Method specifies the HTTP method (GET, POST, PUT, etc.).
+ // For client requests, an empty string means GET.
+ Method string
+
+ // URL specifies either the URI being requested (for server
+ // requests) or the URL to access (for client requests).
+ //
+ // For server requests, the URL is parsed from the URI
+ // supplied on the Request-Line as stored in RequestURI. For
+ // most requests, fields other than Path and RawQuery will be
+ // empty. (See RFC 7230, Section 5.3)
+ //
+ // For client requests, the URL's Host specifies the server to
+ // connect to, while the Request's Host field optionally
+ // specifies the Host header value to send in the HTTP
+ // request.
+ URL *url.URL
+
+ // The protocol version for incoming server requests.
+ //
+ // For client requests, these fields are ignored. The HTTP
+ // client code always uses either HTTP/1.1 or HTTP/2.
+ // See the docs on Transport for details.
+ Proto string // "HTTP/1.0"
+ ProtoMajor int // 1
+ ProtoMinor int // 0
+
+ // Header contains the request header fields either received
+ // by the server or to be sent by the client.
+ //
+ // If a server received a request with header lines,
+ //
+ // Host: example.com
+ // accept-encoding: gzip, deflate
+ // Accept-Language: en-us
+ // fOO: Bar
+ // foo: two
+ //
+ // then
+ //
+ // Header = map[string][]string{
+ // "Accept-Encoding": {"gzip, deflate"},
+ // "Accept-Language": {"en-us"},
+ // "Foo": {"Bar", "two"},
+ // }
+ //
+ // For incoming requests, the Host header is promoted to the
+ // Request.Host field and removed from the Header map.
+ //
+ // HTTP defines that header names are case-insensitive. The
+ // request parser implements this by using CanonicalHeaderKey,
+ // making the first character and any characters following a
+ // hyphen uppercase and the rest lowercase.
+ //
+ // For client requests, certain headers such as Content-Length
+ // and Connection are automatically written when needed and
+ // values in Header may be ignored. See the documentation
+ // for the Request.Write method.
+ Header Header
+
+ // Body is the request's body.
+ //
+ // For client requests, a nil body means the request has no
+ // body, such as a GET request. The HTTP Client's Transport
+ // is responsible for calling the Close method.
+ //
+ // For server requests, the Request Body is always non-nil
+ // but will return EOF immediately when no body is present.
+ // The Server will close the request body. The ServeHTTP
+ // Handler does not need to.
+ //
+ // Body must allow Read to be called concurrently with Close.
+ // In particular, calling Close should unblock a Read waiting
+ // for input.
+ Body io.ReadCloser
+
+ // GetBody defines an optional func to return a new copy of
+ // Body. It is used for client requests when a redirect requires
+ // reading the body more than once. Use of GetBody still
+ // requires setting Body.
+ //
+ // For server requests, it is unused.
+ GetBody func() (io.ReadCloser, error)
+
+ // ContentLength records the length of the associated content.
+ // The value -1 indicates that the length is unknown.
+ // Values >= 0 indicate that the given number of bytes may
+ // be read from Body.
+ //
+ // For client requests, a value of 0 with a non-nil Body is
+ // also treated as unknown.
+ ContentLength int64
+
+ // TransferEncoding lists the transfer encodings from outermost to
+ // innermost. An empty list denotes the "identity" encoding.
+ // TransferEncoding can usually be ignored; chunked encoding is
+ // automatically added and removed as necessary when sending and
+ // receiving requests.
+ TransferEncoding []string
+
+ // Close indicates whether to close the connection after
+ // replying to this request (for servers) or after sending this
+ // request and reading its response (for clients).
+ //
+ // For server requests, the HTTP server handles this automatically
+ // and this field is not needed by Handlers.
+ //
+ // For client requests, setting this field prevents re-use of
+ // TCP connections between requests to the same hosts, as if
+ // Transport.DisableKeepAlives were set.
+ Close bool
+
+ // For server requests, Host specifies the host on which the
+ // URL is sought. For HTTP/1 (per RFC 7230, section 5.4), this
+ // is either the value of the "Host" header or the host name
+ // given in the URL itself. For HTTP/2, it is the value of the
+ // ":authority" pseudo-header field.
+ // It may be of the form "host:port". For international domain
+ // names, Host may be in Punycode or Unicode form. Use
+ // golang.org/x/net/idna to convert it to either format if
+ // needed.
+ // To prevent DNS rebinding attacks, server Handlers should
+ // validate that the Host header has a value for which the
+ // Handler considers itself authoritative. The included
+ // ServeMux supports patterns registered to particular host
+ // names and thus protects its registered Handlers.
+ //
+ // For client requests, Host optionally overrides the Host
+ // header to send. If empty, the Request.Write method uses
+ // the value of URL.Host. Host may contain an international
+ // domain name.
+ Host string
+
+ // Form contains the parsed form data, including both the URL
+ // field's query parameters and the PATCH, POST, or PUT form data.
+ // This field is only available after ParseForm is called.
+ // The HTTP client ignores Form and uses Body instead.
+ Form url.Values
+
+ // PostForm contains the parsed form data from PATCH, POST
+ // or PUT body parameters.
+ //
+ // This field is only available after ParseForm is called.
+ // The HTTP client ignores PostForm and uses Body instead.
+ PostForm url.Values
+
+ // MultipartForm is the parsed multipart form, including file uploads.
+ // This field is only available after ParseMultipartForm is called.
+ // The HTTP client ignores MultipartForm and uses Body instead.
+ MultipartForm *multipart.Form
+
+ // Trailer specifies additional headers that are sent after the request
+ // body.
+ //
+ // For server requests, the Trailer map initially contains only the
+ // trailer keys, with nil values. (The client declares which trailers it
+ // will later send.) While the handler is reading from Body, it must
+ // not reference Trailer. After reading from Body returns EOF, Trailer
+ // can be read again and will contain non-nil values, if they were sent
+ // by the client.
+ //
+ // For client requests, Trailer must be initialized to a map containing
+ // the trailer keys to later send. The values may be nil or their final
+ // values. The ContentLength must be 0 or -1, to send a chunked request.
+ // After the HTTP request is sent the map values can be updated while
+ // the request body is read. Once the body returns EOF, the caller must
+ // not mutate Trailer.
+ //
+ // Few HTTP clients, servers, or proxies support HTTP trailers.
+ Trailer Header
+
+ // RemoteAddr allows HTTP servers and other software to record
+ // the network address that sent the request, usually for
+ // logging. This field is not filled in by ReadRequest and
+ // has no defined format. The HTTP server in this package
+ // sets RemoteAddr to an "IP:port" address before invoking a
+ // handler.
+ // This field is ignored by the HTTP client.
+ RemoteAddr string
+
+ // RequestURI is the unmodified request-target of the
+ // Request-Line (RFC 7230, Section 3.1.1) as sent by the client
+ // to a server. Usually the URL field should be used instead.
+ // It is an error to set this field in an HTTP client request.
+ RequestURI string
+
+ // TLS allows HTTP servers and other software to record
+ // information about the TLS connection on which the request
+ // was received. This field is not filled in by ReadRequest.
+ // The HTTP server in this package sets the field for
+ // TLS-enabled connections before invoking a handler;
+ // otherwise it leaves the field nil.
+ // This field is ignored by the HTTP client.
+ TLS *tls.ConnectionState
+
+ // Cancel is an optional channel whose closure indicates that the client
+ // request should be regarded as canceled. Not all implementations of
+ // RoundTripper may support Cancel.
+ //
+ // For server requests, this field is not applicable.
+ //
+ // Deprecated: Set the Request's context with NewRequestWithContext
+ // instead. If a Request's Cancel field and context are both
+ // set, it is undefined whether Cancel is respected.
+ Cancel <-chan struct{}
+
+ // Response is the redirect response which caused this request
+ // to be created. This field is only populated during client
+ // redirects.
+ Response *Response
+
+ // ctx is either the client or server context. It should only
+ // be modified via copying the whole Request using Clone or WithContext.
+ // It is unexported to prevent people from using Context wrong
+ // and mutating the contexts held by callers of the same request.
+ ctx context.Context
+
+ // The following fields are for requests matched by ServeMux.
+ pat *pattern // the pattern that matched
+ matches []string // values for the matching wildcards in pat
+ otherValues map[string]string // for calls to SetPathValue that don't match a wildcard
+}
+
+// Context returns the request's context. To change the context, use
+// [Request.Clone] or [Request.WithContext].
+//
+// The returned context is always non-nil; it defaults to the
+// background context.
+//
+// For outgoing client requests, the context controls cancellation.
+//
+// For incoming server requests, the context is canceled when the
+// client's connection closes, the request is canceled (with HTTP/2),
+// or when the ServeHTTP method returns.
+func (r *Request) Context() context.Context {
+ if r.ctx != nil {
+ return r.ctx
+ }
+ return context.Background()
+}
+
+// WithContext returns a shallow copy of r with its context changed
+// to ctx. The provided ctx must be non-nil.
+//
+// For outgoing client request, the context controls the entire
+// lifetime of a request and its response: obtaining a connection,
+// sending the request, and reading the response headers and body.
+//
+// To create a new request with a context, use [NewRequestWithContext].
+// To make a deep copy of a request with a new context, use [Request.Clone].
+func (r *Request) WithContext(ctx context.Context) *Request {
+ if ctx == nil {
+ panic("nil context")
+ }
+ r2 := new(Request)
+ *r2 = *r
+ r2.ctx = ctx
+ return r2
+}
+
+// Clone returns a deep copy of r with its context changed to ctx.
+// The provided ctx must be non-nil.
+//
+// For an outgoing client request, the context controls the entire
+// lifetime of a request and its response: obtaining a connection,
+// sending the request, and reading the response headers and body.
+func (r *Request) Clone(ctx context.Context) *Request {
+ if ctx == nil {
+ panic("nil context")
+ }
+ r2 := new(Request)
+ *r2 = *r
+ r2.ctx = ctx
+ r2.URL = cloneURL(r.URL)
+ if r.Header != nil {
+ r2.Header = r.Header.Clone()
+ }
+ if r.Trailer != nil {
+ r2.Trailer = r.Trailer.Clone()
+ }
+ if s := r.TransferEncoding; s != nil {
+ s2 := make([]string, len(s))
+ copy(s2, s)
+ r2.TransferEncoding = s2
+ }
+ r2.Form = cloneURLValues(r.Form)
+ r2.PostForm = cloneURLValues(r.PostForm)
+ r2.MultipartForm = cloneMultipartForm(r.MultipartForm)
+
+ // Copy matches and otherValues. See issue 61410.
+ if s := r.matches; s != nil {
+ s2 := make([]string, len(s))
+ copy(s2, s)
+ r2.matches = s2
+ }
+ if s := r.otherValues; s != nil {
+ s2 := make(map[string]string, len(s))
+ for k, v := range s {
+ s2[k] = v
+ }
+ r2.otherValues = s2
+ }
+ return r2
+}
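The practical difference: WithContext shares the URL, Header, and form maps with the original, while Clone copies them. A minimal client sketch (the URL and header name are placeholders):

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com", nil)
    if err != nil {
        log.Fatal(err)
    }
    req2 := req.Clone(context.Background())
    req2.Header.Set("X-Variant", "b") // req's headers are unaffected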
+
+// ProtoAtLeast reports whether the HTTP protocol used
+// in the request is at least major.minor.
+func (r *Request) ProtoAtLeast(major, minor int) bool {
+ return r.ProtoMajor > major ||
+ r.ProtoMajor == major && r.ProtoMinor >= minor
+}
+
+// UserAgent returns the client's User-Agent, if sent in the request.
+func (r *Request) UserAgent() string {
+ return r.Header.Get("User-Agent")
+}
+
+// Cookies parses and returns the HTTP cookies sent with the request.
+func (r *Request) Cookies() []*Cookie {
+ return readCookies(r.Header, "")
+}
+
+// ErrNoCookie is returned by Request's Cookie method when a cookie is not found.
+var ErrNoCookie = errors.New("http: named cookie not present")
+
+// Cookie returns the named cookie provided in the request or
+// [ErrNoCookie] if not found.
+// If multiple cookies match the given name, only one cookie will
+// be returned.
+func (r *Request) Cookie(name string) (*Cookie, error) {
+ if name == "" {
+ return nil, ErrNoCookie
+ }
+ for _, c := range readCookies(r.Header, name) {
+ return c, nil
+ }
+ return nil, ErrNoCookie
+}
+
+// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
+// AddCookie does not attach more than one [Cookie] header field. That
+// means all cookies, if any, are written into the same line,
+// separated by semicolon.
+// AddCookie only sanitizes c's name and value, and does not sanitize
+// a Cookie header already present in the request.
+func (r *Request) AddCookie(c *Cookie) {
+ s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value))
+ if c := r.Header.Get("Cookie"); c != "" {
+ r.Header.Set("Cookie", c+"; "+s)
+ } else {
+ r.Header.Set("Cookie", s)
+ }
+}
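A sketch of the round trip between AddCookie and Cookie (names and values are placeholders):

    req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
    req.AddCookie(&http.Cookie{Name: "session", Value: "abc123"})
    req.AddCookie(&http.Cookie{Name: "theme", Value: "dark"})
    // Both cookies share one header line: "session=abc123; theme=dark".
    if c, err := req.Cookie("theme"); err == nil {
        _ = c.Value // "dark"
    }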
+
+// Referer returns the referring URL, if sent in the request.
+//
+// Referer is misspelled as in the request itself, a mistake from the
+// earliest days of HTTP. This value can also be fetched from the
+// [Header] map as Header["Referer"]; the benefit of making it available
+// as a method is that the compiler can diagnose programs that use the
+// alternate (correct English) spelling req.Referrer() but cannot
+// diagnose programs that use Header["Referrer"].
+func (r *Request) Referer() string {
+ return r.Header.Get("Referer")
+}
+
+// multipartByReader is a sentinel value.
+// Its presence in Request.MultipartForm indicates that parsing of the request
+// body has been handed off to a MultipartReader instead of ParseMultipartForm.
+var multipartByReader = &multipart.Form{
+ Value: make(map[string][]string),
+ File: make(map[string][]*multipart.FileHeader),
+}
+
+// MultipartReader returns a MIME multipart reader if this is a
+// multipart/form-data or a multipart/mixed POST request, else returns nil and an error.
+// Use this function instead of [Request.ParseMultipartForm] to
+// process the request body as a stream.
+func (r *Request) MultipartReader() (*multipart.Reader, error) {
+ if r.MultipartForm == multipartByReader {
+ return nil, errors.New("http: MultipartReader called twice")
+ }
+ if r.MultipartForm != nil {
+ return nil, errors.New("http: multipart handled by ParseMultipartForm")
+ }
+ r.MultipartForm = multipartByReader
+ return r.multipartReader(true)
+}
+
+func (r *Request) multipartReader(allowMixed bool) (*multipart.Reader, error) {
+ v := r.Header.Get("Content-Type")
+ if v == "" {
+ return nil, ErrNotMultipart
+ }
+ if r.Body == nil {
+ return nil, errors.New("missing form body")
+ }
+ d, params, err := mime.ParseMediaType(v)
+ if err != nil || !(d == "multipart/form-data" || allowMixed && d == "multipart/mixed") {
+ return nil, ErrNotMultipart
+ }
+ boundary, ok := params["boundary"]
+ if !ok {
+ return nil, ErrMissingBoundary
+ }
+ return multipart.NewReader(r.Body, boundary), nil
+}
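From an importing program, the streaming path inside a handler looks like this (a sketch; per-part handling is elided):

    mr, err := r.MultipartReader()
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    for {
        part, err := mr.NextPart()
        if err == io.EOF {
            break // no more parts
        }
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        // part is an io.Reader; consume it here without buffering
        // the whole form in memory.
        _, _ = io.Copy(io.Discard, part)
    }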
+
+// isH2Upgrade reports whether r represents the http2 "client preface"
+// magic string.
+func (r *Request) isH2Upgrade() bool {
+ return r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0"
+}
+
+// valueOrDefault returns value if nonempty, def otherwise.
+func valueOrDefault(value, def string) string {
+ if value != "" {
+ return value
+ }
+ return def
+}
+
+// NOTE: This is not intended to reflect the actual Go version being used.
+// It was changed at the time of Go 1.1 release because the former User-Agent
+// had ended up blocked by some intrusion detection systems.
+// See https://codereview.appspot.com/7532043.
+const defaultUserAgent = "Go-http-client/1.1"
+
+// Write writes an HTTP/1.1 request, which is the header and body, in wire format.
+// This method consults the following fields of the request:
+//
+// Host
+// URL
+// Method (defaults to "GET")
+// Header
+// ContentLength
+// TransferEncoding
+// Body
+//
+// If Body is present, Content-Length is <= 0 and [Request.TransferEncoding]
+// hasn't been set to "identity", Write adds "Transfer-Encoding:
+// chunked" to the header. Body is closed after it is sent.
+func (r *Request) Write(w io.Writer) error {
+ return r.write(w, false, nil, nil)
+}
+
+// WriteProxy is like [Request.Write] but writes the request in the form
+// expected by an HTTP proxy. In particular, [Request.WriteProxy] writes the
+// initial Request-URI line of the request with an absolute URI, per
+// section 5.3 of RFC 7230, including the scheme and host.
+// In either case, WriteProxy also writes a Host header, using
+// either r.Host or r.URL.Host.
+func (r *Request) WriteProxy(w io.Writer) error {
+ return r.write(w, true, nil, nil)
+}
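A quick way to see the wire format Write produces (a sketch; the comment abbreviates the CRLF-terminated output):

    var buf bytes.Buffer
    req, _ := http.NewRequest(http.MethodGet, "https://example.com/path?q=1", nil)
    if err := req.Write(&buf); err != nil {
        log.Fatal(err)
    }
    // buf begins:
    //   GET /path?q=1 HTTP/1.1
    //   Host: example.com
    //   User-Agent: Go-http-client/1.1
    // WriteProxy would instead emit "GET https://example.com/path?q=1 HTTP/1.1".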
+
+// errMissingHost is returned by Write when there is no Host or URL present in
+// the Request.
+var errMissingHost = errors.New("http: Request.Write on Request with no Host or URL set")
+
+// extraHeaders may be nil
+// waitForContinue may be nil
+// always closes body
+func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitForContinue func() bool) (err error) {
+ trace := httptrace.ContextClientTrace(r.Context())
+ if trace != nil && trace.WroteRequest != nil {
+ defer func() {
+ trace.WroteRequest(httptrace.WroteRequestInfo{
+ Err: err,
+ })
+ }()
+ }
+ closed := false
+ defer func() {
+ if closed {
+ return
+ }
+ if closeErr := r.closeBody(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }()
+
+ // Find the target host. Prefer the Host: header, but if that
+ // is not given, use the host from the request URL.
+ //
+ // Clean the host, in case it arrives with unexpected stuff in it.
+ host := r.Host
+ if host == "" {
+ if r.URL == nil {
+ return errMissingHost
+ }
+ host = r.URL.Host
+ }
+ host, err = httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return err
+ }
+ // Validate that the Host header is a valid header in general,
+ // but don't validate the host itself. This is sufficient to avoid
+ // header or request smuggling via the Host field.
+ // The server can (and will, if it's a net/http server) reject
+ // the request if it doesn't consider the host valid.
+ if !httpguts.ValidHostHeader(host) {
+ // Historically, we would truncate the Host header after '/' or ' '.
+ // Some users have relied on this truncation to convert a network
+ // address such as Unix domain socket path into a valid, ignored
+ // Host header (see https://go.dev/issue/61431).
+ //
+ // We don't preserve the truncation, because sending an altered
+ // header field opens a smuggling vector. Instead, zero out the
+ // Host header entirely if it isn't valid. (An empty Host is valid;
+ // see RFC 9112 Section 3.2.)
+ //
+ // Return an error if we're sending to a proxy, since the proxy
+ // probably can't do anything useful with an empty Host header.
+ if !usingProxy {
+ host = ""
+ } else {
+ return errors.New("http: invalid Host header")
+ }
+ }
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ host = removeZone(host)
+
+ ruri := r.URL.RequestURI()
+ if usingProxy && r.URL.Scheme != "" && r.URL.Opaque == "" {
+ ruri = r.URL.Scheme + "://" + host + ruri
+ } else if r.Method == "CONNECT" && r.URL.Path == "" {
+ // CONNECT requests normally give just the host and port, not a full URL.
+ ruri = host
+ if r.URL.Opaque != "" {
+ ruri = r.URL.Opaque
+ }
+ }
+ if stringContainsCTLByte(ruri) {
+ return errors.New("net/http: can't write control character in Request.URL")
+ }
+ // TODO: validate r.Method too? At least it's less likely to
+ // come from an attacker (more likely to be a constant in
+ // code).
+
+ // Wrap the writer in a bufio Writer if it's not already buffered.
+ // Don't always call NewWriter, as that forces a bytes.Buffer
+ // and other small bufio Writers to have a minimum 4k buffer
+ // size.
+ var bw *bufio.Writer
+ if _, ok := w.(io.ByteWriter); !ok {
+ bw = bufio.NewWriter(w)
+ w = bw
+ }
+
+ _, err = fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(r.Method, "GET"), ruri)
+ if err != nil {
+ return err
+ }
+
+ // Header lines
+ _, err = fmt.Fprintf(w, "Host: %s\r\n", host)
+ if err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Host", []string{host})
+ }
+
+ // Use the defaultUserAgent unless the Header contains one, which
+ // may be blank to suppress sending the header entirely.
+ userAgent := defaultUserAgent
+ if r.Header.has("User-Agent") {
+ userAgent = r.Header.Get("User-Agent")
+ }
+ if userAgent != "" {
+ userAgent = headerNewlineToSpace.Replace(userAgent)
+ userAgent = textproto.TrimString(userAgent)
+ _, err = fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent)
+ if err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("User-Agent", []string{userAgent})
+ }
+ }
+
+ // Process Body, ContentLength, Close, Trailer
+ tw, err := newTransferWriter(r)
+ if err != nil {
+ return err
+ }
+ err = tw.writeHeader(w, trace)
+ if err != nil {
+ return err
+ }
+
+ err = r.Header.writeSubset(w, reqWriteExcludeHeader, trace)
+ if err != nil {
+ return err
+ }
+
+ if extraHeaders != nil {
+ err = extraHeaders.write(w, trace)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = io.WriteString(w, "\r\n")
+ if err != nil {
+ return err
+ }
+
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+
+ // Flush and wait for 100-continue if expected.
+ if waitForContinue != nil {
+ if bw, ok := w.(*bufio.Writer); ok {
+ err = bw.Flush()
+ if err != nil {
+ return err
+ }
+ }
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+ if !waitForContinue() {
+ closed = true
+ r.closeBody()
+ return nil
+ }
+ }
+
+ if bw, ok := w.(*bufio.Writer); ok && tw.FlushHeaders {
+ if err := bw.Flush(); err != nil {
+ return err
+ }
+ }
+
+ // Write body and trailer
+ closed = true
+ err = tw.writeBody(w)
+ if err != nil {
+ if tw.bodyReadError == err {
+ err = requestBodyReadError{err}
+ }
+ return err
+ }
+
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// requestBodyReadError wraps an error from (*Request).write to indicate
+// that the error came from a Read call on the Request.Body.
+// This error type should not escape the net/http package to users.
+type requestBodyReadError struct{ error }
+
+func idnaASCII(v string) (string, error) {
+ // TODO: Consider removing this check after verifying performance is okay.
+ // Right now punycode verification, length checks, context checks, and the
+ // permissible character tests are all omitted. It also prevents the ToASCII
+ // call from salvaging an invalid IDN, when possible. As a result it may be
+ // possible to have two IDNs that appear identical to the user where the
+ // ASCII-only version causes an error downstream whereas the non-ASCII
+ // version does not.
+ // Note that for correct ASCII IDNs, ToASCII merely does considerably more
+ // work; it will not cause an allocation.
+ if ascii.Is(v) {
+ return v, nil
+ }
+ return idna.Lookup.ToASCII(v)
+}
+
+// removeZone removes IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// ParseHTTPVersion parses an HTTP version string according to RFC 7230, section 2.6.
+// "HTTP/1.0" returns (1, 0, true). Note that strings without
+// a minor version, such as "HTTP/2", are not valid.
+func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
+ switch vers {
+ case "HTTP/1.1":
+ return 1, 1, true
+ case "HTTP/1.0":
+ return 1, 0, true
+ }
+ if !strings.HasPrefix(vers, "HTTP/") {
+ return 0, 0, false
+ }
+ if len(vers) != len("HTTP/X.Y") {
+ return 0, 0, false
+ }
+ if vers[6] != '.' {
+ return 0, 0, false
+ }
+ maj, err := strconv.ParseUint(vers[5:6], 10, 0)
+ if err != nil {
+ return 0, 0, false
+ }
+ min, err := strconv.ParseUint(vers[7:8], 10, 0)
+ if err != nil {
+ return 0, 0, false
+ }
+ return int(maj), int(min), true
+}
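+
+// An illustrative sketch (editorial note, not part of the upstream source):
+//
+//	major, minor, ok := ParseHTTPVersion("HTTP/1.0")
+//	fmt.Println(major, minor, ok) // 1 0 true
+//	_, _, ok = ParseHTTPVersion("HTTP/2")
+//	fmt.Println(ok) // false: the minor version is missing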
+
+func validMethod(method string) bool {
+ /*
+ Method = "OPTIONS" ; Section 9.2
+ | "GET" ; Section 9.3
+ | "HEAD" ; Section 9.4
+ | "POST" ; Section 9.5
+ | "PUT" ; Section 9.6
+ | "DELETE" ; Section 9.7
+ | "TRACE" ; Section 9.8
+ | "CONNECT" ; Section 9.9
+ | extension-method
+ extension-method = token
+ token = 1*<any CHAR except CTLs or separators>
+ */
+ return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1
+}
+
+// NewRequest wraps [NewRequestWithContext] using [context.Background].
+func NewRequest(method, url string, body io.Reader) (*Request, error) {
+ return NewRequestWithContext(context.Background(), method, url, body)
+}
+
+// NewRequestWithContext returns a new [Request] given a method, URL, and
+// optional body.
+//
+// If the provided body is also an [io.Closer], the returned
+// [Request.Body] is set to body and will be closed (possibly
+// asynchronously) by the Client methods Do, Post, and PostForm,
+// and [Transport.RoundTrip].
+//
+// NewRequestWithContext returns a Request suitable for use with
+// [Client.Do] or [Transport.RoundTrip]. To create a request for use with
+// testing a Server Handler, either use the [NewRequest] function in the
+// net/http/httptest package, use [ReadRequest], or manually update the
+// Request fields. For an outgoing client request, the context
+// controls the entire lifetime of a request and its response:
+// obtaining a connection, sending the request, and reading the
+// response headers and body. See the Request type's documentation for
+// the difference between inbound and outbound request fields.
+//
+// If body is of type [*bytes.Buffer], [*bytes.Reader], or
+// [*strings.Reader], the returned request's ContentLength is set to its
+// exact value (instead of -1), GetBody is populated (so 307 and 308
+// redirects can replay the body), and Body is set to [NoBody] if the
+// ContentLength is 0.
+func NewRequestWithContext(ctx context.Context, method, url string, body io.Reader) (*Request, error) {
+ if method == "" {
+ // We document that "" means "GET" for Request.Method, and people have
+ // relied on that from NewRequest, so keep that working.
+ // We still enforce validMethod for non-empty methods.
+ method = "GET"
+ }
+ if !validMethod(method) {
+ return nil, fmt.Errorf("net/http: invalid method %q", method)
+ }
+ if ctx == nil {
+ return nil, errors.New("net/http: nil Context")
+ }
+ u, err := urlpkg.Parse(url)
+ if err != nil {
+ return nil, err
+ }
+ rc, ok := body.(io.ReadCloser)
+ if !ok && body != nil {
+ rc = io.NopCloser(body)
+ }
+ // The host's colon:port should be normalized. See Issue 14836.
+ u.Host = removeEmptyPort(u.Host)
+ req := &Request{
+ ctx: ctx,
+ Method: method,
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(Header),
+ Body: rc,
+ Host: u.Host,
+ }
+ if body != nil {
+ switch v := body.(type) {
+ case *bytes.Buffer:
+ req.ContentLength = int64(v.Len())
+ buf := v.Bytes()
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := bytes.NewReader(buf)
+ return io.NopCloser(r), nil
+ }
+ case *bytes.Reader:
+ req.ContentLength = int64(v.Len())
+ snapshot := *v
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := snapshot
+ return io.NopCloser(&r), nil
+ }
+ case *strings.Reader:
+ req.ContentLength = int64(v.Len())
+ snapshot := *v
+ req.GetBody = func() (io.ReadCloser, error) {
+ r := snapshot
+ return io.NopCloser(&r), nil
+ }
+ default:
+ // This is where we'd set it to -1 (at least
+ // if body != NoBody) to mean unknown, but
+ // that broke people during the Go 1.8 testing
+ // period. People depend on it being 0 I
+ // guess. Maybe retry later. See Issue 18117.
+ }
+ // For client requests, Request.ContentLength of 0
+ // means either actually 0, or unknown. The only way
+ // to explicitly say that the ContentLength is zero is
+ // to set the Body to nil. But turns out too much code
+ // depends on NewRequest returning a non-nil Body,
+ // so we use a well-known ReadCloser variable instead
+ // and have the http package also treat that sentinel
+ // variable to mean explicitly zero.
+ if req.GetBody != nil && req.ContentLength == 0 {
+ req.Body = NoBody
+ req.GetBody = func() (io.ReadCloser, error) { return NoBody, nil }
+ }
+ }
+
+ return req, nil
+}
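+
+// An illustrative sketch (editorial note, not part of the upstream source):
+// an outgoing request whose whole lifetime is bounded by a context deadline.
+// The URL and payload are hypothetical.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	body := strings.NewReader(`{"name":"gopher"}`)
+//	req, err := NewRequestWithContext(ctx, "POST", "https://example.com/api", body)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Because body is a *strings.Reader, ContentLength is exact and GetBody
+//	// is populated, so 307/308 redirects can replay the body.
+//	resp, err := DefaultClient.Do(req) // honors ctx's deadline
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer resp.Body.Close()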
+
+// BasicAuth returns the username and password provided in the request's
+// Authorization header, if the request uses HTTP Basic Authentication.
+// See RFC 2617, Section 2.
+func (r *Request) BasicAuth() (username, password string, ok bool) {
+ auth := r.Header.Get("Authorization")
+ if auth == "" {
+ return "", "", false
+ }
+ return parseBasicAuth(auth)
+}
+
+// parseBasicAuth parses an HTTP Basic Authentication string.
+// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
+func parseBasicAuth(auth string) (username, password string, ok bool) {
+ const prefix = "Basic "
+ // Case insensitive prefix match. See Issue 22736.
+ if len(auth) < len(prefix) || !ascii.EqualFold(auth[:len(prefix)], prefix) {
+ return "", "", false
+ }
+ c, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
+ if err != nil {
+ return "", "", false
+ }
+ cs := string(c)
+ username, password, ok = strings.Cut(cs, ":")
+ if !ok {
+ return "", "", false
+ }
+ return username, password, true
+}
+
+// SetBasicAuth sets the request's Authorization header to use HTTP
+// Basic Authentication with the provided username and password.
+//
+// With HTTP Basic Authentication the provided username and password
+// are not encrypted. It should generally only be used in an HTTPS
+// request.
+//
+// The username may not contain a colon. Some protocols may impose
+// additional requirements on pre-escaping the username and
+// password. For instance, when used with OAuth2, both arguments must
+// be URL encoded first with [url.QueryEscape].
+func (r *Request) SetBasicAuth(username, password string) {
+ r.Header.Set("Authorization", "Basic "+basicAuth(username, password))
+}
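+
+// An illustrative round trip (editorial note, not part of the upstream
+// source), showing that SetBasicAuth and BasicAuth are inverses as long as
+// the username contains no colon:
+//
+//	req, _ := NewRequest("GET", "https://example.com/", nil)
+//	req.SetBasicAuth("Aladdin", "open sesame")
+//	user, pass, ok := req.BasicAuth()
+//	fmt.Println(user, pass, ok) // Aladdin open sesame true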
+
+// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts.
+func parseRequestLine(line string) (method, requestURI, proto string, ok bool) {
+ method, rest, ok1 := strings.Cut(line, " ")
+ requestURI, proto, ok2 := strings.Cut(rest, " ")
+ if !ok1 || !ok2 {
+ return "", "", "", false
+ }
+ return method, requestURI, proto, true
+}
+
+var textprotoReaderPool sync.Pool
+
+func newTextprotoReader(br *bufio.Reader) *textproto.Reader {
+ if v := textprotoReaderPool.Get(); v != nil {
+ tr := v.(*textproto.Reader)
+ tr.R = br
+ return tr
+ }
+ return textproto.NewReader(br)
+}
+
+func putTextprotoReader(r *textproto.Reader) {
+ r.R = nil
+ textprotoReaderPool.Put(r)
+}
+
+// ReadRequest reads and parses an incoming request from b.
+//
+// ReadRequest is a low-level function and should only be used for
+// specialized applications; most code should use the [Server] to read
+// requests and handle them via the [Handler] interface. ReadRequest
+// only supports HTTP/1.x requests. For HTTP/2, use golang.org/x/net/http2.
+func ReadRequest(b *bufio.Reader) (*Request, error) {
+ req, err := readRequest(b)
+ if err != nil {
+ return nil, err
+ }
+
+ delete(req.Header, "Host")
+ return req, err
+}
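+
+// An illustrative sketch (editorial note, not part of the upstream source):
+// parsing a raw HTTP/1.1 request held in memory.
+//
+//	raw := "GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n"
+//	req, err := ReadRequest(bufio.NewReader(strings.NewReader(raw)))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// The Host header itself has been deleted, but req.Host keeps the value.
+//	fmt.Println(req.Method, req.RequestURI, req.Host) // GET /index.html example.com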
+
+func readRequest(b *bufio.Reader) (req *Request, err error) {
+ tp := newTextprotoReader(b)
+ defer putTextprotoReader(tp)
+
+ req = new(Request)
+
+ // First line: GET /index.html HTTP/1.0
+ var s string
+ if s, err = tp.ReadLine(); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ }()
+
+ var ok bool
+ req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s)
+ if !ok {
+ return nil, badStringError("malformed HTTP request", s)
+ }
+ if !validMethod(req.Method) {
+ return nil, badStringError("invalid method", req.Method)
+ }
+ rawurl := req.RequestURI
+ if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
+ return nil, badStringError("malformed HTTP version", req.Proto)
+ }
+
+ // CONNECT requests are used in two different ways, and neither uses a full URL:
+ // The standard use is to tunnel HTTPS through an HTTP proxy.
+ // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is
+ // just the authority section of a URL. This information should go in req.URL.Host.
+ //
+ // The net/rpc package also uses CONNECT, but there the parameter is a path
+ // that starts with a slash. It can be parsed with the regular URL parser,
+ // and the path will end up in req.URL.Path, where it needs to be in order for
+ // RPC to work.
+ justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/")
+ if justAuthority {
+ rawurl = "http://" + rawurl
+ }
+
+ if req.URL, err = url.ParseRequestURI(rawurl); err != nil {
+ return nil, err
+ }
+
+ if justAuthority {
+ // Strip the bogus "http://" back off.
+ req.URL.Scheme = ""
+ }
+
+ // Subsequent lines: Key: value.
+ mimeHeader, err := tp.ReadMIMEHeader()
+ if err != nil {
+ return nil, err
+ }
+ req.Header = Header(mimeHeader)
+ if len(req.Header["Host"]) > 1 {
+ return nil, fmt.Errorf("too many Host headers")
+ }
+
+ // RFC 7230, section 5.3: Must treat
+ // GET /index.html HTTP/1.1
+ // Host: www.google.com
+ // and
+ // GET http://www.google.com/index.html HTTP/1.1
+ // Host: doesntmatter
+ // the same. In the second case, any Host line is ignored.
+ req.Host = req.URL.Host
+ if req.Host == "" {
+ req.Host = req.Header.get("Host")
+ }
+
+ fixPragmaCacheControl(req.Header)
+
+ req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)
+
+ err = readTransfer(req, b)
+ if err != nil {
+ return nil, err
+ }
+
+ if req.isH2Upgrade() {
+ // Because it's neither chunked, nor declared:
+ req.ContentLength = -1
+
+ // We want to give handlers a chance to hijack the
+ // connection, but we need to prevent the Server from
+ // dealing with the connection further if it's not
+ // hijacked. Set Close to ensure that:
+ req.Close = true
+ }
+ return req, nil
+}
+
+// MaxBytesReader is similar to [io.LimitReader] but is intended for
+// limiting the size of incoming request bodies. In contrast to
+// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
+// non-nil error of type [*MaxBytesError] for a Read beyond the limit,
+// and closes the underlying reader when its Close method is called.
+//
+// MaxBytesReader prevents clients from accidentally or maliciously
+// sending a large request and wasting server resources. If possible,
+// it tells the [ResponseWriter] to close the connection after the limit
+// has been reached.
+func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser {
+ if n < 0 { // Treat negative limits as equivalent to 0.
+ n = 0
+ }
+ return &maxBytesReader{w: w, r: r, i: n, n: n}
+}
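+
+// An illustrative handler sketch (editorial note, not part of the upstream
+// source): capping a request body at a hypothetical 1 MiB limit and
+// detecting the overflow with errors.As.
+//
+//	func handler(w ResponseWriter, r *Request) {
+//		r.Body = MaxBytesReader(w, r.Body, 1<<20)
+//		data, err := io.ReadAll(r.Body)
+//		var mbe *MaxBytesError
+//		if errors.As(err, &mbe) {
+//			Error(w, "request body too large", StatusRequestEntityTooLarge)
+//			return
+//		}
+//		_ = data // process the bounded body
+//	}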
+
+// MaxBytesError is returned by [MaxBytesReader] when its read limit is exceeded.
+type MaxBytesError struct {
+ Limit int64
+}
+
+func (e *MaxBytesError) Error() string {
+ // Due to Hyrum's law, this text cannot be changed.
+ return "http: request body too large"
+}
+
+type maxBytesReader struct {
+ w ResponseWriter
+ r io.ReadCloser // underlying reader
+ i int64 // max bytes initially, for MaxBytesError
+ n int64 // max bytes remaining
+ err error // sticky error
+}
+
+func (l *maxBytesReader) Read(p []byte) (n int, err error) {
+ if l.err != nil {
+ return 0, l.err
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+ // If they asked for a 32KB read but only 5 bytes are
+ // remaining, no need to read 32KB. 6 bytes will answer the
+ // question of whether we hit the limit or go past it.
+ // 0 < len(p) < 2^63
+ if int64(len(p))-1 > l.n {
+ p = p[:l.n+1]
+ }
+ n, err = l.r.Read(p)
+
+ if int64(n) <= l.n {
+ l.n -= int64(n)
+ l.err = err
+ return n, err
+ }
+
+ n = int(l.n)
+ l.n = 0
+
+ // The server code and client code both use
+ // maxBytesReader. This "requestTooLarge" check is
+ // only used by the server code. To prevent binaries
+ // which only use the HTTP Client code (such as
+ // cmd/go) from also linking in the HTTP server, don't
+ // use a static type assertion to the server
+ // "*response" type. Check this interface instead:
+ type requestTooLarger interface {
+ requestTooLarge()
+ }
+ if res, ok := l.w.(requestTooLarger); ok {
+ res.requestTooLarge()
+ }
+ l.err = &MaxBytesError{l.i}
+ return n, l.err
+}
+
+func (l *maxBytesReader) Close() error {
+ return l.r.Close()
+}
+
+func copyValues(dst, src url.Values) {
+ for k, vs := range src {
+ dst[k] = append(dst[k], vs...)
+ }
+}
+
+func parsePostForm(r *Request) (vs url.Values, err error) {
+ if r.Body == nil {
+ err = errors.New("missing form body")
+ return
+ }
+ ct := r.Header.Get("Content-Type")
+ // RFC 7231, section 3.1.1.5 - empty type
+ // MAY be treated as application/octet-stream
+ if ct == "" {
+ ct = "application/octet-stream"
+ }
+ ct, _, err = mime.ParseMediaType(ct)
+ switch {
+ case ct == "application/x-www-form-urlencoded":
+ var reader io.Reader = r.Body
+ maxFormSize := int64(1<<63 - 1)
+ if _, ok := r.Body.(*maxBytesReader); !ok {
+ maxFormSize = int64(10 << 20) // 10 MB is a lot of text.
+ reader = io.LimitReader(r.Body, maxFormSize+1)
+ }
+ b, e := io.ReadAll(reader)
+ if e != nil {
+ if err == nil {
+ err = e
+ }
+ break
+ }
+ if int64(len(b)) > maxFormSize {
+ err = errors.New("http: POST too large")
+ return
+ }
+ vs, e = url.ParseQuery(string(b))
+ if err == nil {
+ err = e
+ }
+ case ct == "multipart/form-data":
+ // handled by ParseMultipartForm (which is calling us, or should be)
+ // TODO(bradfitz): there are too many possible
+ // orders to call too many functions here.
+ // Clean this up and write more tests.
+ // request_test.go contains the start of this,
+ // in TestParseMultipartFormOrder and others.
+ }
+ return
+}
+
+// ParseForm populates r.Form and r.PostForm.
+//
+// For all requests, ParseForm parses the raw query from the URL and updates
+// r.Form.
+//
+// For POST, PUT, and PATCH requests, it also reads the request body, parses it
+// as a form and puts the results into both r.PostForm and r.Form. Request body
+// parameters take precedence over URL query string values in r.Form.
+//
+// If the request Body's size has not already been limited by [MaxBytesReader],
+// the size is capped at 10MB.
+//
+// For other HTTP methods, or when the Content-Type is not
+// application/x-www-form-urlencoded, the request Body is not read, and
+// r.PostForm is initialized to a non-nil, empty value.
+//
+// [Request.ParseMultipartForm] calls ParseForm automatically.
+// ParseForm is idempotent.
+func (r *Request) ParseForm() error {
+ var err error
+ if r.PostForm == nil {
+ if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" {
+ r.PostForm, err = parsePostForm(r)
+ }
+ if r.PostForm == nil {
+ r.PostForm = make(url.Values)
+ }
+ }
+ if r.Form == nil {
+ if len(r.PostForm) > 0 {
+ r.Form = make(url.Values)
+ copyValues(r.Form, r.PostForm)
+ }
+ var newValues url.Values
+ if r.URL != nil {
+ var e error
+ newValues, e = url.ParseQuery(r.URL.RawQuery)
+ if err == nil {
+ err = e
+ }
+ }
+ if newValues == nil {
+ newValues = make(url.Values)
+ }
+ if r.Form == nil {
+ r.Form = newValues
+ } else {
+ copyValues(r.Form, newValues)
+ }
+ }
+ return err
+}
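+
+// An illustrative handler sketch (editorial note, not part of the upstream
+// source), showing the precedence of body values over query values in r.Form:
+//
+//	func handler(w ResponseWriter, r *Request) {
+//		if err := r.ParseForm(); err != nil {
+//			Error(w, err.Error(), StatusBadRequest)
+//			return
+//		}
+//		// For "POST /path?who=query" with body "who=body":
+//		// r.Form["who"] == []string{"body", "query"}
+//		// r.PostForm["who"] == []string{"body"}
+//	}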
+
+// ParseMultipartForm parses a request body as multipart/form-data.
+// The whole request body is parsed and up to a total of maxMemory bytes of
+// its file parts are stored in memory, with the remainder stored on
+// disk in temporary files.
+// ParseMultipartForm calls [Request.ParseForm] if necessary.
+// If ParseForm returns an error, ParseMultipartForm returns it but also
+// continues parsing the request body.
+// After one call to ParseMultipartForm, subsequent calls have no effect.
+func (r *Request) ParseMultipartForm(maxMemory int64) error {
+ if r.MultipartForm == multipartByReader {
+ return errors.New("http: multipart handled by MultipartReader")
+ }
+ var parseFormErr error
+ if r.Form == nil {
+ // Let errors in ParseForm fall through, and just
+ // return it at the end.
+ parseFormErr = r.ParseForm()
+ }
+ if r.MultipartForm != nil {
+ return nil
+ }
+
+ mr, err := r.multipartReader(false)
+ if err != nil {
+ return err
+ }
+
+ f, err := mr.ReadForm(maxMemory)
+ if err != nil {
+ return err
+ }
+
+ if r.PostForm == nil {
+ r.PostForm = make(url.Values)
+ }
+ for k, v := range f.Value {
+ r.Form[k] = append(r.Form[k], v...)
+ // r.PostForm should also be populated. See Issue 9305.
+ r.PostForm[k] = append(r.PostForm[k], v...)
+ }
+
+ r.MultipartForm = f
+
+ return parseFormErr
+}
+
+// FormValue returns the first value for the named component of the query.
+// The precedence order:
+// 1. application/x-www-form-urlencoded form body (POST, PUT, PATCH only)
+// 2. query parameters (always)
+// 3. multipart/form-data form body (always)
+//
+// FormValue calls [Request.ParseMultipartForm] and [Request.ParseForm]
+// if necessary and ignores any errors returned by these functions.
+// If key is not present, FormValue returns the empty string.
+// To access multiple values of the same key, call ParseForm and
+// then inspect [Request.Form] directly.
+func (r *Request) FormValue(key string) string {
+ if r.Form == nil {
+ r.ParseMultipartForm(defaultMaxMemory)
+ }
+ if vs := r.Form[key]; len(vs) > 0 {
+ return vs[0]
+ }
+ return ""
+}
+
+// PostFormValue returns the first value for the named component of the POST,
+// PUT, or PATCH request body. URL query parameters are ignored.
+// PostFormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary and ignores
+// any errors returned by these functions.
+// If key is not present, PostFormValue returns the empty string.
+func (r *Request) PostFormValue(key string) string {
+ if r.PostForm == nil {
+ r.ParseMultipartForm(defaultMaxMemory)
+ }
+ if vs := r.PostForm[key]; len(vs) > 0 {
+ return vs[0]
+ }
+ return ""
+}
+
+// FormFile returns the first file for the provided form key.
+// FormFile calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary.
+func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
+ if r.MultipartForm == multipartByReader {
+ return nil, nil, errors.New("http: multipart handled by MultipartReader")
+ }
+ if r.MultipartForm == nil {
+ err := r.ParseMultipartForm(defaultMaxMemory)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ if r.MultipartForm != nil && r.MultipartForm.File != nil {
+ if fhs := r.MultipartForm.File[key]; len(fhs) > 0 {
+ f, err := fhs[0].Open()
+ return f, fhs[0], err
+ }
+ }
+ return nil, nil, ErrMissingFile
+}
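+
+// An illustrative upload handler (editorial note, not part of the upstream
+// source); the field name "avatar" is hypothetical. If the form has not been
+// parsed yet, FormFile parses it with the package's default in-memory limit.
+//
+//	func upload(w ResponseWriter, r *Request) {
+//		f, fh, err := r.FormFile("avatar")
+//		if err != nil {
+//			Error(w, err.Error(), StatusBadRequest)
+//			return
+//		}
+//		defer f.Close()
+//		fmt.Fprintf(w, "got %s (%d bytes)\n", fh.Filename, fh.Size)
+//	}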
+
+// PathValue returns the value for the named path wildcard in the [ServeMux] pattern
+// that matched the request.
+// It returns the empty string if the request was not matched against a pattern
+// or there is no such wildcard in the pattern.
+func (r *Request) PathValue(name string) string {
+ if i := r.patIndex(name); i >= 0 {
+ return r.matches[i]
+ }
+ return r.otherValues[name]
+}
+
+// SetPathValue sets name to value, so that subsequent calls to r.PathValue(name)
+// return value.
+func (r *Request) SetPathValue(name, value string) {
+ if i := r.patIndex(name); i >= 0 {
+ r.matches[i] = value
+ } else {
+ if r.otherValues == nil {
+ r.otherValues = map[string]string{}
+ }
+ r.otherValues[name] = value
+ }
+}
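+
+// An illustrative sketch (editorial note, not part of the upstream source):
+// reading a wildcard captured by a ServeMux pattern; the pattern and wildcard
+// name are hypothetical.
+//
+//	mux := NewServeMux()
+//	mux.HandleFunc("GET /users/{id}", func(w ResponseWriter, r *Request) {
+//		id := r.PathValue("id") // "42" for a request to /users/42
+//		fmt.Fprintln(w, "user", id)
+//	})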
+
+// patIndex returns the index of name in the list of named wildcards of the
+// request's pattern, or -1 if there is no such name.
+func (r *Request) patIndex(name string) int {
+ // The linear search seems expensive compared to a map, but just creating the map
+ // takes a lot of time, and most patterns will just have a couple of wildcards.
+ if r.pat == nil {
+ return -1
+ }
+ i := 0
+ for _, seg := range r.pat.segments {
+ if seg.wild && seg.s != "" {
+ if name == seg.s {
+ return i
+ }
+ i++
+ }
+ }
+ return -1
+}
+
+func (r *Request) expectsContinue() bool {
+ return hasToken(r.Header.get("Expect"), "100-continue")
+}
+
+func (r *Request) wantsHttp10KeepAlive() bool {
+ if r.ProtoMajor != 1 || r.ProtoMinor != 0 {
+ return false
+ }
+ return hasToken(r.Header.get("Connection"), "keep-alive")
+}
+
+func (r *Request) wantsClose() bool {
+ if r.Close {
+ return true
+ }
+ return hasToken(r.Header.get("Connection"), "close")
+}
+
+func (r *Request) closeBody() error {
+ if r.Body == nil {
+ return nil
+ }
+ return r.Body.Close()
+}
+
+func (r *Request) isReplayable() bool {
+ if r.Body == nil || r.Body == NoBody || r.GetBody != nil {
+ switch valueOrDefault(r.Method, "GET") {
+ case "GET", "HEAD", "OPTIONS", "TRACE":
+ return true
+ }
+ // The Idempotency-Key, while non-standard, is widely used to
+ // mean a POST or other request is idempotent. See
+ // https://golang.org/issue/19943#issuecomment-421092421
+ if r.Header.has("Idempotency-Key") || r.Header.has("X-Idempotency-Key") {
+ return true
+ }
+ }
+ return false
+}
+
+// outgoingLength reports the Content-Length of this outgoing (Client) request.
+// It maps 0 into -1 (unknown) when the Body is non-nil.
+func (r *Request) outgoingLength() int64 {
+ if r.Body == nil || r.Body == NoBody {
+ return 0
+ }
+ if r.ContentLength != 0 {
+ return r.ContentLength
+ }
+ return -1
+}
+
+// requestMethodUsuallyLacksBody reports whether the given request
+// method is one that typically does not involve a request body.
+// This is used by the Transport (via
+// transferWriter.shouldSendChunkedRequestBody) to determine whether
+// we try to test-read a byte from a non-nil Request.Body when
+// Request.outgoingLength() returns -1. See the comments in
+// shouldSendChunkedRequestBody.
+func requestMethodUsuallyLacksBody(method string) bool {
+ switch method {
+ case "GET", "HEAD", "DELETE", "OPTIONS", "PROPFIND", "SEARCH":
+ return true
+ }
+ return false
+}
+
+// requiresHTTP1 reports whether this request requires being sent on
+// an HTTP/1 connection.
+func (r *Request) requiresHTTP1() bool {
+ return hasToken(r.Header.Get("Connection"), "upgrade") &&
+ ascii.EqualFold(r.Header.Get("Upgrade"), "websocket")
+}
diff --git a/contrib/go/_std_1.22/src/net/http/response.go b/contrib/go/_std_1.22/src/net/http/response.go
new file mode 100644
index 0000000000..0c3d7f6d85
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/response.go
@@ -0,0 +1,371 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Response reading and parsing.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+var respExcludeHeader = map[string]bool{
+ "Content-Length": true,
+ "Transfer-Encoding": true,
+ "Trailer": true,
+}
+
+// Response represents the response from an HTTP request.
+//
+// The [Client] and [Transport] return Responses from servers once
+// the response headers have been received. The response body
+// is streamed on demand as the Body field is read.
+type Response struct {
+ Status string // e.g. "200 OK"
+ StatusCode int // e.g. 200
+ Proto string // e.g. "HTTP/1.0"
+ ProtoMajor int // e.g. 1
+ ProtoMinor int // e.g. 0
+
+ // Header maps header keys to values. If the response had multiple
+ // headers with the same key, they may be concatenated, with comma
+ // delimiters. (RFC 7230, section 3.2.2 requires that multiple headers
+ // be semantically equivalent to a comma-delimited sequence.) When
+ // Header values are duplicated by other fields in this struct (e.g.,
+ // ContentLength, TransferEncoding, Trailer), the field values are
+ // authoritative.
+ //
+ // Keys in the map are canonicalized (see CanonicalHeaderKey).
+ Header Header
+
+ // Body represents the response body.
+ //
+ // The response body is streamed on demand as the Body field
+ // is read. If the network connection fails or the server
+ // terminates the response, Body.Read calls return an error.
+ //
+ // The http Client and Transport guarantee that Body is always
+ // non-nil, even on responses without a body or responses with
+ // a zero-length body. It is the caller's responsibility to
+ // close Body. The default HTTP client's Transport may not
+ // reuse HTTP/1.x "keep-alive" TCP connections if the Body is
+ // not read to completion and closed.
+ //
+ // The Body is automatically dechunked if the server replied
+ // with a "chunked" Transfer-Encoding.
+ //
+ // As of Go 1.12, the Body will also implement io.Writer
+ // on a successful "101 Switching Protocols" response,
+ // as used by WebSockets and HTTP/2's "h2c" mode.
+ Body io.ReadCloser
+
+ // ContentLength records the length of the associated content. The
+ // value -1 indicates that the length is unknown. Unless Request.Method
+ // is "HEAD", values >= 0 indicate that the given number of bytes may
+ // be read from Body.
+ ContentLength int64
+
+ // Contains transfer encodings from outer-most to inner-most. A nil
+ // value means that the "identity" encoding is used.
+ TransferEncoding []string
+
+ // Close records whether the header directed that the connection be
+ // closed after reading Body. The value is advice for clients: neither
+ // ReadResponse nor Response.Write ever closes a connection.
+ Close bool
+
+ // Uncompressed reports whether the response was sent compressed but
+ // was decompressed by the http package. When true, reading from
+ // Body yields the uncompressed content instead of the compressed
+ // content actually sent by the server, ContentLength is set to -1,
+ // and the "Content-Length" and "Content-Encoding" fields are deleted
+ // from the response's Header. To get the original response from
+ // the server, set Transport.DisableCompression to true.
+ Uncompressed bool
+
+ // Trailer maps trailer keys to values in the same
+ // format as Header.
+ //
+ // The Trailer initially contains only nil values, one for
+ // each key specified in the server's "Trailer" header
+ // value. Those values are not added to Header.
+ //
+ // Trailer must not be accessed concurrently with Read calls
+ // on the Body.
+ //
+ // After Body.Read has returned io.EOF, Trailer will contain
+ // any trailer values sent by the server.
+ Trailer Header
+
+ // Request is the request that was sent to obtain this Response.
+ // Request's Body is nil (having already been consumed).
+ // This is only populated for Client requests.
+ Request *Request
+
+ // TLS contains information about the TLS connection on which the
+ // response was received. It is nil for unencrypted responses.
+ // The pointer is shared between responses and should not be
+ // modified.
+ TLS *tls.ConnectionState
+}
+
+// Cookies parses and returns the cookies set in the Set-Cookie headers.
+func (r *Response) Cookies() []*Cookie {
+ return readSetCookies(r.Header)
+}
+
+// ErrNoLocation is returned by the [Response.Location] method
+// when no Location header is present.
+var ErrNoLocation = errors.New("http: no Location header in response")
+
+// Location returns the URL of the response's "Location" header,
+// if present. Relative redirects are resolved relative to
+// [Response.Request]. [ErrNoLocation] is returned if no
+// Location header is present.
+func (r *Response) Location() (*url.URL, error) {
+ lv := r.Header.Get("Location")
+ if lv == "" {
+ return nil, ErrNoLocation
+ }
+ if r.Request != nil && r.Request.URL != nil {
+ return r.Request.URL.Parse(lv)
+ }
+ return url.Parse(lv)
+}
+
+// ReadResponse reads and returns an HTTP response from r.
+// The req parameter optionally specifies the [Request] that corresponds
+// to this [Response]. If nil, a GET request is assumed.
+// Clients must call resp.Body.Close when finished reading resp.Body.
+// After that call, clients can inspect resp.Trailer to find key/value
+// pairs included in the response trailer.
+func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) {
+ tp := textproto.NewReader(r)
+ resp := &Response{
+ Request: req,
+ }
+
+ // Parse the first line of the response.
+ line, err := tp.ReadLine()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, err
+ }
+ proto, status, ok := strings.Cut(line, " ")
+ if !ok {
+ return nil, badStringError("malformed HTTP response", line)
+ }
+ resp.Proto = proto
+ resp.Status = strings.TrimLeft(status, " ")
+
+ statusCode, _, _ := strings.Cut(resp.Status, " ")
+ if len(statusCode) != 3 {
+ return nil, badStringError("malformed HTTP status code", statusCode)
+ }
+ resp.StatusCode, err = strconv.Atoi(statusCode)
+ if err != nil || resp.StatusCode < 0 {
+ return nil, badStringError("malformed HTTP status code", statusCode)
+ }
+ if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
+ return nil, badStringError("malformed HTTP version", resp.Proto)
+ }
+
+ // Parse the response headers.
+ mimeHeader, err := tp.ReadMIMEHeader()
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return nil, err
+ }
+ resp.Header = Header(mimeHeader)
+
+ fixPragmaCacheControl(resp.Header)
+
+ err = readTransfer(resp, r)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
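+
+// An illustrative sketch (editorial note, not part of the upstream source):
+// parsing a canned HTTP/1.1 response from memory. Passing a nil request means
+// a GET is assumed.
+//
+//	raw := "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"
+//	resp, err := ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer resp.Body.Close()
+//	b, _ := io.ReadAll(resp.Body)
+//	fmt.Printf("%s\n", b) // hi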
+
+// RFC 7234, section 5.4: Should treat
+//
+// Pragma: no-cache
+//
+// like
+//
+// Cache-Control: no-cache
+func fixPragmaCacheControl(header Header) {
+ if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" {
+ if _, presentcc := header["Cache-Control"]; !presentcc {
+ header["Cache-Control"] = []string{"no-cache"}
+ }
+ }
+}
+
+// ProtoAtLeast reports whether the HTTP protocol used
+// in the response is at least major.minor.
+func (r *Response) ProtoAtLeast(major, minor int) bool {
+ return r.ProtoMajor > major ||
+ r.ProtoMajor == major && r.ProtoMinor >= minor
+}
+
+// Write writes r to w in the HTTP/1.x server response format,
+// including the status line, headers, body, and optional trailer.
+//
+// This method consults the following fields of the response r:
+//
+// StatusCode
+// ProtoMajor
+// ProtoMinor
+// Request.Method
+// TransferEncoding
+// Trailer
+// Body
+// ContentLength
+// Header (values for non-canonical keys will have unpredictable behavior)
+//
+// The Response Body is closed after it is sent.
+func (r *Response) Write(w io.Writer) error {
+ // Status line
+ text := r.Status
+ if text == "" {
+ text = StatusText(r.StatusCode)
+ if text == "" {
+ text = "status code " + strconv.Itoa(r.StatusCode)
+ }
+ } else {
+ // Just to reduce stutter, strip the numeric prefix if the user set
+ // r.Status to "200 OK" and StatusCode to 200. Not important.
+ text = strings.TrimPrefix(text, strconv.Itoa(r.StatusCode)+" ")
+ }
+
+ if _, err := fmt.Fprintf(w, "HTTP/%d.%d %03d %s\r\n", r.ProtoMajor, r.ProtoMinor, r.StatusCode, text); err != nil {
+ return err
+ }
+
+ // Clone it, so we can modify r1 as needed.
+ r1 := new(Response)
+ *r1 = *r
+ if r1.ContentLength == 0 && r1.Body != nil {
+ // Is it actually 0 length? Or just unknown?
+ var buf [1]byte
+ n, err := r1.Body.Read(buf[:])
+ if err != nil && err != io.EOF {
+ return err
+ }
+ if n == 0 {
+ // Reset it to a known zero reader, in case underlying one
+ // is unhappy being read repeatedly.
+ r1.Body = NoBody
+ } else {
+ r1.ContentLength = -1
+ r1.Body = struct {
+ io.Reader
+ io.Closer
+ }{
+ io.MultiReader(bytes.NewReader(buf[:1]), r.Body),
+ r.Body,
+ }
+ }
+ }
+ // If we're sending a non-chunked HTTP/1.1 response without a
+ // content-length, the only way to do that is the old HTTP/1.0
+ // way, by noting the EOF with a connection close, so we need
+ // to set Close.
+ if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) && !r1.Uncompressed {
+ r1.Close = true
+ }
+
+ // Process Body, ContentLength, Close, Trailer
+ tw, err := newTransferWriter(r1)
+ if err != nil {
+ return err
+ }
+ err = tw.writeHeader(w, nil)
+ if err != nil {
+ return err
+ }
+
+ // Rest of header
+ err = r.Header.WriteSubset(w, respExcludeHeader)
+ if err != nil {
+ return err
+ }
+
+ // The Content-Length header may already have been sent for
+ // POST/PUT requests, even if zero length. See Issue 8180.
+ contentLengthAlreadySent := tw.shouldSendContentLength()
+ if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent && bodyAllowedForStatus(r.StatusCode) {
+ if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil {
+ return err
+ }
+ }
+
+ // End-of-header
+ if _, err := io.WriteString(w, "\r\n"); err != nil {
+ return err
+ }
+
+ // Write body and trailer
+ err = tw.writeBody(w)
+ if err != nil {
+ return err
+ }
+
+ // Success
+ return nil
+}
+
+func (r *Response) closeBody() {
+ if r.Body != nil {
+ r.Body.Close()
+ }
+}
+
+// bodyIsWritable reports whether the Body supports writing. The
+// Transport returns Writable bodies for 101 Switching Protocols
+// responses.
+// The Transport uses this method to determine whether a persistent
+// connection is done being managed from its perspective. Once we
+// return a writable response body to a user, the net/http package is
+// done managing that connection.
+func (r *Response) bodyIsWritable() bool {
+ _, ok := r.Body.(io.Writer)
+ return ok
+}
+
+// isProtocolSwitch reports whether the response code and header
+// indicate a successful protocol upgrade response.
+func (r *Response) isProtocolSwitch() bool {
+ return isProtocolSwitchResponse(r.StatusCode, r.Header)
+}
+
+// isProtocolSwitchResponse reports whether the response code and
+// response header indicate a successful protocol upgrade response.
+func isProtocolSwitchResponse(code int, h Header) bool {
+ return code == StatusSwitchingProtocols && isProtocolSwitchHeader(h)
+}
+
+// isProtocolSwitchHeader reports whether the request or response header
+// is for a protocol switch.
+func isProtocolSwitchHeader(h Header) bool {
+ return h.Get("Upgrade") != "" &&
+ httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade")
+}
diff --git a/contrib/go/_std_1.22/src/net/http/responsecontroller.go b/contrib/go/_std_1.22/src/net/http/responsecontroller.go
new file mode 100644
index 0000000000..f3f24c1273
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/responsecontroller.go
@@ -0,0 +1,147 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "time"
+)
+
+// A ResponseController is used by an HTTP handler to control the response.
+//
+// A ResponseController may not be used after the [Handler.ServeHTTP] method has returned.
+type ResponseController struct {
+ rw ResponseWriter
+}
+
+// NewResponseController creates a [ResponseController] for a request.
+//
+// The ResponseWriter should be the original value passed to the [Handler.ServeHTTP] method,
+// or have an Unwrap method returning the original ResponseWriter.
+//
+// If the ResponseWriter implements any of the following methods, the ResponseController
+// will call them as appropriate:
+//
+// Flush()
+// FlushError() error // alternative Flush returning an error
+// Hijack() (net.Conn, *bufio.ReadWriter, error)
+// SetReadDeadline(deadline time.Time) error
+// SetWriteDeadline(deadline time.Time) error
+// EnableFullDuplex() error
+//
+// If the ResponseWriter does not support a method, ResponseController returns
+// an error matching [ErrNotSupported].
+func NewResponseController(rw ResponseWriter) *ResponseController {
+ return &ResponseController{rw}
+}
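+
+// An illustrative handler sketch (editorial note, not part of the upstream
+// source): streaming with a per-response write deadline, tolerating
+// ResponseWriters that do not support deadlines.
+//
+//	func stream(w ResponseWriter, r *Request) {
+//		rc := NewResponseController(w)
+//		err := rc.SetWriteDeadline(time.Now().Add(10 * time.Second))
+//		if err != nil && !errors.Is(err, ErrNotSupported) {
+//			return
+//		}
+//		io.WriteString(w, "chunk\n")
+//		rc.Flush() // push buffered data to the client now
+//	}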
+
+type rwUnwrapper interface {
+ Unwrap() ResponseWriter
+}
+
+// Flush flushes buffered data to the client.
+func (c *ResponseController) Flush() error {
+ rw := c.rw
+ for {
+ switch t := rw.(type) {
+ case interface{ FlushError() error }:
+ return t.FlushError()
+ case Flusher:
+ t.Flush()
+ return nil
+ case rwUnwrapper:
+ rw = t.Unwrap()
+ default:
+ return errNotSupported()
+ }
+ }
+}
+
+// Hijack lets the caller take over the connection.
+// See the Hijacker interface for details.
+func (c *ResponseController) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ rw := c.rw
+ for {
+ switch t := rw.(type) {
+ case Hijacker:
+ return t.Hijack()
+ case rwUnwrapper:
+ rw = t.Unwrap()
+ default:
+ return nil, nil, errNotSupported()
+ }
+ }
+}
+
+// SetReadDeadline sets the deadline for reading the entire request, including the body.
+// Reads from the request body after the deadline has been exceeded will return an error.
+// A zero value means no deadline.
+//
+// Setting the read deadline after it has been exceeded will not extend it.
+func (c *ResponseController) SetReadDeadline(deadline time.Time) error {
+ rw := c.rw
+ for {
+ switch t := rw.(type) {
+ case interface{ SetReadDeadline(time.Time) error }:
+ return t.SetReadDeadline(deadline)
+ case rwUnwrapper:
+ rw = t.Unwrap()
+ default:
+ return errNotSupported()
+ }
+ }
+}
+
+// SetWriteDeadline sets the deadline for writing the response.
+// Writes to the response body after the deadline has been exceeded will not block,
+// but may succeed if the data has been buffered.
+// A zero value means no deadline.
+//
+// Setting the write deadline after it has been exceeded will not extend it.
+func (c *ResponseController) SetWriteDeadline(deadline time.Time) error {
+ rw := c.rw
+ for {
+ switch t := rw.(type) {
+ case interface{ SetWriteDeadline(time.Time) error }:
+ return t.SetWriteDeadline(deadline)
+ case rwUnwrapper:
+ rw = t.Unwrap()
+ default:
+ return errNotSupported()
+ }
+ }
+}
+
+// EnableFullDuplex indicates that the request handler will interleave reads from [Request.Body]
+// with writes to the [ResponseWriter].
+//
+// For HTTP/1 requests, the Go HTTP server by default consumes any unread portion of
+// the request body before beginning to write the response, preventing handlers from
+// concurrently reading from the request and writing the response.
+// Calling EnableFullDuplex disables this behavior and permits handlers to continue to read
+// from the request while concurrently writing the response.
+//
+// For HTTP/2 requests, the Go HTTP server always permits concurrent reads and responses.
+func (c *ResponseController) EnableFullDuplex() error {
+ rw := c.rw
+ for {
+ switch t := rw.(type) {
+ case interface{ EnableFullDuplex() error }:
+ return t.EnableFullDuplex()
+ case rwUnwrapper:
+ rw = t.Unwrap()
+ default:
+ return errNotSupported()
+ }
+ }
+}
+
+// errNotSupported returns an error that matches ErrNotSupported
+// via errors.Is, but is not == to it.
+func errNotSupported() error {
+ return fmt.Errorf("%w", ErrNotSupported)
+}
diff --git a/contrib/go/_std_1.22/src/net/http/roundtrip.go b/contrib/go/_std_1.22/src/net/http/roundtrip.go
new file mode 100644
index 0000000000..08c270179a
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/roundtrip.go
@@ -0,0 +1,18 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !js
+
+package http
+
+// RoundTrip implements the [RoundTripper] interface.
+//
+// For higher-level HTTP client support (such as handling of cookies
+// and redirects), see [Get], [Post], and the [Client] type.
+//
+// Like the RoundTripper interface, the error types returned
+// by RoundTrip are unspecified.
+func (t *Transport) RoundTrip(req *Request) (*Response, error) {
+ return t.roundTrip(req)
+}
diff --git a/contrib/go/_std_1.22/src/net/http/routing_index.go b/contrib/go/_std_1.22/src/net/http/routing_index.go
new file mode 100644
index 0000000000..9ac42c997d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/routing_index.go
@@ -0,0 +1,124 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import "math"
+
+// A routingIndex optimizes conflict detection by indexing patterns.
+//
+// The basic idea is to rule out patterns that cannot conflict with a given
+// pattern because they have a different literal in a corresponding segment.
+// See the comments in [routingIndex.possiblyConflictingPatterns] for more details.
+type routingIndex struct {
+ // map from a particular segment position and value to all registered patterns
+ // with that value in that position.
+ // For example, the key {1, "b"} would hold the patterns "/a/b" and "/a/b/c"
+ // but not "/a", "b/a", "/a/c" or "/a/{x}".
+ segments map[routingIndexKey][]*pattern
+ // All patterns that end in a multi wildcard (including trailing slash).
+ // We do not try to be clever about indexing multi patterns, because there
+ // are unlikely to be many of them.
+ multis []*pattern
+}
+
+type routingIndexKey struct {
+ pos int // 0-based segment position
+ s string // literal, or empty for wildcard
+}
+
+func (idx *routingIndex) addPattern(pat *pattern) {
+ if pat.lastSegment().multi {
+ idx.multis = append(idx.multis, pat)
+ } else {
+ if idx.segments == nil {
+ idx.segments = map[routingIndexKey][]*pattern{}
+ }
+ for pos, seg := range pat.segments {
+ key := routingIndexKey{pos: pos, s: ""}
+ if !seg.wild {
+ key.s = seg.s
+ }
+ idx.segments[key] = append(idx.segments[key], pat)
+ }
+ }
+}
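+
+// A worked example (editorial note, not part of the upstream source):
+// registering "/a/{x}/c" adds the pattern under the keys {0, "a"}, {1, ""},
+// and {2, "c"}, while "/files/{path...}" ends in a multi wildcard and is
+// appended to multis instead.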
+
+// possiblyConflictingPatterns calls f on all patterns that might conflict with
+// pat. If f returns a non-nil error, possiblyConflictingPatterns returns immediately
+// with that error.
+//
+// To be correct, possiblyConflictingPatterns must include all patterns that
+// might conflict. But it may also include patterns that cannot conflict.
+// For instance, an implementation that returns all registered patterns is correct.
+// We use this fact throughout, simplifying the implementation by returning more
+// patterns than we might need to.
+func (idx *routingIndex) possiblyConflictingPatterns(pat *pattern, f func(*pattern) error) (err error) {
+ // Terminology:
+ // dollar pattern: one ending in "{$}"
+ // multi pattern: one ending in a trailing slash or "{x...}" wildcard
+ // ordinary pattern: neither of the above
+
+ // apply f to all the pats, stopping on error.
+ apply := func(pats []*pattern) error {
+ if err != nil {
+ return err
+ }
+ for _, p := range pats {
+ err = f(p)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // Our simple indexing scheme doesn't try to prune multi patterns; assume
+ // any of them can match the argument.
+ if err := apply(idx.multis); err != nil {
+ return err
+ }
+ if pat.lastSegment().s == "/" {
+ // All paths that a dollar pattern matches end in a slash; no paths that
+ // an ordinary pattern matches do. So only other dollar or multi
+ // patterns can conflict with a dollar pattern. Furthermore, conflicting
+ // dollar patterns must have the {$} in the same position.
+ return apply(idx.segments[routingIndexKey{s: "/", pos: len(pat.segments) - 1}])
+ }
+ // For ordinary and multi patterns, the only conflicts can be with a multi,
+ // or a pattern that has the same literal or a wildcard at some literal
+ // position.
+ // We could intersect all the possible matches at each position, but we
+ // do something simpler: we find the position with the fewest patterns.
+ var lmin, wmin []*pattern
+ min := math.MaxInt
+ hasLit := false
+ for i, seg := range pat.segments {
+ if seg.multi {
+ break
+ }
+ if !seg.wild {
+ hasLit = true
+ lpats := idx.segments[routingIndexKey{s: seg.s, pos: i}]
+ wpats := idx.segments[routingIndexKey{s: "", pos: i}]
+ if sum := len(lpats) + len(wpats); sum < min {
+ lmin = lpats
+ wmin = wpats
+ min = sum
+ }
+ }
+ }
+ if hasLit {
+ apply(lmin)
+ apply(wmin)
+ return err
+ }
+
+ // This pattern is all wildcards.
+ // Check it against everything.
+ for _, pats := range idx.segments {
+ apply(pats)
+ }
+ return err
+}
diff --git a/contrib/go/_std_1.22/src/net/http/routing_tree.go b/contrib/go/_std_1.22/src/net/http/routing_tree.go
new file mode 100644
index 0000000000..8812ed04e2
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/routing_tree.go
@@ -0,0 +1,240 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a decision tree for fast matching of requests to
+// patterns.
+//
+// The root of the tree branches on the host of the request.
+// The next level branches on the method.
+// The remaining levels branch on consecutive segments of the path.
+//
+// The "more specific wins" precedence rule can result in backtracking.
+// For example, given the patterns
+// /a/b/z
+// /a/{x}/c
+// we will first try to match the path "/a/b/c" with /a/b/z, and
+// when that fails we will try against /a/{x}/c.
+
+package http
+
+import (
+ "strings"
+)
+
+// A routingNode is a node in the decision tree.
+// The same struct is used for leaf and interior nodes.
+type routingNode struct {
+ // A leaf node holds a single pattern and the Handler it was registered
+ // with.
+ pattern *pattern
+ handler Handler
+
+ // An interior node maps parts of the incoming request to child nodes.
+ // special children keys:
+ // "/" trailing slash (resulting from {$})
+ // "" single wildcard
+ // "*" multi wildcard
+ children mapping[string, *routingNode]
+ emptyChild *routingNode // optimization: child with key ""
+}
+
+// addPattern adds a pattern and its associated Handler to the tree
+// at root.
+func (root *routingNode) addPattern(p *pattern, h Handler) {
+ // First level of tree is host.
+ n := root.addChild(p.host)
+ // Second level of tree is method.
+ n = n.addChild(p.method)
+ // Remaining levels are path.
+ n.addSegments(p.segments, p, h)
+}
+
+// addSegments adds the given segments to the tree rooted at n.
+// If there are no segments, then n is a leaf node that holds
+// the given pattern and handler.
+func (n *routingNode) addSegments(segs []segment, p *pattern, h Handler) {
+ if len(segs) == 0 {
+ n.set(p, h)
+ return
+ }
+ seg := segs[0]
+ if seg.multi {
+ if len(segs) != 1 {
+ panic("multi wildcard not last")
+ }
+ n.addChild("*").set(p, h)
+ } else if seg.wild {
+ n.addChild("").addSegments(segs[1:], p, h)
+ } else {
+ n.addChild(seg.s).addSegments(segs[1:], p, h)
+ }
+}
+
+// set sets the pattern and handler for n, which
+// must be a leaf node.
+func (n *routingNode) set(p *pattern, h Handler) {
+ if n.pattern != nil || n.handler != nil {
+ panic("non-nil leaf fields")
+ }
+ n.pattern = p
+ n.handler = h
+}
+
+// addChild adds a child node with the given key to n
+// if one does not exist, and returns the child.
+func (n *routingNode) addChild(key string) *routingNode {
+ if key == "" {
+ if n.emptyChild == nil {
+ n.emptyChild = &routingNode{}
+ }
+ return n.emptyChild
+ }
+ if c := n.findChild(key); c != nil {
+ return c
+ }
+ c := &routingNode{}
+ n.children.add(key, c)
+ return c
+}
+
+// findChild returns the child of n with the given key, or nil
+// if there is no child with that key.
+func (n *routingNode) findChild(key string) *routingNode {
+ if key == "" {
+ return n.emptyChild
+ }
+ r, _ := n.children.find(key)
+ return r
+}
+
+// match returns the leaf node under root that matches the arguments, and a list
+// of values for pattern wildcards in the order that the wildcards appear.
+// For example, if the request path is "/a/b/c" and the pattern is "/{x}/b/{y}",
+// then the second return value will be []string{"a", "c"}.
+func (root *routingNode) match(host, method, path string) (*routingNode, []string) {
+ if host != "" {
+ // There is a host. If there is a pattern that specifies that host and it
+ // matches, we are done. If the pattern doesn't match, fall through to
+ // try patterns with no host.
+ if l, m := root.findChild(host).matchMethodAndPath(method, path); l != nil {
+ return l, m
+ }
+ }
+ return root.emptyChild.matchMethodAndPath(method, path)
+}
+
+// matchMethodAndPath matches the method and path.
+// Its return values are the same as [routingNode.match].
+// The receiver should be a child of the root.
+func (n *routingNode) matchMethodAndPath(method, path string) (*routingNode, []string) {
+ if n == nil {
+ return nil, nil
+ }
+ if l, m := n.findChild(method).matchPath(path, nil); l != nil {
+ // Exact match of method name.
+ return l, m
+ }
+ if method == "HEAD" {
+ // GET matches HEAD too.
+ if l, m := n.findChild("GET").matchPath(path, nil); l != nil {
+ return l, m
+ }
+ }
+ // No exact match; try patterns with no method.
+ return n.emptyChild.matchPath(path, nil)
+}
+
+// matchPath matches a path.
+// Its return values are the same as [routingNode.match].
+// matchPath calls itself recursively. The matches argument holds the wildcard matches
+// found so far.
+func (n *routingNode) matchPath(path string, matches []string) (*routingNode, []string) {
+ if n == nil {
+ return nil, nil
+ }
+ // If path is empty, then we are done.
+ // If n is a leaf node, we found a match; return it.
+ // If n is an interior node (which means it has a nil pattern),
+ // then we failed to match.
+ if path == "" {
+ if n.pattern == nil {
+ return nil, nil
+ }
+ return n, matches
+ }
+ // Get the first segment of path.
+ seg, rest := firstSegment(path)
+ // First try matching against patterns that have a literal for this position.
+ // We know by construction that such patterns are more specific than those
+ // with a wildcard at this position (they are either more specific, equivalent,
+// or overlap, and we ruled out the last two when the patterns were registered).
+ if n, m := n.findChild(seg).matchPath(rest, matches); n != nil {
+ return n, m
+ }
+ // If matching a literal fails, try again with patterns that have a single
+ // wildcard (represented by an empty string in the child mapping).
+ // Again, by construction, patterns with a single wildcard must be more specific than
+ // those with a multi wildcard.
+ // We skip this step if the segment is a trailing slash, because single wildcards
+ // don't match trailing slashes.
+ if seg != "/" {
+ if n, m := n.emptyChild.matchPath(rest, append(matches, seg)); n != nil {
+ return n, m
+ }
+ }
+ // Lastly, match the pattern (there can be at most one) that has a multi
+ // wildcard in this position to the rest of the path.
+ if c := n.findChild("*"); c != nil {
+ // Don't record a match for a nameless wildcard (which arises from a
+ // trailing slash in the pattern).
+ if c.pattern.lastSegment().s != "" {
+ matches = append(matches, pathUnescape(path[1:])) // remove initial slash
+ }
+ return c, matches
+ }
+ return nil, nil
+}
+
+// firstSegment splits path into its first segment, and the rest.
+// The path must begin with "/".
+// If path consists of only a slash, firstSegment returns ("/", "").
+// The segment is returned unescaped, if possible.
+func firstSegment(path string) (seg, rest string) {
+ if path == "/" {
+ return "/", ""
+ }
+ path = path[1:] // drop initial slash
+ i := strings.IndexByte(path, '/')
+ if i < 0 {
+ i = len(path)
+ }
+ return pathUnescape(path[:i]), path[i:]
+}
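+
+// For example (illustrative only):
+//
+//	firstSegment("/a/b/c")   returns ("a", "/b/c")
+//	firstSegment("/a%2Fb/c") returns ("a/b", "/c") // the segment is unescaped
+//	firstSegment("/")        returns ("/", "")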
+
+// matchingMethods adds to methodSet all the methods that would result in a
+// match if passed to routingNode.match with the given host and path.
+func (root *routingNode) matchingMethods(host, path string, methodSet map[string]bool) {
+ if host != "" {
+ root.findChild(host).matchingMethodsPath(path, methodSet)
+ }
+ root.emptyChild.matchingMethodsPath(path, methodSet)
+ if methodSet["GET"] {
+ methodSet["HEAD"] = true
+ }
+}
+
+func (n *routingNode) matchingMethodsPath(path string, set map[string]bool) {
+ if n == nil {
+ return
+ }
+ n.children.eachPair(func(method string, c *routingNode) bool {
+ if p, _ := c.matchPath(path, nil); p != nil {
+ set[method] = true
+ }
+ return true
+ })
+ // Don't look at the empty child. If there were an empty
+ // child, it would match on any method, but we only
+ // call this when we fail to match on a method.
+}
diff --git a/contrib/go/_std_1.22/src/net/http/servemux121.go b/contrib/go/_std_1.22/src/net/http/servemux121.go
new file mode 100644
index 0000000000..c0a4b77010
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/servemux121.go
@@ -0,0 +1,211 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// This file implements ServeMux behavior as in Go 1.21.
+// The behavior is controlled by a GODEBUG setting.
+// Most of this code is derived from commit 08e35cc334.
+// Changes are minimal: aside from the different receiver type,
+// they mostly involve renaming functions, usually by unexporting them.
+
+import (
+ "internal/godebug"
+ "net/url"
+ "sort"
+ "strings"
+ "sync"
+)
+
+var httpmuxgo121 = godebug.New("httpmuxgo121")
+
+var use121 bool
+
+// Read httpmuxgo121 once at startup, since dealing with changes to it during
+// program execution is too complex and error-prone.
+func init() {
+ if httpmuxgo121.Value() == "1" {
+ use121 = true
+ httpmuxgo121.IncNonDefault()
+ }
+}
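+
+// For example, a deployment that still needs the Go 1.21 matching rules can
+// opt back in by setting the GODEBUG environment variable when starting the
+// binary (a sketch; myserver is a hypothetical program name):
+//
+//	GODEBUG=httpmuxgo121=1 ./myserver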
+
+// serveMux121 holds the state of a ServeMux needed for Go 1.21 behavior.
+type serveMux121 struct {
+ mu sync.RWMutex
+ m map[string]muxEntry
+ es []muxEntry // slice of entries sorted from longest to shortest.
+ hosts bool // whether any patterns contain hostnames
+}
+
+type muxEntry struct {
+ h Handler
+ pattern string
+}
+
+// Formerly ServeMux.Handle.
+func (mux *serveMux121) handle(pattern string, handler Handler) {
+ mux.mu.Lock()
+ defer mux.mu.Unlock()
+
+ if pattern == "" {
+ panic("http: invalid pattern")
+ }
+ if handler == nil {
+ panic("http: nil handler")
+ }
+ if _, exist := mux.m[pattern]; exist {
+ panic("http: multiple registrations for " + pattern)
+ }
+
+ if mux.m == nil {
+ mux.m = make(map[string]muxEntry)
+ }
+ e := muxEntry{h: handler, pattern: pattern}
+ mux.m[pattern] = e
+ if pattern[len(pattern)-1] == '/' {
+ mux.es = appendSorted(mux.es, e)
+ }
+
+ if pattern[0] != '/' {
+ mux.hosts = true
+ }
+}
+
+func appendSorted(es []muxEntry, e muxEntry) []muxEntry {
+ n := len(es)
+ i := sort.Search(n, func(i int) bool {
+ return len(es[i].pattern) < len(e.pattern)
+ })
+ if i == n {
+ return append(es, e)
+ }
+ // we now know that i points at where we want to insert
+ es = append(es, muxEntry{}) // try to grow the slice in place, any entry works.
+ copy(es[i+1:], es[i:]) // Move shorter entries down
+ es[i] = e
+ return es
+}
+
+// Formerly ServeMux.HandleFunc.
+func (mux *serveMux121) handleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+ if handler == nil {
+ panic("http: nil handler")
+ }
+ mux.handle(pattern, HandlerFunc(handler))
+}
+
+// Formerly ServeMux.Handler.
+func (mux *serveMux121) findHandler(r *Request) (h Handler, pattern string) {
+
+ // CONNECT requests are not canonicalized.
+ if r.Method == "CONNECT" {
+ // If r.URL.Path is /tree and its handler is not registered,
+ // the /tree -> /tree/ redirect applies to CONNECT requests
+ // but the path canonicalization does not.
+ if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok {
+ return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+ }
+
+ return mux.handler(r.Host, r.URL.Path)
+ }
+
+ // All other requests have any port stripped and path cleaned
+ // before passing to mux.handler.
+ host := stripHostPort(r.Host)
+ path := cleanPath(r.URL.Path)
+
+ // If the given path is /tree and its handler is not registered,
+ // redirect for /tree/.
+ if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok {
+ return RedirectHandler(u.String(), StatusMovedPermanently), u.Path
+ }
+
+ if path != r.URL.Path {
+ _, pattern = mux.handler(host, path)
+ u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
+ return RedirectHandler(u.String(), StatusMovedPermanently), pattern
+ }
+
+ return mux.handler(host, r.URL.Path)
+}
+
+// handler is the main implementation of findHandler.
+// The path is known to be in canonical form, except for CONNECT requests.
+func (mux *serveMux121) handler(host, path string) (h Handler, pattern string) {
+ mux.mu.RLock()
+ defer mux.mu.RUnlock()
+
+ // Host-specific pattern takes precedence over generic ones
+ if mux.hosts {
+ h, pattern = mux.match(host + path)
+ }
+ if h == nil {
+ h, pattern = mux.match(path)
+ }
+ if h == nil {
+ h, pattern = NotFoundHandler(), ""
+ }
+ return
+}
+
+// Find a handler on a handler map given a path string.
+// Most-specific (longest) pattern wins.
+func (mux *serveMux121) match(path string) (h Handler, pattern string) {
+ // Check for exact match first.
+ v, ok := mux.m[path]
+ if ok {
+ return v.h, v.pattern
+ }
+
+ // Check for longest valid match. mux.es contains all patterns
+ // that end in / sorted from longest to shortest.
+ for _, e := range mux.es {
+ if strings.HasPrefix(path, e.pattern) {
+ return e.h, e.pattern
+ }
+ }
+ return nil, ""
+}
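+
+// For example (a sketch): with patterns "/" and "/images/" registered, the
+// path "/images/logo.png" matches "/images/" (the longest matching prefix),
+// while "/index.html" falls back to "/".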
+
+// redirectToPathSlash reports whether the given path needs "/" appended.
+// That is the case when a handler for path + "/" was registered, but not
+// for path itself. If so, it returns a new URL with the path set to
+// u.Path + "/" and true; otherwise it returns u unchanged and false.
+func (mux *serveMux121) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) {
+ mux.mu.RLock()
+ shouldRedirect := mux.shouldRedirectRLocked(host, path)
+ mux.mu.RUnlock()
+ if !shouldRedirect {
+ return u, false
+ }
+ path = path + "/"
+ u = &url.URL{Path: path, RawQuery: u.RawQuery}
+ return u, true
+}
+
+// shouldRedirectRLocked reports whether the given path and host should be redirected to
+// path+"/". This should happen if a handler is registered for path+"/" but
+// not path -- see comments at ServeMux.
+func (mux *serveMux121) shouldRedirectRLocked(host, path string) bool {
+ p := []string{path, host + path}
+
+ for _, c := range p {
+ if _, exist := mux.m[c]; exist {
+ return false
+ }
+ }
+
+ n := len(path)
+ if n == 0 {
+ return false
+ }
+ for _, c := range p {
+ if _, exist := mux.m[c+"/"]; exist {
+ return path[n-1] != '/'
+ }
+ }
+
+ return false
+}
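+
+// For example (a sketch): if a handler is registered for "/images/" but not
+// for "/images", a request for "/images" receives a 301 (Moved Permanently)
+// redirect to "/images/".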
diff --git a/contrib/go/_std_1.22/src/net/http/server.go b/contrib/go/_std_1.22/src/net/http/server.go
new file mode 100644
index 0000000000..acac78bcd0
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/server.go
@@ -0,0 +1,3843 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP server. See RFC 7230 through 7235.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "log"
+ "math/rand"
+ "net"
+ "net/textproto"
+ "net/url"
+ urlpkg "net/url"
+ "path"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// Errors used by the HTTP server.
+var (
+ // ErrBodyNotAllowed is returned by ResponseWriter.Write calls
+ // when the HTTP method or response code does not permit a
+ // body.
+ ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body")
+
+ // ErrHijacked is returned by ResponseWriter.Write calls when
+ // the underlying connection has been hijacked using the
+ // Hijacker interface. A zero-byte write on a hijacked
+ // connection will return ErrHijacked without any other side
+ // effects.
+ ErrHijacked = errors.New("http: connection has been hijacked")
+
+ // ErrContentLength is returned by ResponseWriter.Write calls
+ // when a Handler set a Content-Length response header with a
+ // declared size and then attempted to write more bytes than
+ // declared.
+ ErrContentLength = errors.New("http: wrote more than the declared Content-Length")
+
+ // Deprecated: ErrWriteAfterFlush is no longer returned by
+ // anything in the net/http package. Callers should not
+ // compare errors against this variable.
+ ErrWriteAfterFlush = errors.New("unused")
+)
+
+// A Handler responds to an HTTP request.
+//
+// [Handler.ServeHTTP] should write reply headers and data to the [ResponseWriter]
+// and then return. Returning signals that the request is finished; it
+// is not valid to use the [ResponseWriter] or read from the
+// [Request.Body] after or concurrently with the completion of the
+// ServeHTTP call.
+//
+// Depending on the HTTP client software, HTTP protocol version, and
+// any intermediaries between the client and the Go server, it may not
+// be possible to read from the [Request.Body] after writing to the
+// [ResponseWriter]. Cautious handlers should read the [Request.Body]
+// first, and then reply.
+//
+// Except for reading the body, handlers should not modify the
+// provided Request.
+//
+// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
+// that the effect of the panic was isolated to the active request.
+// It recovers the panic, logs a stack trace to the server error log,
+// and either closes the network connection or sends an HTTP/2
+// RST_STREAM, depending on the HTTP protocol. To abort a handler so
+// the client sees an interrupted response but the server doesn't log
+// an error, panic with the value [ErrAbortHandler].
+type Handler interface {
+ ServeHTTP(ResponseWriter, *Request)
+}
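+
+// A minimal Handler implementation might look like the following (a sketch;
+// greeter is a hypothetical type in an importing package, imports elided):
+//
+//	type greeter struct{}
+//
+//	func (greeter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+//		io.WriteString(w, "hello, "+r.URL.Path)
+//	}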
+
+// A ResponseWriter interface is used by an HTTP handler to
+// construct an HTTP response.
+//
+// A ResponseWriter may not be used after [Handler.ServeHTTP] has returned.
+type ResponseWriter interface {
+ // Header returns the header map that will be sent by
+ // [ResponseWriter.WriteHeader]. The [Header] map also is the mechanism with which
+ // [Handler] implementations can set HTTP trailers.
+ //
+ // Changing the header map after a call to [ResponseWriter.WriteHeader] (or
+ // [ResponseWriter.Write]) has no effect unless the HTTP status code was of the
+ // 1xx class or the modified headers are trailers.
+ //
+ // There are two ways to set Trailers. The preferred way is to
+ // predeclare in the headers which trailers you will later
+ // send by setting the "Trailer" header to the names of the
+ // trailer keys which will come later. In this case, those
+ // keys of the Header map are treated as if they were
+ // trailers. See the example. The second way, for trailer
+ // keys not known to the [Handler] until after the first [ResponseWriter.Write],
+ // is to prefix the [Header] map keys with the [TrailerPrefix]
+ // constant value.
+ //
+ // To suppress automatic response headers (such as "Date"), set
+ // their value to nil.
+ Header() Header
+
+ // Write writes the data to the connection as part of an HTTP reply.
+ //
+ // If [ResponseWriter.WriteHeader] has not yet been called, Write calls
+ // WriteHeader(http.StatusOK) before writing the data. If the Header
+ // does not contain a Content-Type line, Write adds a Content-Type set
+ // to the result of passing the initial 512 bytes of written data to
+ // [DetectContentType]. Additionally, if the total size of all written
+ // data is under a few KB and there are no Flush calls, the
+ // Content-Length header is added automatically.
+ //
+ // Depending on the HTTP protocol version and the client, calling
+ // Write or WriteHeader may prevent future reads on the
+ // Request.Body. For HTTP/1.x requests, handlers should read any
+ // needed request body data before writing the response. Once the
+ // headers have been flushed (due to either an explicit Flusher.Flush
+ // call or writing enough data to trigger a flush), the request body
+ // may be unavailable. For HTTP/2 requests, the Go HTTP server permits
+ // handlers to continue to read the request body while concurrently
+ // writing the response. However, such behavior may not be supported
+ // by all HTTP/2 clients. Handlers should read before writing if
+ // possible to maximize compatibility.
+ Write([]byte) (int, error)
+
+ // WriteHeader sends an HTTP response header with the provided
+ // status code.
+ //
+ // If WriteHeader is not called explicitly, the first call to Write
+ // will trigger an implicit WriteHeader(http.StatusOK).
+ // Thus explicit calls to WriteHeader are mainly used to
+ // send error codes or 1xx informational responses.
+ //
+ // The provided code must be a valid HTTP 1xx-5xx status code.
+ // Any number of 1xx headers may be written, followed by at most
+ // one 2xx-5xx header. 1xx headers are sent immediately, but 2xx-5xx
+ // headers may be buffered. Use the Flusher interface to send
+ // buffered data. The header map is cleared when 2xx-5xx headers are
+ // sent, but not with 1xx headers.
+ //
+ // The server will automatically send a 100 (Continue) header
+ // on the first read from the request body if the request has
+ // an "Expect: 100-continue" header.
+ WriteHeader(statusCode int)
+}
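+
+// A sketch of the preferred trailer mechanism described in Header above
+// (hypothetical handler code in an importing package):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.Header().Set("Trailer", "X-Checksum") // predeclare the trailer key
+//		w.WriteHeader(http.StatusOK)
+//		io.WriteString(w, "body")
+//		w.Header().Set("X-Checksum", "0ab1") // value is sent as a trailer
+//	}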
+
+// The Flusher interface is implemented by ResponseWriters that allow
+// an HTTP handler to flush buffered data to the client.
+//
+// The default HTTP/1.x and HTTP/2 [ResponseWriter] implementations
+// support [Flusher], but ResponseWriter wrappers may not. Handlers
+// should always test for this ability at runtime.
+//
+// Note that even for ResponseWriters that support Flush,
+// if the client is connected through an HTTP proxy,
+// the buffered data may not reach the client until the response
+// completes.
+type Flusher interface {
+ // Flush sends any buffered data to the client.
+ Flush()
+}
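+
+// The runtime test mentioned above is an interface assertion (a sketch, from
+// an importing package):
+//
+//	if f, ok := w.(http.Flusher); ok {
+//		f.Flush()
+//	}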
+
+// The Hijacker interface is implemented by ResponseWriters that allow
+// an HTTP handler to take over the connection.
+//
+// The default [ResponseWriter] for HTTP/1.x connections supports
+// Hijacker, but HTTP/2 connections intentionally do not.
+// ResponseWriter wrappers may also not support Hijacker. Handlers
+// should always test for this ability at runtime.
+type Hijacker interface {
+ // Hijack lets the caller take over the connection.
+ // After a call to Hijack the HTTP server library
+ // will not do anything else with the connection.
+ //
+ // It becomes the caller's responsibility to manage
+ // and close the connection.
+ //
+ // The returned net.Conn may have read or write deadlines
+ // already set, depending on the configuration of the
+ // Server. It is the caller's responsibility to set
+ // or clear those deadlines as needed.
+ //
+ // The returned bufio.Reader may contain unprocessed buffered
+ // data from the client.
+ //
+ // After a call to Hijack, the original Request.Body must not
+ // be used. The original Request's Context remains valid and
+ // is not canceled until the Request's ServeHTTP method
+ // returns.
+ Hijack() (net.Conn, *bufio.ReadWriter, error)
+}
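+
+// A sketch of taking over a connection (from an importing package; error
+// handling abbreviated):
+//
+//	hj, ok := w.(http.Hijacker)
+//	if !ok {
+//		http.Error(w, "hijacking not supported", http.StatusInternalServerError)
+//		return
+//	}
+//	conn, bufrw, _ := hj.Hijack()
+//	defer conn.Close()
+//	bufrw.WriteString("hello\n") // the caller now speaks the raw protocol
+//	bufrw.Flush()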
+
+// The CloseNotifier interface is implemented by ResponseWriters which
+// allow detecting when the underlying connection has gone away.
+//
+// This mechanism can be used to cancel long operations on the server
+// if the client has disconnected before the response is ready.
+//
+// Deprecated: the CloseNotifier interface predates Go's context package.
+// New code should use [Request.Context] instead.
+type CloseNotifier interface {
+ // CloseNotify returns a channel that receives at most a
+ // single value (true) when the client connection has gone
+ // away.
+ //
+ // CloseNotify may wait to notify until Request.Body has been
+ // fully read.
+ //
+ // After the Handler has returned, there is no guarantee
+ // that the channel receives a value.
+ //
+ // If the protocol is HTTP/1.1 and CloseNotify is called while
+ // processing an idempotent request (such as a GET) while
+ // HTTP/1.1 pipelining is in use, the arrival of a subsequent
+ // pipelined request may cause a value to be sent on the
+ // returned channel. In practice HTTP/1.1 pipelining is not
+ // enabled in browsers and not seen often in the wild. If this
+ // is a problem, use HTTP/2 or only use CloseNotify on methods
+ // such as POST.
+ CloseNotify() <-chan bool
+}
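+
+// The context-based replacement looks like this (a sketch; workDone is a
+// hypothetical channel):
+//
+//	select {
+//	case <-r.Context().Done():
+//		return // client went away; abandon the work
+//	case res := <-workDone:
+//		_ = res // use the result
+//	}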
+
+var (
+ // ServerContextKey is a context key. It can be used in HTTP
+ // handlers with Context.Value to access the server that
+ // started the handler. The associated value will be of
+ // type *Server.
+ ServerContextKey = &contextKey{"http-server"}
+
+ // LocalAddrContextKey is a context key. It can be used in
+ // HTTP handlers with Context.Value to access the local
+ // address the connection arrived on.
+ // The associated value will be of type net.Addr.
+ LocalAddrContextKey = &contextKey{"local-addr"}
+)
+
+// A conn represents the server side of an HTTP connection.
+type conn struct {
+ // server is the server on which the connection arrived.
+ // Immutable; never nil.
+ server *Server
+
+ // cancelCtx cancels the connection-level context.
+ cancelCtx context.CancelFunc
+
+ // rwc is the underlying network connection.
+ // This is never wrapped by other types and is the value given out
+ // to CloseNotifier callers. It is usually of type *net.TCPConn or
+ // *tls.Conn.
+ rwc net.Conn
+
+ // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously
+ // inside the Listener's Accept goroutine, as some implementations block.
+ // It is populated immediately inside the (*conn).serve goroutine.
+ // This is the value of a Handler's (*Request).RemoteAddr.
+ remoteAddr string
+
+ // tlsState is the TLS connection state when using TLS.
+ // nil means not TLS.
+ tlsState *tls.ConnectionState
+
+ // werr is set to the first write error to rwc.
+ // It is set via checkConnErrorWriter{w}, where bufw writes.
+ werr error
+
+ // r is bufr's read source. It's a wrapper around rwc that provides
+ // io.LimitedReader-style limiting (while reading request headers)
+ // and functionality to support CloseNotifier. See *connReader docs.
+ r *connReader
+
+ // bufr reads from r.
+ bufr *bufio.Reader
+
+ // bufw writes to checkConnErrorWriter{c}, which populates werr on error.
+ bufw *bufio.Writer
+
+ // lastMethod is the method of the most recent request
+ // on this connection, if any.
+ lastMethod string
+
+ curReq atomic.Pointer[response] // (which has a Request in it)
+
+ curState atomic.Uint64 // packed (unixtime<<8|uint8(ConnState))
+
+ // mu guards hijackedv
+ mu sync.Mutex
+
+ // hijackedv is whether this connection has been hijacked
+ // by a Handler with the Hijacker interface.
+ // It is guarded by mu.
+ hijackedv bool
+}
+
+func (c *conn) hijacked() bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.hijackedv
+}
+
+// c.mu must be held.
+func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
+ if c.hijackedv {
+ return nil, nil, ErrHijacked
+ }
+ c.r.abortPendingRead()
+
+ c.hijackedv = true
+ rwc = c.rwc
+ rwc.SetDeadline(time.Time{})
+
+ buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))
+ if c.r.hasByte {
+ if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {
+ return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err)
+ }
+ }
+ c.setState(rwc, StateHijacked, runHooks)
+ return
+}
+
+// This should be >= 512 bytes for DetectContentType,
+// but otherwise it's somewhat arbitrary.
+const bufferBeforeChunkingSize = 2048
+
+// chunkWriter writes to a response's conn buffer, and is the writer
+// wrapped by the response.w buffered writer.
+//
+// chunkWriter also is responsible for finalizing the Header, including
+// conditionally setting the Content-Type and setting a Content-Length
+// in cases where the handler's final output is smaller than the buffer
+// size. It also conditionally adds chunk headers, when in chunking mode.
+//
+// See the comment above (*response).Write for the entire write flow.
+type chunkWriter struct {
+ res *response
+
+ // header is either nil or a deep clone of res.handlerHeader
+ // at the time of res.writeHeader, if res.writeHeader is
+ // called and extra buffering is being done to calculate
+ // Content-Type and/or Content-Length.
+ header Header
+
+ // wroteHeader tells whether the header's been written to "the
+ // wire" (or rather: w.conn.buf). This is unlike
+ // (*response).wroteHeader, which tells only whether it was
+ // logically written.
+ wroteHeader bool
+
+ // set by the writeHeader method:
+ chunking bool // using chunked transfer encoding for reply body
+}
+
+var (
+ crlf = []byte("\r\n")
+ colonSpace = []byte(": ")
+)
+
+func (cw *chunkWriter) Write(p []byte) (n int, err error) {
+ if !cw.wroteHeader {
+ cw.writeHeader(p)
+ }
+ if cw.res.req.Method == "HEAD" {
+ // Eat writes.
+ return len(p), nil
+ }
+ if cw.chunking {
+ _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p))
+ if err != nil {
+ cw.res.conn.rwc.Close()
+ return
+ }
+ }
+ n, err = cw.res.conn.bufw.Write(p)
+ if cw.chunking && err == nil {
+ _, err = cw.res.conn.bufw.Write(crlf)
+ }
+ if err != nil {
+ cw.res.conn.rwc.Close()
+ }
+ return
+}
+
+func (cw *chunkWriter) flush() error {
+ if !cw.wroteHeader {
+ cw.writeHeader(nil)
+ }
+ return cw.res.conn.bufw.Flush()
+}
+
+func (cw *chunkWriter) close() {
+ if !cw.wroteHeader {
+ cw.writeHeader(nil)
+ }
+ if cw.chunking {
+ bw := cw.res.conn.bufw // conn's bufio writer
+ // zero chunk to mark EOF
+ bw.WriteString("0\r\n")
+ if trailers := cw.res.finalTrailers(); trailers != nil {
+ trailers.Write(bw) // the writer handles noting errors
+ }
+ // final blank line after the trailers (whether
+ // present or not)
+ bw.WriteString("\r\n")
+ }
+}
+
+// A response represents the server side of an HTTP response.
+type response struct {
+ conn *conn
+ req *Request // request for this response
+ reqBody io.ReadCloser
+ cancelCtx context.CancelFunc // when ServeHTTP exits
+ wroteHeader bool // a non-1xx header has been (logically) written
+ wroteContinue bool // 100 Continue response was written
+ wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive"
+ wantsClose bool // HTTP request has Connection "close"
+
+ // canWriteContinue is an atomic boolean that says whether or
+ // not a 100 Continue header can be written to the
+ // connection.
+ // writeContinueMu must be held while writing the header.
+ // These two fields together synchronize the body reader (the
+ // expectContinueReader, which wants to write 100 Continue)
+ // against the main writer.
+ canWriteContinue atomic.Bool
+ writeContinueMu sync.Mutex
+
+ w *bufio.Writer // buffers output in chunks to chunkWriter
+ cw chunkWriter
+
+ // handlerHeader is the Header that Handlers get access to,
+ // which may be retained and mutated even after WriteHeader.
+ // handlerHeader is copied into cw.header at WriteHeader
+ // time, and privately mutated thereafter.
+ handlerHeader Header
+ calledHeader bool // handler accessed handlerHeader via Header
+
+ written int64 // number of bytes written in body
+ contentLength int64 // explicitly-declared Content-Length; or -1
+ status int // status code passed to WriteHeader
+
+ // close connection after this reply. set on request and
+ // updated after response from handler if there's a
+ // "Connection: keep-alive" response header and a
+ // Content-Length.
+ closeAfterReply bool
+
+ // When fullDuplex is false (the default), we consume any remaining
+ // request body before starting to write a response.
+ fullDuplex bool
+
+ // requestBodyLimitHit is set by requestTooLarge when
+ // maxBytesReader hits its max size. It is checked in
+ // WriteHeader, to make sure we don't consume the
+ // remaining request body to try to advance to the next HTTP
+ // request. Instead, when this is set, we stop reading
+ // subsequent requests on this connection and stop reading
+ // input from it.
+ requestBodyLimitHit bool
+
+ // trailers are the headers to be sent after the handler
+ // finishes writing the body. This field is initialized from
+ // the Trailer response header when the response header is
+ // written.
+ trailers []string
+
+ handlerDone atomic.Bool // set true when the handler exits
+
+ // Buffers for Date, Content-Length, and status code
+ dateBuf [len(TimeFormat)]byte
+ clenBuf [10]byte
+ statusBuf [3]byte
+
+ // closeNotifyCh is the channel returned by CloseNotify.
+ // TODO(bradfitz): this is currently (for Go 1.8) always
+ // non-nil. Make this lazily-created again as it used to be?
+ closeNotifyCh chan bool
+ didCloseNotify atomic.Bool // atomic (only false->true winner should send)
+}
+
+func (c *response) SetReadDeadline(deadline time.Time) error {
+ return c.conn.rwc.SetReadDeadline(deadline)
+}
+
+func (c *response) SetWriteDeadline(deadline time.Time) error {
+ return c.conn.rwc.SetWriteDeadline(deadline)
+}
+
+func (c *response) EnableFullDuplex() error {
+ c.fullDuplex = true
+ return nil
+}
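+
+// These three methods back [ResponseController]; since *response is
+// unexported, a handler reaches them like this (a sketch, errors ignored
+// for brevity):
+//
+//	rc := http.NewResponseController(w)
+//	rc.SetWriteDeadline(time.Now().Add(5 * time.Second))
+//	rc.EnableFullDuplex()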
+
+// TrailerPrefix is a magic prefix for [ResponseWriter.Header] map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+//
+// https://pkg.go.dev/net/http#ResponseWriter
+// https://pkg.go.dev/net/http#example-ResponseWriter-Trailers
+const TrailerPrefix = "Trailer:"
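+
+// For example (a sketch, from an importing package), a trailer whose name is
+// not known until after the first Write:
+//
+//	w.Header().Set(http.TrailerPrefix+"X-Late", "v") // arrives as trailer "X-Late"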
+
+// finalTrailers is called after the Handler exits and returns a non-nil
+// value if the Handler set any trailers.
+func (w *response) finalTrailers() Header {
+ var t Header
+ for k, vv := range w.handlerHeader {
+ if kk, found := strings.CutPrefix(k, TrailerPrefix); found {
+ if t == nil {
+ t = make(Header)
+ }
+ t[kk] = vv
+ }
+ }
+ for _, k := range w.trailers {
+ if t == nil {
+ t = make(Header)
+ }
+ for _, v := range w.handlerHeader[k] {
+ t.Add(k, v)
+ }
+ }
+ return t
+}
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (w *response) declareTrailer(k string) {
+ k = CanonicalHeaderKey(k)
+ if !httpguts.ValidTrailerHeader(k) {
+ // Forbidden by RFC 7230, section 4.1.2
+ return
+ }
+ w.trailers = append(w.trailers, k)
+}
+
+// requestTooLarge is called by maxBytesReader when too much input has
+// been read from the client.
+func (w *response) requestTooLarge() {
+ w.closeAfterReply = true
+ w.requestBodyLimitHit = true
+ if !w.wroteHeader {
+ w.Header().Set("Connection", "close")
+ }
+}
+
+// writerOnly hides an io.Writer value's optional ReadFrom method
+// from io.Copy.
+type writerOnly struct {
+ io.Writer
+}
+
+// ReadFrom is here to optimize copying from an [*os.File] regular file
+// to a [*net.TCPConn] with sendfile, or from a supported src type such
+// as a *net.TCPConn on Linux with splice.
+func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
+ buf := getCopyBuf()
+ defer putCopyBuf(buf)
+
+ // Our underlying w.conn.rwc is usually a *TCPConn (with its
+ // own ReadFrom method). If not, just fall back to the normal
+ // copy method.
+ rf, ok := w.conn.rwc.(io.ReaderFrom)
+ if !ok {
+ return io.CopyBuffer(writerOnly{w}, src, buf)
+ }
+
+ // Copy the first sniffLen bytes before switching to ReadFrom.
+ // This ensures we don't start writing the response before the
+ // source is available (see golang.org/issue/5660) and provides
+ // enough bytes to perform Content-Type sniffing when required.
+ if !w.cw.wroteHeader {
+ n0, err := io.CopyBuffer(writerOnly{w}, io.LimitReader(src, sniffLen), buf)
+ n += n0
+ if err != nil || n0 < sniffLen {
+ return n, err
+ }
+ }
+
+ w.w.Flush() // get rid of any previous writes
+ w.cw.flush() // make sure Header is written; flush data to rwc
+
+ // Now that cw has been flushed, its chunking field is guaranteed initialized.
+ if !w.cw.chunking && w.bodyAllowed() {
+ n0, err := rf.ReadFrom(src)
+ n += n0
+ w.written += n0
+ return n, err
+ }
+
+ n0, err := io.CopyBuffer(writerOnly{w}, src, buf)
+ n += n0
+ return n, err
+}
+
+// debugServerConnections controls whether all server connections are wrapped
+// with a verbose logging wrapper.
+const debugServerConnections = false
+
+// Create new connection from rwc.
+func (srv *Server) newConn(rwc net.Conn) *conn {
+ c := &conn{
+ server: srv,
+ rwc: rwc,
+ }
+ if debugServerConnections {
+ c.rwc = newLoggingConn("server", c.rwc)
+ }
+ return c
+}
+
+type readResult struct {
+ _ incomparable
+ n int
+ err error
+ b byte // byte read, if n == 1
+}
+
+// connReader is the io.Reader wrapper used by *conn. It combines a
+// selectively-activated io.LimitedReader (to bound request header
+// read sizes) with support for selectively keeping an io.Reader.Read
+// call blocked in a background goroutine to wait for activity and
+// trigger a CloseNotifier channel.
+type connReader struct {
+ conn *conn
+
+ mu sync.Mutex // guards following
+ hasByte bool
+ byteBuf [1]byte
+ cond *sync.Cond
+ inRead bool
+ aborted bool // set true before conn.rwc deadline is set to past
+ remain int64 // bytes remaining
+}
+
+func (cr *connReader) lock() {
+ cr.mu.Lock()
+ if cr.cond == nil {
+ cr.cond = sync.NewCond(&cr.mu)
+ }
+}
+
+func (cr *connReader) unlock() { cr.mu.Unlock() }
+
+func (cr *connReader) startBackgroundRead() {
+ cr.lock()
+ defer cr.unlock()
+ if cr.inRead {
+ panic("invalid concurrent Body.Read call")
+ }
+ if cr.hasByte {
+ return
+ }
+ cr.inRead = true
+ cr.conn.rwc.SetReadDeadline(time.Time{})
+ go cr.backgroundRead()
+}
+
+func (cr *connReader) backgroundRead() {
+ n, err := cr.conn.rwc.Read(cr.byteBuf[:])
+ cr.lock()
+ if n == 1 {
+ cr.hasByte = true
+ // We were past the end of the previous request's body already
+ // (since we wouldn't be in a background read otherwise), so
+ // this is a pipelined HTTP request. Prior to Go 1.11 we used to
+ // send on the CloseNotify channel and cancel the context here,
+ // but the behavior was documented as only "may", and we only
+ // did that because that's how CloseNotify accidentally behaved
+ // in very early Go releases prior to context support. Once we
+ // added context support, people used a Handler's
+ // Request.Context() and passed it along. Having that context
+ // cancel on pipelined HTTP requests caused problems.
+ // Fortunately, almost nothing uses HTTP/1.x pipelining.
+ // Unfortunately, apt-get does, or sometimes does.
+ // New Go 1.11 behavior: don't fire CloseNotify or cancel
+ // contexts on pipelined requests. Shouldn't affect people, but
+ // fixes cases like Issue 23921. This does mean that a client
+ // closing their TCP connection after sending a pipelined
+ // request won't cancel the context, but we'll catch that on any
+ // write failure (in checkConnErrorWriter.Write).
+ // If the server never writes, yes, there are still contrived
+ // server & client behaviors where this fails to ever cancel the
+ // context, but that's kinda why HTTP/1.x pipelining died
+ // anyway.
+ }
+ if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
+ // Ignore this error. It's the expected error from
+ // another goroutine calling abortPendingRead.
+ } else if err != nil {
+ cr.handleReadError(err)
+ }
+ cr.aborted = false
+ cr.inRead = false
+ cr.unlock()
+ cr.cond.Broadcast()
+}
+
+func (cr *connReader) abortPendingRead() {
+ cr.lock()
+ defer cr.unlock()
+ if !cr.inRead {
+ return
+ }
+ cr.aborted = true
+ cr.conn.rwc.SetReadDeadline(aLongTimeAgo)
+ for cr.inRead {
+ cr.cond.Wait()
+ }
+ cr.conn.rwc.SetReadDeadline(time.Time{})
+}
+
+func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain }
+func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 }
+func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 }
+
+// handleReadError is called whenever a Read from the client returns a
+// non-nil error.
+//
+// The provided non-nil err is almost always io.EOF or a "use of
+// closed network connection". In any case, the error is not
+// particularly interesting, except perhaps for debugging during
+// development. Any error means the connection is dead and we should
+// cancel its context.
+//
+// It may be called from multiple goroutines.
+func (cr *connReader) handleReadError(_ error) {
+ cr.conn.cancelCtx()
+ cr.closeNotify()
+}
+
+// may be called from multiple goroutines.
+func (cr *connReader) closeNotify() {
+ res := cr.conn.curReq.Load()
+ if res != nil && !res.didCloseNotify.Swap(true) {
+ res.closeNotifyCh <- true
+ }
+}
+
+func (cr *connReader) Read(p []byte) (n int, err error) {
+ cr.lock()
+ if cr.inRead {
+ cr.unlock()
+ if cr.conn.hijacked() {
+ panic("invalid Body.Read call. After hijacked, the original Request must not be used")
+ }
+ panic("invalid concurrent Body.Read call")
+ }
+ if cr.hitReadLimit() {
+ cr.unlock()
+ return 0, io.EOF
+ }
+ if len(p) == 0 {
+ cr.unlock()
+ return 0, nil
+ }
+ if int64(len(p)) > cr.remain {
+ p = p[:cr.remain]
+ }
+ if cr.hasByte {
+ p[0] = cr.byteBuf[0]
+ cr.hasByte = false
+ cr.unlock()
+ return 1, nil
+ }
+ cr.inRead = true
+ cr.unlock()
+ n, err = cr.conn.rwc.Read(p)
+
+ cr.lock()
+ cr.inRead = false
+ if err != nil {
+ cr.handleReadError(err)
+ }
+ cr.remain -= int64(n)
+ cr.unlock()
+
+ cr.cond.Broadcast()
+ return n, err
+}
+
+var (
+ bufioReaderPool sync.Pool
+ bufioWriter2kPool sync.Pool
+ bufioWriter4kPool sync.Pool
+)
+
+const copyBufPoolSize = 32 * 1024
+
+var copyBufPool = sync.Pool{New: func() any { return new([copyBufPoolSize]byte) }}
+
+func getCopyBuf() []byte {
+ return copyBufPool.Get().(*[copyBufPoolSize]byte)[:]
+}
+func putCopyBuf(b []byte) {
+ if len(b) != copyBufPoolSize {
+ panic("trying to put back buffer of the wrong size in the copyBufPool")
+ }
+ copyBufPool.Put((*[copyBufPoolSize]byte)(b))
+}
+
+func bufioWriterPool(size int) *sync.Pool {
+ switch size {
+ case 2 << 10:
+ return &bufioWriter2kPool
+ case 4 << 10:
+ return &bufioWriter4kPool
+ }
+ return nil
+}
+
+func newBufioReader(r io.Reader) *bufio.Reader {
+ if v := bufioReaderPool.Get(); v != nil {
+ br := v.(*bufio.Reader)
+ br.Reset(r)
+ return br
+ }
+ // Note: if this reader size is ever changed, update
+ // TestHandlerBodyClose's assumptions.
+ return bufio.NewReader(r)
+}
+
+func putBufioReader(br *bufio.Reader) {
+ br.Reset(nil)
+ bufioReaderPool.Put(br)
+}
+
+func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
+ pool := bufioWriterPool(size)
+ if pool != nil {
+ if v := pool.Get(); v != nil {
+ bw := v.(*bufio.Writer)
+ bw.Reset(w)
+ return bw
+ }
+ }
+ return bufio.NewWriterSize(w, size)
+}
+
+func putBufioWriter(bw *bufio.Writer) {
+ bw.Reset(nil)
+ if pool := bufioWriterPool(bw.Available()); pool != nil {
+ pool.Put(bw)
+ }
+}
+
+// DefaultMaxHeaderBytes is the maximum permitted size of the headers
+// in an HTTP request.
+// This can be overridden by setting [Server.MaxHeaderBytes].
+const DefaultMaxHeaderBytes = 1 << 20 // 1 MB
+
+func (srv *Server) maxHeaderBytes() int {
+ if srv.MaxHeaderBytes > 0 {
+ return srv.MaxHeaderBytes
+ }
+ return DefaultMaxHeaderBytes
+}
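+
+// For example (a sketch), a server configured to accept larger request
+// headers than the default:
+//
+//	srv := &http.Server{Addr: ":8080", MaxHeaderBytes: 1 << 21} // 2 MB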
+
+func (srv *Server) initialReadLimitSize() int64 {
+ return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
+}
+
+// tlsHandshakeTimeout returns the time limit permitted for the TLS
+// handshake, or zero for unlimited.
+//
+// It returns the minimum of any positive ReadHeaderTimeout,
+// ReadTimeout, or WriteTimeout.
+func (srv *Server) tlsHandshakeTimeout() time.Duration {
+ var ret time.Duration
+ for _, v := range [...]time.Duration{
+ srv.ReadHeaderTimeout,
+ srv.ReadTimeout,
+ srv.WriteTimeout,
+ } {
+ if v <= 0 {
+ continue
+ }
+ if ret == 0 || v < ret {
+ ret = v
+ }
+ }
+ return ret
+}
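+
+// For example, with ReadHeaderTimeout == 5s, ReadTimeout == 30s, and
+// WriteTimeout unset, the handshake deadline is 5s, the smallest positive
+// value of the three.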
+
+// expectContinueReader is a wrapper around io.ReadCloser which, on first
+// read, sends an HTTP/1.1 100 Continue header.
+type expectContinueReader struct {
+ resp *response
+ readCloser io.ReadCloser
+ closed atomic.Bool
+ sawEOF atomic.Bool
+}
+
+func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
+ if ecr.closed.Load() {
+ return 0, ErrBodyReadAfterClose
+ }
+ w := ecr.resp
+ if !w.wroteContinue && w.canWriteContinue.Load() && !w.conn.hijacked() {
+ w.wroteContinue = true
+ w.writeContinueMu.Lock()
+ if w.canWriteContinue.Load() {
+ w.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
+ w.conn.bufw.Flush()
+ w.canWriteContinue.Store(false)
+ }
+ w.writeContinueMu.Unlock()
+ }
+ n, err = ecr.readCloser.Read(p)
+ if err == io.EOF {
+ ecr.sawEOF.Store(true)
+ }
+ return
+}
+
+func (ecr *expectContinueReader) Close() error {
+ ecr.closed.Store(true)
+ return ecr.readCloser.Close()
+}
+
+// TimeFormat is the time format to use when generating times in HTTP
+// headers. It is like [time.RFC1123] but hard-codes GMT as the time
+// zone. The time being formatted must be in UTC for Format to
+// generate the correct format.
+//
+// For parsing this time format, see [ParseTime].
+const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat))
+func appendTime(b []byte, t time.Time) []byte {
+ const days = "SunMonTueWedThuFriSat"
+ const months = "JanFebMarAprMayJunJulAugSepOctNovDec"
+
+ t = t.UTC()
+ yy, mm, dd := t.Date()
+ hh, mn, ss := t.Clock()
+ day := days[3*t.Weekday():]
+ mon := months[3*(mm-1):]
+
+ return append(b,
+ day[0], day[1], day[2], ',', ' ',
+ byte('0'+dd/10), byte('0'+dd%10), ' ',
+ mon[0], mon[1], mon[2], ' ',
+ byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ',
+ byte('0'+hh/10), byte('0'+hh%10), ':',
+ byte('0'+mn/10), byte('0'+mn%10), ':',
+ byte('0'+ss/10), byte('0'+ss%10), ' ',
+ 'G', 'M', 'T')
+}
+
+var errTooLarge = errors.New("http: request too large")
+
+// Read next request from connection.
+func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
+ if c.hijacked() {
+ return nil, ErrHijacked
+ }
+
+ var (
+ wholeReqDeadline time.Time // or zero if none
+ hdrDeadline time.Time // or zero if none
+ )
+ t0 := time.Now()
+ if d := c.server.readHeaderTimeout(); d > 0 {
+ hdrDeadline = t0.Add(d)
+ }
+ if d := c.server.ReadTimeout; d > 0 {
+ wholeReqDeadline = t0.Add(d)
+ }
+ c.rwc.SetReadDeadline(hdrDeadline)
+ if d := c.server.WriteTimeout; d > 0 {
+ defer func() {
+ c.rwc.SetWriteDeadline(time.Now().Add(d))
+ }()
+ }
+
+ c.r.setReadLimit(c.server.initialReadLimitSize())
+ if c.lastMethod == "POST" {
+ // RFC 7230 section 3 tolerance for old buggy clients.
+ peek, _ := c.bufr.Peek(4) // ReadRequest will get err below
+ c.bufr.Discard(numLeadingCRorLF(peek))
+ }
+ req, err := readRequest(c.bufr)
+ if err != nil {
+ if c.r.hitReadLimit() {
+ return nil, errTooLarge
+ }
+ return nil, err
+ }
+
+ if !http1ServerSupportsRequest(req) {
+ return nil, statusError{StatusHTTPVersionNotSupported, "unsupported protocol version"}
+ }
+
+ c.lastMethod = req.Method
+ c.r.setInfiniteReadLimit()
+
+ hosts, haveHost := req.Header["Host"]
+ isH2Upgrade := req.isH2Upgrade()
+ if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
+ return nil, badRequestError("missing required Host header")
+ }
+ if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) {
+ return nil, badRequestError("malformed Host header")
+ }
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return nil, badRequestError("invalid header name")
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ return nil, badRequestError("invalid header value")
+ }
+ }
+ }
+ delete(req.Header, "Host")
+
+ ctx, cancelCtx := context.WithCancel(ctx)
+ req.ctx = ctx
+ req.RemoteAddr = c.remoteAddr
+ req.TLS = c.tlsState
+ if body, ok := req.Body.(*body); ok {
+ body.doEarlyClose = true
+ }
+
+ // Adjust the read deadline if necessary.
+ if !hdrDeadline.Equal(wholeReqDeadline) {
+ c.rwc.SetReadDeadline(wholeReqDeadline)
+ }
+
+ w = &response{
+ conn: c,
+ cancelCtx: cancelCtx,
+ req: req,
+ reqBody: req.Body,
+ handlerHeader: make(Header),
+ contentLength: -1,
+ closeNotifyCh: make(chan bool, 1),
+
+ // We populate these ahead of time so we're not
+ // reading from req.Header after the Handler starts
+ // and maybe mutates it (Issue 14940)
+ wants10KeepAlive: req.wantsHttp10KeepAlive(),
+ wantsClose: req.wantsClose(),
+ }
+ if isH2Upgrade {
+ w.closeAfterReply = true
+ }
+ w.cw.res = w
+ w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize)
+ return w, nil
+}
+
+// http1ServerSupportsRequest reports whether Go's HTTP/1.x server
+// supports the given request.
+func http1ServerSupportsRequest(req *Request) bool {
+ if req.ProtoMajor == 1 {
+ return true
+ }
+ // Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
+ // wire up their own HTTP/2 upgrades.
+ if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
+ req.Method == "PRI" && req.RequestURI == "*" {
+ return true
+ }
+ // Reject HTTP/0.x, and all other HTTP/2+ requests (which
+ // aren't encoded in ASCII anyway).
+ return false
+}
+
+func (w *response) Header() Header {
+ if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
+ // Accessing the header between logically writing it
+ // and physically writing it means we need to allocate
+ // a clone to snapshot the logically written state.
+ w.cw.header = w.handlerHeader.Clone()
+ }
+ w.calledHeader = true
+ return w.handlerHeader
+}
+
+// maxPostHandlerReadBytes is the max number of Request.Body bytes not
+// consumed by a handler that the server will read from the client
+// in order to keep a connection alive. If there are more bytes than
+// this, the server, to be paranoid, instead sends a "Connection:
+// close" response.
+//
+// This number is approximately what a typical machine's TCP buffer
+// size is anyway. (If we have the bytes on the machine, we might as
+// well read them.)
+const maxPostHandlerReadBytes = 256 << 10
+
+func checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at https://httpwg.org/specs/rfc7231.html#status.codes).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 {
+ panic(fmt.Sprintf("invalid WriteHeader code %v", code))
+ }
+}
+
+// relevantCaller searches the call stack for the first function outside of net/http.
+// The purpose of this function is to provide more helpful error messages.
+func relevantCaller() runtime.Frame {
+ pc := make([]uintptr, 16)
+ n := runtime.Callers(1, pc)
+ frames := runtime.CallersFrames(pc[:n])
+ var frame runtime.Frame
+ for {
+ frame, more := frames.Next()
+ if !strings.HasPrefix(frame.Function, "net/http.") {
+ return frame
+ }
+ if !more {
+ break
+ }
+ }
+ return frame
+}
+
+func (w *response) WriteHeader(code int) {
+ if w.conn.hijacked() {
+ caller := relevantCaller()
+ w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ return
+ }
+ if w.wroteHeader {
+ caller := relevantCaller()
+ w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ return
+ }
+ checkWriteHeaderCode(code)
+
+ // Handle informational headers.
+ //
+ // We shouldn't send any further headers after 101 Switching Protocols,
+ // so it takes the non-informational path.
+ if code >= 100 && code <= 199 && code != StatusSwitchingProtocols {
+ // Prevent a potential race with an automatically-sent 100 Continue triggered by Request.Body.Read()
+ if code == 100 && w.canWriteContinue.Load() {
+ w.writeContinueMu.Lock()
+ w.canWriteContinue.Store(false)
+ w.writeContinueMu.Unlock()
+ }
+
+ writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
+
+ // Per RFC 8297 we must not clear the current header map
+ w.handlerHeader.WriteSubset(w.conn.bufw, excludedHeadersNoBody)
+ w.conn.bufw.Write(crlf)
+ w.conn.bufw.Flush()
+
+ return
+ }
+
+ w.wroteHeader = true
+ w.status = code
+
+ if w.calledHeader && w.cw.header == nil {
+ w.cw.header = w.handlerHeader.Clone()
+ }
+
+ if cl := w.handlerHeader.get("Content-Length"); cl != "" {
+ v, err := strconv.ParseInt(cl, 10, 64)
+ if err == nil && v >= 0 {
+ w.contentLength = v
+ } else {
+ w.conn.server.logf("http: invalid Content-Length of %q", cl)
+ w.handlerHeader.Del("Content-Length")
+ }
+ }
+}
+
+// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
+// This type is used to avoid extra allocations from cloning and/or populating
+// the response Header map and all its 1-element slices.
+type extraHeader struct {
+ contentType string
+ connection string
+ transferEncoding string
+ date []byte // written if not nil
+ contentLength []byte // written if not nil
+}
+
+// Sorted the same as extraHeader.Write's loop.
+var extraHeaderKeys = [][]byte{
+ []byte("Content-Type"),
+ []byte("Connection"),
+ []byte("Transfer-Encoding"),
+}
+
+var (
+ headerContentLength = []byte("Content-Length: ")
+ headerDate = []byte("Date: ")
+)
+
+// Write writes the headers described in h to w.
+//
+// This method has a value receiver, despite the somewhat large size
+// of h, because it prevents an allocation. The escape analysis isn't
+// smart enough to realize this function doesn't mutate h.
+func (h extraHeader) Write(w *bufio.Writer) {
+ if h.date != nil {
+ w.Write(headerDate)
+ w.Write(h.date)
+ w.Write(crlf)
+ }
+ if h.contentLength != nil {
+ w.Write(headerContentLength)
+ w.Write(h.contentLength)
+ w.Write(crlf)
+ }
+ for i, v := range []string{h.contentType, h.connection, h.transferEncoding} {
+ if v != "" {
+ w.Write(extraHeaderKeys[i])
+ w.Write(colonSpace)
+ w.WriteString(v)
+ w.Write(crlf)
+ }
+ }
+}
+
+// writeHeader finalizes the header sent to the client and writes it
+// to cw.res.conn.bufw.
+//
+// p is not written by writeHeader, but is the first chunk of the body
+// that will be written. It is sniffed for a Content-Type if none is
+// set explicitly. It's also used to set the Content-Length, if the
+// total body size was small and the handler has already finished
+// running.
+func (cw *chunkWriter) writeHeader(p []byte) {
+ if cw.wroteHeader {
+ return
+ }
+ cw.wroteHeader = true
+
+ w := cw.res
+ keepAlivesEnabled := w.conn.server.doKeepAlives()
+ isHEAD := w.req.Method == "HEAD"
+
+ // header is written out to w.conn.buf below. Depending on the
+ // state of the handler, we either own the map or not. If we
+ // don't own it, the exclude map is created lazily for
+ // WriteSubset to remove headers. The setHeader struct holds
+ // headers we need to add.
+ header := cw.header
+ owned := header != nil
+ if !owned {
+ header = w.handlerHeader
+ }
+ var excludeHeader map[string]bool
+ delHeader := func(key string) {
+ if owned {
+ header.Del(key)
+ return
+ }
+ if _, ok := header[key]; !ok {
+ return
+ }
+ if excludeHeader == nil {
+ excludeHeader = make(map[string]bool)
+ }
+ excludeHeader[key] = true
+ }
+ var setHeader extraHeader
+
+ // Don't write out the fake "Trailer:foo" keys. See TrailerPrefix.
+ trailers := false
+ for k := range cw.header {
+ if strings.HasPrefix(k, TrailerPrefix) {
+ if excludeHeader == nil {
+ excludeHeader = make(map[string]bool)
+ }
+ excludeHeader[k] = true
+ trailers = true
+ }
+ }
+ for _, v := range cw.header["Trailer"] {
+ trailers = true
+ foreachHeaderElement(v, cw.res.declareTrailer)
+ }
+
+ te := header.get("Transfer-Encoding")
+ hasTE := te != ""
+
+ // If the handler is done but never sent a Content-Length
+ // response header and this is our first (and last) write, set
+ // it, even to zero. This helps HTTP/1.0 clients keep their
+ // "keep-alive" connections alive.
+ // Exceptions: 304/204/1xx responses never get Content-Length, and if
+ // it was a HEAD request, we don't know the difference between
+ // 0 actual bytes and 0 bytes because the handler noticed it
+ // was a HEAD request and chose not to write anything. So for
+ // HEAD, the handler should either write the Content-Length or
+ // write non-zero bytes. If it's actually 0 bytes and the
+ // handler never looked at the Request.Method, we just don't
+ // send a Content-Length header.
+ // Further, we don't send an automatic Content-Length if they
+ // set a Transfer-Encoding, because they're generally incompatible.
+ if w.handlerDone.Load() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && !header.has("Content-Length") && (!isHEAD || len(p) > 0) {
+ w.contentLength = int64(len(p))
+ setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
+ }
+
+ // If this was an HTTP/1.0 request with keep-alive and we sent a
+ // Content-Length back, we can make this a keep-alive response ...
+ if w.wants10KeepAlive && keepAlivesEnabled {
+ sentLength := header.get("Content-Length") != ""
+ if sentLength && header.get("Connection") == "keep-alive" {
+ w.closeAfterReply = false
+ }
+ }
+
+ // Check for an explicit (and valid) Content-Length header.
+ hasCL := w.contentLength != -1
+
+ if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) {
+ _, connectionHeaderSet := header["Connection"]
+ if !connectionHeaderSet {
+ setHeader.connection = "keep-alive"
+ }
+ } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose {
+ w.closeAfterReply = true
+ }
+
+ if header.get("Connection") == "close" || !keepAlivesEnabled {
+ w.closeAfterReply = true
+ }
+
+ // If the client wanted a 100-continue but we never sent it to
+ // them (or, more strictly: we never finished reading their
+ // request body), don't reuse this connection because it's now
+ // in an unknown state: we might be sending this response at
+ // the same time the client is now sending its request body
+ // after a timeout. (Some HTTP clients send Expect:
+ // 100-continue but knowing that some servers don't support
+ // it, the clients set a timer and send the body later anyway)
+ // If we haven't seen EOF, we can't skip over the unread body
+ // because we don't know if the next bytes on the wire will be
+ // the body-following-the-timer or the subsequent request.
+ // See Issue 11549.
+ if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF.Load() {
+ w.closeAfterReply = true
+ }
+
+ // We consume the remaining request body by default because many
+ // clients send a full request before starting to read the response,
+ // and they can deadlock if we start writing the response while
+ // unconsumed body remains. See Issue 15527 for some history.
+ //
+ // If full duplex mode has been enabled with ResponseController.EnableFullDuplex,
+ // then leave the request body alone.
+ if w.req.ContentLength != 0 && !w.closeAfterReply && !w.fullDuplex {
+ var discard, tooBig bool
+
+ switch bdy := w.req.Body.(type) {
+ case *expectContinueReader:
+ if bdy.resp.wroteContinue {
+ discard = true
+ }
+ case *body:
+ bdy.mu.Lock()
+ switch {
+ case bdy.closed:
+ if !bdy.sawEOF {
+ // Body was closed in handler with non-EOF error.
+ w.closeAfterReply = true
+ }
+ case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
+ tooBig = true
+ default:
+ discard = true
+ }
+ bdy.mu.Unlock()
+ default:
+ discard = true
+ }
+
+ if discard {
+ _, err := io.CopyN(io.Discard, w.reqBody, maxPostHandlerReadBytes+1)
+ switch err {
+ case nil:
+ // There must be even more data left over.
+ tooBig = true
+ case ErrBodyReadAfterClose:
+ // Body was already consumed and closed.
+ case io.EOF:
+ // The remaining body was just consumed, close it.
+ err = w.reqBody.Close()
+ if err != nil {
+ w.closeAfterReply = true
+ }
+ default:
+ // Some other kind of error occurred, like a read timeout, or
+ // corrupt chunked encoding. In any case, whatever remains
+ // on the wire must not be parsed as another HTTP request.
+ w.closeAfterReply = true
+ }
+ }
+
+ if tooBig {
+ w.requestTooLarge()
+ delHeader("Connection")
+ setHeader.connection = "close"
+ }
+ }
+
+ code := w.status
+ if bodyAllowedForStatus(code) {
+ // If no content type, apply sniffing algorithm to body.
+ _, haveType := header["Content-Type"]
+
+ // If the Content-Encoding was set and is non-blank,
+ // we shouldn't sniff the body. See Issue 31753.
+ ce := header.Get("Content-Encoding")
+ hasCE := len(ce) > 0
+ if !hasCE && !haveType && !hasTE && len(p) > 0 {
+ setHeader.contentType = DetectContentType(p)
+ }
+ } else {
+ for _, k := range suppressedHeaders(code) {
+ delHeader(k)
+ }
+ }
+
+ if !header.has("Date") {
+ setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
+ }
+
+ if hasCL && hasTE && te != "identity" {
+ // TODO: return an error if WriteHeader gets a return parameter
+ // For now just ignore the Content-Length.
+ w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
+ te, w.contentLength)
+ delHeader("Content-Length")
+ hasCL = false
+ }
+
+ if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) || code == StatusNoContent {
+ // Response has no body.
+ delHeader("Transfer-Encoding")
+ } else if hasCL {
+ // Content-Length has been provided, so no chunking is to be done.
+ delHeader("Transfer-Encoding")
+ } else if w.req.ProtoAtLeast(1, 1) {
+ // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no
+ // content-length has been provided. The connection must be closed after the
+ // reply is written, and no chunking is to be done. This is the setup
+ // recommended in the Server-Sent Events candidate recommendation 11,
+ // section 8.
+ if hasTE && te == "identity" {
+ cw.chunking = false
+ w.closeAfterReply = true
+ delHeader("Transfer-Encoding")
+ } else {
+ // HTTP/1.1 or greater: use chunked transfer encoding
+ // to avoid closing the connection at EOF.
+ cw.chunking = true
+ setHeader.transferEncoding = "chunked"
+ if hasTE && te == "chunked" {
+ // We will send the chunked Transfer-Encoding header later.
+ delHeader("Transfer-Encoding")
+ }
+ }
+ } else {
+ // HTTP version < 1.1: cannot do chunked transfer
+ // encoding and we don't know the Content-Length so
+ // signal EOF by closing connection.
+ w.closeAfterReply = true
+ delHeader("Transfer-Encoding") // in case already set
+ }
+
+ // Cannot use Content-Length with non-identity Transfer-Encoding.
+ if cw.chunking {
+ delHeader("Content-Length")
+ }
+ if !w.req.ProtoAtLeast(1, 0) {
+ return
+ }
+
+ // Only override the Connection header if it is not a successful
+ // protocol switch response and if KeepAlives are not enabled.
+ // See https://golang.org/issue/36381.
+ delConnectionHeader := w.closeAfterReply &&
+ (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) &&
+ !isProtocolSwitchResponse(w.status, header)
+ if delConnectionHeader {
+ delHeader("Connection")
+ if w.req.ProtoAtLeast(1, 1) {
+ setHeader.connection = "close"
+ }
+ }
+
+ writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
+ cw.header.WriteSubset(w.conn.bufw, excludeHeader)
+ setHeader.Write(w.conn.bufw)
+ w.conn.bufw.Write(crlf)
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 7230 section 7 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
+
+// writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2)
+// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
+// code is the response status code.
+// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
+func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
+ if is11 {
+ bw.WriteString("HTTP/1.1 ")
+ } else {
+ bw.WriteString("HTTP/1.0 ")
+ }
+ if text := StatusText(code); text != "" {
+ bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
+ bw.WriteByte(' ')
+ bw.WriteString(text)
+ bw.WriteString("\r\n")
+ } else {
+ // don't worry about performance
+ fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
+ }
+}
+
+// bodyAllowed reports whether a Write is allowed for this response type.
+// It's illegal to call this before the header has been flushed.
+func (w *response) bodyAllowed() bool {
+ if !w.wroteHeader {
+ panic("")
+ }
+ return bodyAllowedForStatus(w.status)
+}
+
+// The Life Of A Write is like this:
+//
+// Handler starts. No header has been sent. The handler can either
+// write a header, or just start writing. Writing before sending a header
+// sends an implicitly empty 200 OK header.
+//
+// If the handler didn't declare a Content-Length up front, we either
+// go into chunking mode or, if the handler finishes running before
+// filling the chunking buffer, we compute a Content-Length and send
+// that in the header instead.
+//
+// Likewise, if the handler didn't set a Content-Type, we sniff that
+// from the initial chunk of output.
+//
+// The Writers are wired together like:
+//
+// 1. *response (the ResponseWriter) ->
+// 2. (*response).w, a [*bufio.Writer] of bufferBeforeChunkingSize bytes ->
+// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type)
+// and which writes the chunk headers, if needed ->
+// 4. conn.bufw, a *bufio.Writer of default (4kB) bytes, writing to ->
+// 5. checkConnErrorWriter{c}, which notes any non-nil error on Write
+// and populates c.werr with it if so, but otherwise writes to ->
+// 6. the rwc, the [net.Conn].
+//
+// TODO(bradfitz): short-circuit some of the buffering when the
+// initial header contains both a Content-Type and Content-Length.
+// Also short-circuit in (1) when the header's been sent and not in
+// chunking mode, writing directly to (4) instead, if (2) has no
+// buffered data. More generally, we could short-circuit from (1) to
+// (3) even in chunking mode if the write size from (1) is over some
+// threshold and nothing is in (2). The answer might be mostly making
+// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal
+// with this instead.
+func (w *response) Write(data []byte) (n int, err error) {
+ return w.write(len(data), data, "")
+}
+
+func (w *response) WriteString(data string) (n int, err error) {
+ return w.write(len(data), nil, data)
+}
+
+// either dataB or dataS is non-zero.
+func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ if w.conn.hijacked() {
+ if lenData > 0 {
+ caller := relevantCaller()
+ w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ }
+ return 0, ErrHijacked
+ }
+
+ if w.canWriteContinue.Load() {
+ // Body reader wants to write 100 Continue but hasn't yet.
+ // Tell it not to. The store must be done while holding the lock
+ // because the lock makes sure that there is not an active write
+ // this very moment.
+ w.writeContinueMu.Lock()
+ w.canWriteContinue.Store(false)
+ w.writeContinueMu.Unlock()
+ }
+
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+ if lenData == 0 {
+ return 0, nil
+ }
+ if !w.bodyAllowed() {
+ return 0, ErrBodyNotAllowed
+ }
+
+ w.written += int64(lenData) // ignoring errors, for errorKludge
+ if w.contentLength != -1 && w.written > w.contentLength {
+ return 0, ErrContentLength
+ }
+ if dataB != nil {
+ return w.w.Write(dataB)
+ } else {
+ return w.w.WriteString(dataS)
+ }
+}
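+
+// Two consequences of the checks above, as a hedged sketch (editor's
+// addition): writing before WriteHeader sends an implicit 200 OK, and
+// writing more than a declared Content-Length fails.
+//
+//	http.HandleFunc("/implicit", func(w http.ResponseWriter, r *http.Request) {
+//		io.WriteString(w, "ok") // implies w.WriteHeader(http.StatusOK)
+//	})
+//
+//	http.HandleFunc("/overrun", func(w http.ResponseWriter, r *http.Request) {
+//		w.Header().Set("Content-Length", "1")
+//		_, err := io.WriteString(w, "too long") // err == http.ErrContentLength
+//		log.Println(err)
+//	})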
+
+func (w *response) finishRequest() {
+ w.handlerDone.Store(true)
+
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+
+ w.w.Flush()
+ putBufioWriter(w.w)
+ w.cw.close()
+ w.conn.bufw.Flush()
+
+ w.conn.r.abortPendingRead()
+
+ // Close the body (regardless of w.closeAfterReply) so we can
+ // re-use its bufio.Reader later safely.
+ w.reqBody.Close()
+
+ if w.req.MultipartForm != nil {
+ w.req.MultipartForm.RemoveAll()
+ }
+}
+
+// shouldReuseConnection reports whether the underlying TCP connection can be reused.
+// It must only be called after the handler is done executing.
+func (w *response) shouldReuseConnection() bool {
+ if w.closeAfterReply {
+ // The request or something set while executing the
+ // handler indicated we shouldn't reuse this
+ // connection.
+ return false
+ }
+
+ if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
+ // Did not write enough. Avoid getting out of sync.
+ return false
+ }
+
+ // There was some error writing to the underlying connection
+ // during the request, so don't re-use this conn.
+ if w.conn.werr != nil {
+ return false
+ }
+
+ if w.closedRequestBodyEarly() {
+ return false
+ }
+
+ return true
+}
+
+func (w *response) closedRequestBodyEarly() bool {
+ body, ok := w.req.Body.(*body)
+ return ok && body.didEarlyClose()
+}
+
+func (w *response) Flush() {
+ w.FlushError()
+}
+
+func (w *response) FlushError() error {
+ if !w.wroteHeader {
+ w.WriteHeader(StatusOK)
+ }
+ err := w.w.Flush()
+ e2 := w.cw.flush()
+ if err == nil {
+ err = e2
+ }
+ return err
+}
+
+func (c *conn) finalFlush() {
+ if c.bufr != nil {
+ // Steal the bufio.Reader (~4KB worth of memory) and its associated
+ // reader for a future connection.
+ putBufioReader(c.bufr)
+ c.bufr = nil
+ }
+
+ if c.bufw != nil {
+ c.bufw.Flush()
+ // Steal the bufio.Writer (~4KB worth of memory) and its associated
+ // writer for a future connection.
+ putBufioWriter(c.bufw)
+ c.bufw = nil
+ }
+}
+
+// Close the connection.
+func (c *conn) close() {
+ c.finalFlush()
+ c.rwc.Close()
+}
+
+// rstAvoidanceDelay is the amount of time we sleep after closing the
+// write side of a TCP connection before closing the entire socket.
+// By sleeping, we increase the chances that the client sees our FIN
+// and processes its final data before they process the subsequent RST
+// from closing a connection with known unread data.
+// This RST seems to occur mostly on BSD systems. (And Windows?)
+// This timeout is somewhat arbitrary (~latency around the planet),
+// and may be modified by tests.
+//
+// TODO(bcmills): This should arguably be a server configuration parameter,
+// not a hard-coded value.
+var rstAvoidanceDelay = 500 * time.Millisecond
+
+type closeWriter interface {
+ CloseWrite() error
+}
+
+var _ closeWriter = (*net.TCPConn)(nil)
+
+// closeWriteAndWait flushes any outstanding data and sends a FIN packet (if
+// client is connected via TCP), signaling that we're done. We then
+// pause for a bit, hoping the client processes it before any
+// subsequent RST.
+//
+// See https://golang.org/issue/3595
+func (c *conn) closeWriteAndWait() {
+ c.finalFlush()
+ if tcp, ok := c.rwc.(closeWriter); ok {
+ tcp.CloseWrite()
+ }
+
+ // When we return from closeWriteAndWait, the caller will fully close the
+ // connection. If client is still writing to the connection, this will cause
+ // the write to fail with ECONNRESET or similar. Unfortunately, many TCP
+ // implementations will also drop unread packets from the client's read buffer
+ // when a write fails, causing our final response to be truncated away too.
+ //
+ // As a result, https://www.rfc-editor.org/rfc/rfc7230#section-6.6 recommends
+ // that “[t]he server … continues to read from the connection until it
+ // receives a corresponding close by the client, or until the server is
+ // reasonably certain that its own TCP stack has received the client's
+ // acknowledgement of the packet(s) containing the server's last response.”
+ //
+ // Unfortunately, we have no straightforward way to be “reasonably certain”
+ // that we have received the client's ACK, and at any rate we don't want to
+ // allow a misbehaving client to soak up server connections indefinitely by
+ // withholding an ACK, nor do we want to go through the complexity or overhead
+ // of using low-level APIs to figure out when a TCP round-trip has completed.
+ //
+ // Instead, we declare that we are “reasonably certain” that we received the
+ // ACK if rstAvoidanceDelay has elapsed.
+ time.Sleep(rstAvoidanceDelay)
+}
+
+// validNextProto reports whether the proto is a valid ALPN protocol name.
+// Everything is valid except the empty string and built-in protocol types,
+// so that those can't be overridden with alternate implementations.
+func validNextProto(proto string) bool {
+ switch proto {
+ case "", "http/1.1", "http/1.0":
+ return false
+ }
+ return true
+}
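+
+// A hedged sketch (editor's addition) of how a non-built-in ALPN protocol
+// passes validNextProto and reaches a [Server.TLSNextProto] hook; the
+// protocol name "acme-tls/1" is only an example value.
+//
+//	srv := &http.Server{
+//		TLSNextProto: map[string]func(*http.Server, *tls.Conn, http.Handler){
+//			"acme-tls/1": func(s *http.Server, c *tls.Conn, h http.Handler) {
+//				// c is owned by this hook; the server closes it
+//				// when the hook returns.
+//			},
+//		},
+//	}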
+
+const (
+ runHooks = true
+ skipHooks = false
+)
+
+func (c *conn) setState(nc net.Conn, state ConnState, runHook bool) {
+ srv := c.server
+ switch state {
+ case StateNew:
+ srv.trackConn(c, true)
+ case StateHijacked, StateClosed:
+ srv.trackConn(c, false)
+ }
+ if state > 0xff || state < 0 {
+ panic("internal error")
+ }
+ packedState := uint64(time.Now().Unix()<<8) | uint64(state)
+ c.curState.Store(packedState)
+ if !runHook {
+ return
+ }
+ if hook := srv.ConnState; hook != nil {
+ hook(nc, state)
+ }
+}
+
+func (c *conn) getState() (state ConnState, unixSec int64) {
+ packedState := c.curState.Load()
+ return ConnState(packedState & 0xff), int64(packedState >> 8)
+}
+
+// badRequestError is a literal string (used by the server in HTML,
+// unescaped) to tell the user why their request was bad. It should
+// be plain text without user info or other embedded errors.
+func badRequestError(e string) error { return statusError{StatusBadRequest, e} }
+
+// statusError is an error used to respond to a request with an HTTP status.
+// The text should be plain text without user info or other embedded errors.
+type statusError struct {
+ code int
+ text string
+}
+
+func (e statusError) Error() string { return StatusText(e.code) + ": " + e.text }
+
+// ErrAbortHandler is a sentinel panic value to abort a handler.
+// While any panic from ServeHTTP aborts the response to the client,
+// panicking with ErrAbortHandler also suppresses logging of a stack
+// trace to the server's error log.
+var ErrAbortHandler = errors.New("net/http: abort Handler")
+
+// isCommonNetReadError reports whether err is a common error
+// encountered during reading a request off the network when the
+// client has gone away or had its read fail somehow. This is used to
+// determine which errors are interesting enough to log.
+func isCommonNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
+ return true
+ }
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ return true
+ }
+ return false
+}
+
+// Serve a new connection.
+func (c *conn) serve(ctx context.Context) {
+ if ra := c.rwc.RemoteAddr(); ra != nil {
+ c.remoteAddr = ra.String()
+ }
+ ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
+ var inFlightResponse *response
+ defer func() {
+ if err := recover(); err != nil && err != ErrAbortHandler {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf)
+ }
+ if inFlightResponse != nil {
+ inFlightResponse.cancelCtx()
+ }
+ if !c.hijacked() {
+ if inFlightResponse != nil {
+ inFlightResponse.conn.r.abortPendingRead()
+ inFlightResponse.reqBody.Close()
+ }
+ c.close()
+ c.setState(c.rwc, StateClosed, runHooks)
+ }
+ }()
+
+ if tlsConn, ok := c.rwc.(*tls.Conn); ok {
+ tlsTO := c.server.tlsHandshakeTimeout()
+ if tlsTO > 0 {
+ dl := time.Now().Add(tlsTO)
+ c.rwc.SetReadDeadline(dl)
+ c.rwc.SetWriteDeadline(dl)
+ }
+ if err := tlsConn.HandshakeContext(ctx); err != nil {
+ // If the handshake failed due to the client not speaking
+ // TLS, assume they're speaking plaintext HTTP and write a
+ // 400 response on the TLS conn's underlying net.Conn.
+ if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) {
+ io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n")
+ re.Conn.Close()
+ return
+ }
+ c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err)
+ return
+ }
+ // Restore Conn-level deadlines.
+ if tlsTO > 0 {
+ c.rwc.SetReadDeadline(time.Time{})
+ c.rwc.SetWriteDeadline(time.Time{})
+ }
+ c.tlsState = new(tls.ConnectionState)
+ *c.tlsState = tlsConn.ConnectionState()
+ if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) {
+ if fn := c.server.TLSNextProto[proto]; fn != nil {
+ h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}}
+ // Mark freshly created HTTP/2 as active and prevent any server state hooks
+ // from being run on these connections. This prevents closeIdleConns from
+ // closing such connections. See issue https://golang.org/issue/39776.
+ c.setState(c.rwc, StateActive, skipHooks)
+ fn(c.server, tlsConn, h)
+ }
+ return
+ }
+ }
+
+ // HTTP/1.x from here on.
+
+ ctx, cancelCtx := context.WithCancel(ctx)
+ c.cancelCtx = cancelCtx
+ defer cancelCtx()
+
+ c.r = &connReader{conn: c}
+ c.bufr = newBufioReader(c.r)
+ c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
+
+ for {
+ w, err := c.readRequest(ctx)
+ if c.r.remain != c.server.initialReadLimitSize() {
+ // If we read any bytes off the wire, we're active.
+ c.setState(c.rwc, StateActive, runHooks)
+ }
+ if err != nil {
+ const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"
+
+ switch {
+ case err == errTooLarge:
+ // Their HTTP client may or may not be
+ // able to read this if we're
+ // responding to them and hanging up
+ // while they're still writing their
+ // request. Undefined behavior.
+ const publicErr = "431 Request Header Fields Too Large"
+ fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
+ c.closeWriteAndWait()
+ return
+
+ case isUnsupportedTEError(err):
+ // Respond as per RFC 7230 Section 3.3.1 which says,
+ // A server that receives a request message with a
+ // transfer coding it does not understand SHOULD
+ // respond with 501 (Not Implemented).
+ code := StatusNotImplemented
+
+ // We purposefully aren't echoing back the transfer-encoding's value,
+ // so as to mitigate the risk of cross-site scripting by an attacker.
+ fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
+ return
+
+ case isCommonNetReadError(err):
+ return // don't reply
+
+ default:
+ if v, ok := err.(statusError); ok {
+ fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
+ return
+ }
+ const publicErr = "400 Bad Request"
+ fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
+ return
+ }
+ }
+
+ // Expect 100 Continue support
+ req := w.req
+ if req.expectsContinue() {
+ if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
+ // Wrap the Body reader with one that replies on the connection
+ req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
+ w.canWriteContinue.Store(true)
+ }
+ } else if req.Header.get("Expect") != "" {
+ w.sendExpectationFailed()
+ return
+ }
+
+ c.curReq.Store(w)
+
+ if requestBodyRemains(req.Body) {
+ registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
+ } else {
+ w.conn.r.startBackgroundRead()
+ }
+
+ // HTTP cannot have multiple simultaneous active requests.[*]
+ // Until the server replies to this request, it can't read another,
+ // so we might as well run the handler in this goroutine.
+ // [*] Not strictly true: HTTP pipelining. We could let them all process
+ // in parallel even if their responses need to be serialized.
+ // But we're not going to implement HTTP pipelining because it
+ // was never deployed in the wild and the answer is HTTP/2.
+ inFlightResponse = w
+ serverHandler{c.server}.ServeHTTP(w, w.req)
+ inFlightResponse = nil
+ w.cancelCtx()
+ if c.hijacked() {
+ return
+ }
+ w.finishRequest()
+ c.rwc.SetWriteDeadline(time.Time{})
+ if !w.shouldReuseConnection() {
+ if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
+ c.closeWriteAndWait()
+ }
+ return
+ }
+ c.setState(c.rwc, StateIdle, runHooks)
+ c.curReq.Store(nil)
+
+ if !w.conn.server.doKeepAlives() {
+ // We're in shutdown mode. We might've replied
+ // to the user without "Connection: close" and
+ // they might think they can send another
+ // request, but such is life with HTTP/1.1.
+ return
+ }
+
+ if d := c.server.idleTimeout(); d != 0 {
+ c.rwc.SetReadDeadline(time.Now().Add(d))
+ } else {
+ c.rwc.SetReadDeadline(time.Time{})
+ }
+
+ // Wait for the connection to become readable again before trying to
+ // read the next request. This prevents a ReadHeaderTimeout or
+ // ReadTimeout from starting until the first bytes of the next request
+ // have been received.
+ if _, err := c.bufr.Peek(4); err != nil {
+ return
+ }
+
+ c.rwc.SetReadDeadline(time.Time{})
+ }
+}
+
+func (w *response) sendExpectationFailed() {
+ // TODO(bradfitz): let ServeHTTP handlers handle
+ // requests with non-standard expectation[s]? Seems
+ // theoretical at best, and doesn't fit into the
+ // current ServeHTTP model anyway. We'd need to
+ // make the ResponseWriter an optional
+ // "ExpectReplier" interface or something.
+ //
+ // For now we'll just obey RFC 7231 5.1.1 which says
+ // "A server that receives an Expect field-value other
+ // than 100-continue MAY respond with a 417 (Expectation
+ // Failed) status code to indicate that the unexpected
+ // expectation cannot be met."
+ w.Header().Set("Connection", "close")
+ w.WriteHeader(StatusExpectationFailed)
+ w.finishRequest()
+}
+
+// Hijack implements the [Hijacker.Hijack] method. Our response is both a [ResponseWriter]
+// and a [Hijacker].
+func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
+ if w.handlerDone.Load() {
+ panic("net/http: Hijack called after ServeHTTP finished")
+ }
+ if w.wroteHeader {
+ w.cw.flush()
+ }
+
+ c := w.conn
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Release the bufioWriter that writes to the chunk writer; it is not
+ // used after a connection has been hijacked.
+ rwc, buf, err = c.hijackLocked()
+ if err == nil {
+ putBufioWriter(w.w)
+ w.w = nil
+ }
+ return rwc, buf, err
+}
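+
+// A minimal in-handler sketch (editor's addition): after Hijack returns,
+// the caller owns the connection and is responsible for closing it.
+//
+//	http.HandleFunc("/raw", func(w http.ResponseWriter, r *http.Request) {
+//		hj, ok := w.(http.Hijacker)
+//		if !ok {
+//			http.Error(w, "hijacking unsupported", http.StatusInternalServerError)
+//			return
+//		}
+//		conn, buf, err := hj.Hijack()
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusInternalServerError)
+//			return
+//		}
+//		defer conn.Close()
+//		buf.WriteString("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
+//		buf.Flush()
+//	})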
+
+func (w *response) CloseNotify() <-chan bool {
+ if w.handlerDone.Load() {
+ panic("net/http: CloseNotify called after ServeHTTP finished")
+ }
+ return w.closeNotifyCh
+}
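+
+// CloseNotify predates contexts; a hedged sketch (editor's addition) of the
+// equivalent pattern using the request's context instead:
+//
+//	http.HandleFunc("/slow", func(w http.ResponseWriter, r *http.Request) {
+//		select {
+//		case <-time.After(10 * time.Second):
+//			io.WriteString(w, "done")
+//		case <-r.Context().Done():
+//			// the client went away; stop doing work
+//		}
+//	})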
+
+func registerOnHitEOF(rc io.ReadCloser, fn func()) {
+ switch v := rc.(type) {
+ case *expectContinueReader:
+ registerOnHitEOF(v.readCloser, fn)
+ case *body:
+ v.registerOnHitEOF(fn)
+ default:
+ panic("unexpected type " + fmt.Sprintf("%T", rc))
+ }
+}
+
+// requestBodyRemains reports whether future calls to Read
+// on rc might yield more data.
+func requestBodyRemains(rc io.ReadCloser) bool {
+ if rc == NoBody {
+ return false
+ }
+ switch v := rc.(type) {
+ case *expectContinueReader:
+ return requestBodyRemains(v.readCloser)
+ case *body:
+ return v.bodyRemains()
+ default:
+ panic("unexpected type " + fmt.Sprintf("%T", rc))
+ }
+}
+
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as HTTP handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// [Handler] that calls f.
+type HandlerFunc func(ResponseWriter, *Request)
+
+// ServeHTTP calls f(w, r).
+func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
+ f(w, r)
+}
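+
+// Illustrative use of the adapter (editor's sketch): any function with the
+// right signature can stand in for a [Handler].
+//
+//	greet := func(w http.ResponseWriter, r *http.Request) {
+//		io.WriteString(w, "hello")
+//	}
+//	http.Handle("/hello", http.HandlerFunc(greet))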
+
+// Helper handlers
+
+// Error replies to the request with the specified error message and HTTP code.
+// It does not otherwise end the request; the caller should ensure no further
+// writes are done to w.
+// The error message should be plain text.
+func Error(w ResponseWriter, error string, code int) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ w.WriteHeader(code)
+ fmt.Fprintln(w, error)
+}
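+
+// A typical call site (editor's sketch); Error writes the headers and body,
+// but the handler must still return to stop further writes:
+//
+//	http.HandleFunc("/admin", func(w http.ResponseWriter, r *http.Request) {
+//		if r.Header.Get("Authorization") == "" {
+//			http.Error(w, "unauthorized", http.StatusUnauthorized)
+//			return
+//		}
+//		io.WriteString(w, "welcome")
+//	})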
+
+// NotFound replies to the request with an HTTP 404 not found error.
+func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) }
+
+// NotFoundHandler returns a simple request handler
+// that replies to each request with a “404 page not found” reply.
+func NotFoundHandler() Handler { return HandlerFunc(NotFound) }
+
+// StripPrefix returns a handler that serves HTTP requests by removing the
+// given prefix from the request URL's Path (and RawPath if set) and invoking
+// the handler h. StripPrefix handles a request for a path that doesn't begin
+// with prefix by replying with an HTTP 404 not found error. The prefix must
+// match exactly: if the prefix in the request contains escaped characters
+// the reply is also an HTTP 404 not found error.
+func StripPrefix(prefix string, h Handler) Handler {
+ if prefix == "" {
+ return h
+ }
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ p := strings.TrimPrefix(r.URL.Path, prefix)
+ rp := strings.TrimPrefix(r.URL.RawPath, prefix)
+ if len(p) < len(r.URL.Path) && (r.URL.RawPath == "" || len(rp) < len(r.URL.RawPath)) {
+ r2 := new(Request)
+ *r2 = *r
+ r2.URL = new(url.URL)
+ *r2.URL = *r.URL
+ r2.URL.Path = p
+ r2.URL.RawPath = rp
+ h.ServeHTTP(w, r2)
+ } else {
+ NotFound(w, r)
+ }
+ })
+}
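+
+// The canonical pairing with a file server (editor's sketch; the directory
+// name "./public" is a placeholder):
+//
+//	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./public"))))
+//
+// A request for "/static/app.css" is then served from "./public/app.css".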
+
+// Redirect replies to the request with a redirect to url,
+// which may be a path relative to the request path.
+//
+// The provided code should be in the 3xx range and is usually
+// [StatusMovedPermanently], [StatusFound] or [StatusSeeOther].
+//
+// If the Content-Type header has not been set, [Redirect] sets it
+// to "text/html; charset=utf-8" and writes a small HTML body.
+// Setting the Content-Type header to any value, including nil,
+// disables that behavior.
+func Redirect(w ResponseWriter, r *Request, url string, code int) {
+ if u, err := urlpkg.Parse(url); err == nil {
+ // If url was relative, make its path absolute by
+ // combining with request path.
+ // The client would probably do this for us,
+ // but doing it ourselves is more reliable.
+ // See RFC 7231, section 7.1.2
+ if u.Scheme == "" && u.Host == "" {
+ oldpath := r.URL.Path
+ if oldpath == "" { // should not happen, but avoid a crash if it does
+ oldpath = "/"
+ }
+
+ // no leading http://server
+ if url == "" || url[0] != '/' {
+ // make relative path absolute
+ olddir, _ := path.Split(oldpath)
+ url = olddir + url
+ }
+
+ var query string
+ if i := strings.Index(url, "?"); i != -1 {
+ url, query = url[:i], url[i:]
+ }
+
+ // clean up but preserve trailing slash
+ trailing := strings.HasSuffix(url, "/")
+ url = path.Clean(url)
+ if trailing && !strings.HasSuffix(url, "/") {
+ url += "/"
+ }
+ url += query
+ }
+ }
+
+ h := w.Header()
+
+ // RFC 7231 notes that a short HTML body is usually included in
+ // the response because older user agents may not understand 301/307.
+ // Do it only if the request didn't already have a Content-Type header.
+ _, hadCT := h["Content-Type"]
+
+ h.Set("Location", hexEscapeNonASCII(url))
+ if !hadCT && (r.Method == "GET" || r.Method == "HEAD") {
+ h.Set("Content-Type", "text/html; charset=utf-8")
+ }
+ w.WriteHeader(code)
+
+ // Shouldn't send the body for POST or HEAD; that leaves GET.
+ if !hadCT && r.Method == "GET" {
+ body := "<a href=\"" + htmlEscape(url) + "\">" + StatusText(code) + "</a>.\n"
+ fmt.Fprintln(w, body)
+ }
+}
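+
+// In-handler use (editor's sketch); a relative target is resolved against
+// the request path as described above:
+//
+//	http.HandleFunc("/old", func(w http.ResponseWriter, r *http.Request) {
+//		http.Redirect(w, r, "/new", http.StatusMovedPermanently)
+//	})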
+
+var htmlReplacer = strings.NewReplacer(
+ "&", "&amp;",
+ "<", "&lt;",
+ ">", "&gt;",
+ // "&#34;" is shorter than "&quot;".
+ `"`, "&#34;",
+ // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+ "'", "&#39;",
+)
+
+func htmlEscape(s string) string {
+ return htmlReplacer.Replace(s)
+}
+
+// Redirect to a fixed URL
+type redirectHandler struct {
+ url string
+ code int
+}
+
+func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ Redirect(w, r, rh.url, rh.code)
+}
+
+// RedirectHandler returns a request handler that redirects
+// each request it receives to the given url using the given
+// status code.
+//
+// The provided code should be in the 3xx range and is usually
+// [StatusMovedPermanently], [StatusFound] or [StatusSeeOther].
+func RedirectHandler(url string, code int) Handler {
+ return &redirectHandler{url, code}
+}
+
+// ServeMux is an HTTP request multiplexer.
+// It matches the URL of each incoming request against a list of registered
+// patterns and calls the handler for the pattern that
+// most closely matches the URL.
+//
+// # Patterns
+//
+// Patterns can match the method, host and path of a request.
+// Some examples:
+//
+// - "/index.html" matches the path "/index.html" for any host and method.
+// - "GET /static/" matches a GET request whose path begins with "/static/".
+// - "example.com/" matches any request to the host "example.com".
+// - "example.com/{$}" matches requests with host "example.com" and path "/".
+// - "/b/{bucket}/o/{objectname...}" matches paths whose first segment is "b"
+// and whose third segment is "o". The name "bucket" denotes the second
+// segment and "objectname" denotes the remainder of the path.
+//
+// In general, a pattern looks like
+//
+// [METHOD ][HOST]/[PATH]
+//
+// All three parts are optional; "/" is a valid pattern.
+// If METHOD is present, it must be followed by a single space.
+//
+// Literal (that is, non-wildcard) parts of a pattern match
+// the corresponding parts of a request case-sensitively.
+//
+// A pattern with no method matches every method. A pattern
+// with the method GET matches both GET and HEAD requests.
+// Otherwise, the method must match exactly.
+//
+// A pattern with no host matches every host.
+// A pattern with a host matches URLs on that host only.
+//
+// A path can include wildcard segments of the form {NAME} or {NAME...}.
+// For example, "/b/{bucket}/o/{objectname...}".
+// The wildcard name must be a valid Go identifier.
+// Wildcards must be full path segments: they must be preceded by a slash and followed by
+// either a slash or the end of the string.
+// For example, "/b_{bucket}" is not a valid pattern.
+//
+// Normally a wildcard matches only a single path segment,
+// ending at the next literal slash (not %2F) in the request URL.
+// But if the "..." is present, then the wildcard matches the remainder of the URL path, including slashes.
+// (Therefore it is invalid for a "..." wildcard to appear anywhere but at the end of a pattern.)
+// The match for a wildcard can be obtained by calling [Request.PathValue] with the wildcard's name.
+// A trailing slash in a path acts as an anonymous "..." wildcard.
+//
+// The special wildcard {$} matches only the end of the URL.
+// For example, the pattern "/{$}" matches only the path "/",
+// whereas the pattern "/" matches every path.
+//
+// For matching, both pattern paths and incoming request paths are unescaped segment by segment.
+// So, for example, the path "/a%2Fb/100%25" is treated as having two segments, "a/b" and "100%".
+// The pattern "/a%2fb/" matches it, but the pattern "/a/b/" does not.
+//
+// # Precedence
+//
+// If two or more patterns match a request, then the most specific pattern takes precedence.
+// A pattern P1 is more specific than P2 if P1 matches a strict subset of P2’s requests;
+// that is, if P2 matches all the requests of P1 and more.
+// If neither is more specific, then the patterns conflict.
+// There is one exception to this rule, for backwards compatibility:
+// if two patterns would otherwise conflict and one has a host while the other does not,
+// then the pattern with the host takes precedence.
+// If a pattern passed to [ServeMux.Handle] or [ServeMux.HandleFunc] conflicts with
+// another pattern that is already registered, those functions panic.
+//
+// As an example of the general rule, "/images/thumbnails/" is more specific than "/images/",
+// so both can be registered.
+// The former matches paths beginning with "/images/thumbnails/"
+// and the latter will match any other path in the "/images/" subtree.
+//
+// As another example, consider the patterns "GET /" and "/index.html":
+// both match a GET request for "/index.html", but the former pattern
+// matches all other GET and HEAD requests, while the latter matches any
+// request for "/index.html" that uses a different method.
+// The patterns conflict.
+//
+// # Trailing-slash redirection
+//
+// Consider a [ServeMux] with a handler for a subtree, registered using a trailing slash or "..." wildcard.
+// If the ServeMux receives a request for the subtree root without a trailing slash,
+// it redirects the request by adding the trailing slash.
+// This behavior can be overridden with a separate registration for the path without
+// the trailing slash or "..." wildcard. For example, registering "/images/" causes ServeMux
+// to redirect a request for "/images" to "/images/", unless "/images" has
+// been registered separately.
+//
+// # Request sanitizing
+//
+// ServeMux also takes care of sanitizing the URL request path and the Host
+// header, stripping the port number and redirecting any request containing . or
+// .. segments or repeated slashes to an equivalent, cleaner URL.
+//
+// # Compatibility
+//
+// The pattern syntax and matching behavior of ServeMux changed significantly
+// in Go 1.22. To restore the old behavior, set the GODEBUG environment variable
+// to "httpmuxgo121=1". This setting is read once, at program startup; changes
+// during execution will be ignored.
+//
+// The backwards-incompatible changes include:
+// - Wildcards are just ordinary literal path segments in 1.21.
+// For example, the pattern "/{x}" will match only that path in 1.21,
+// but will match any one-segment path in 1.22.
+// - In 1.21, no pattern was rejected, unless it was empty or conflicted with an existing pattern.
+// In 1.22, syntactically invalid patterns will cause [ServeMux.Handle] and [ServeMux.HandleFunc] to panic.
+// For example, in 1.21, the patterns "/{" and "/a{x}" match themselves,
+// but in 1.22 they are invalid and will cause a panic when registered.
+// - In 1.22, each segment of a pattern is unescaped; this was not done in 1.21.
+// For example, in 1.22 the pattern "/%61" matches the path "/a" ("%61" being the URL escape sequence for "a"),
+// but in 1.21 it would match only the path "/%2561" (where "%25" is the escape for the percent sign).
+// - When matching patterns to paths, in 1.22 each segment of the path is unescaped; in 1.21, the entire path is unescaped.
+// This change mostly affects how paths with %2F escapes adjacent to slashes are treated.
+// See https://go.dev/issue/21955 for details.
+type ServeMux struct {
+ mu sync.RWMutex
+ tree routingNode
+ index routingIndex
+ patterns []*pattern // TODO(jba): remove if possible
+ mux121 serveMux121 // used only when GODEBUG=httpmuxgo121=1
+}
+
+// NewServeMux allocates and returns a new [ServeMux].
+func NewServeMux() *ServeMux {
+ return &ServeMux{}
+}
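+
+// A registration sketch (editor's addition) exercising the Go 1.22 pattern
+// syntax documented above; the paths and port are illustrative only.
+//
+//	mux := http.NewServeMux()
+//	mux.HandleFunc("GET /b/{bucket}/o/{objectname...}", func(w http.ResponseWriter, r *http.Request) {
+//		bucket := r.PathValue("bucket")     // second segment
+//		object := r.PathValue("objectname") // remainder of the path
+//		fmt.Fprintf(w, "bucket=%s object=%s", bucket, object)
+//	})
+//	log.Fatal(http.ListenAndServe(":8080", mux))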
+
+// DefaultServeMux is the default [ServeMux] used by [Serve].
+var DefaultServeMux = &defaultServeMux
+
+var defaultServeMux ServeMux
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root;
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ // Fast path for common case of p being the string we want:
+ if len(p) == len(np)+1 && strings.HasPrefix(p, np) {
+ np = p
+ } else {
+ np += "/"
+ }
+ }
+ return np
+}
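+
+// Examples (editor's sketch):
+//
+//	cleanPath("")           == "/"
+//	cleanPath("a/b")        == "/a/b"
+//	cleanPath("/a/b/../c/") == "/a/c/" // trailing slash preserved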
+
+// stripHostPort returns h without any trailing ":<port>".
+func stripHostPort(h string) string {
+ // If no port on host, return unchanged
+ if !strings.Contains(h, ":") {
+ return h
+ }
+ host, _, err := net.SplitHostPort(h)
+ if err != nil {
+ return h // on error, return unchanged
+ }
+ return host
+}
+
+// Handler returns the handler to use for the given request,
+// consulting r.Method, r.Host, and r.URL.Path. It always returns
+// a non-nil handler. If the path is not in its canonical form, the
+// handler will be an internally-generated handler that redirects
+// to the canonical path. If the host contains a port, it is ignored
+// when matching handlers.
+//
+// The path and host are used unchanged for CONNECT requests.
+//
+// Handler also returns the registered pattern that matches the
+// request or, in the case of internally-generated redirects,
+// the path that will match after following the redirect.
+//
+// If there is no registered handler that applies to the request,
+// Handler returns a “page not found” handler and an empty pattern.
+func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
+ if use121 {
+ return mux.mux121.findHandler(r)
+ }
+ h, p, _, _ := mux.findHandler(r)
+ return h, p
+}
+
+// findHandler finds a handler for a request.
+// If there is a matching handler, it returns it and the pattern that matched.
+// Otherwise it returns a Redirect or NotFound handler with the path that would match
+// after the redirect.
+func (mux *ServeMux) findHandler(r *Request) (h Handler, patStr string, _ *pattern, matches []string) {
+ var n *routingNode
+ host := r.URL.Host
+ escapedPath := r.URL.EscapedPath()
+ path := escapedPath
+ // CONNECT requests are not canonicalized.
+ if r.Method == "CONNECT" {
+ // If r.URL.Path is /tree and its handler is not registered,
+ // the /tree -> /tree/ redirect applies to CONNECT requests
+ // but the path canonicalization does not.
+ _, _, u := mux.matchOrRedirect(host, r.Method, path, r.URL)
+ if u != nil {
+ return RedirectHandler(u.String(), StatusMovedPermanently), u.Path, nil, nil
+ }
+ // Redo the match, this time with r.Host instead of r.URL.Host.
+ // Pass a nil URL to skip the trailing-slash redirect logic.
+ n, matches, _ = mux.matchOrRedirect(r.Host, r.Method, path, nil)
+ } else {
+ // All other requests have any port stripped and path cleaned
+ // before passing to mux.handler.
+ host = stripHostPort(r.Host)
+ path = cleanPath(path)
+
+ // If the given path is /tree and its handler is not registered,
+ // redirect for /tree/.
+ var u *url.URL
+ n, matches, u = mux.matchOrRedirect(host, r.Method, path, r.URL)
+ if u != nil {
+ return RedirectHandler(u.String(), StatusMovedPermanently), u.Path, nil, nil
+ }
+ if path != escapedPath {
+ // Redirect to cleaned path.
+ patStr := ""
+ if n != nil {
+ patStr = n.pattern.String()
+ }
+ u := &url.URL{Path: path, RawQuery: r.URL.RawQuery}
+ return RedirectHandler(u.String(), StatusMovedPermanently), patStr, nil, nil
+ }
+ }
+ if n == nil {
+ // We didn't find a match with the request method. To distinguish between
+ // Not Found and Method Not Allowed, see if there is another pattern that
+ // matches except for the method.
+ allowedMethods := mux.matchingMethods(host, path)
+ if len(allowedMethods) > 0 {
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Allow", strings.Join(allowedMethods, ", "))
+ Error(w, StatusText(StatusMethodNotAllowed), StatusMethodNotAllowed)
+ }), "", nil, nil
+ }
+ return NotFoundHandler(), "", nil, nil
+ }
+ return n.handler, n.pattern.String(), n.pattern, matches
+}
+
+// matchOrRedirect looks up a node in the tree that matches the host, method and path.
+//
+// If the url argument is non-nil, matchOrRedirect also deals with trailing-slash
+// redirection: when a path doesn't match exactly, the match is tried again
+// after appending "/" to the path. If that second match succeeds, the last
+// return value is the URL to redirect to.
+func (mux *ServeMux) matchOrRedirect(host, method, path string, u *url.URL) (_ *routingNode, matches []string, redirectTo *url.URL) {
+ mux.mu.RLock()
+ defer mux.mu.RUnlock()
+
+ n, matches := mux.tree.match(host, method, path)
+ // If we have an exact match, or we were asked not to try trailing-slash redirection,
+ // then we're done.
+ if !exactMatch(n, path) && u != nil {
+ // If there is an exact match with a trailing slash, then redirect.
+ path += "/"
+ n2, _ := mux.tree.match(host, method, path)
+ if exactMatch(n2, path) {
+ return nil, nil, &url.URL{Path: cleanPath(u.Path) + "/", RawQuery: u.RawQuery}
+ }
+ }
+ return n, matches, nil
+}
+
+// exactMatch reports whether the node's pattern exactly matches the path.
+// As a special case, if the node is nil, exactMatch returns false.
+//
+// Before wildcards were introduced, it was clear that an exact match meant
+// that the pattern and path were the same string. The only other possibility
+// was that a trailing-slash pattern, like "/", matched a path longer than
+// it, like "/a".
+//
+// With wildcards, we define an inexact match as any one where a multi wildcard
+// matches a non-empty string. All other matches are exact.
+// For example, these are all exact matches:
+//
+// pattern path
+// /a /a
+// /{x} /a
+// /a/{$} /a/
+// /a/ /a/
+//
+// The last case has a multi wildcard (implicitly), but the match is exact because
+// the wildcard matches the empty string.
+//
+// Examples of matches that are not exact:
+//
+// pattern path
+// / /a
+// /a/{x...} /a/b
+func exactMatch(n *routingNode, path string) bool {
+ if n == nil {
+ return false
+ }
+ // We can't directly implement the definition (empty match for multi
+ // wildcard) because we don't record a match for anonymous multis.
+
+ // If there is no multi, the match is exact.
+ if !n.pattern.lastSegment().multi {
+ return true
+ }
+
+ // If the path doesn't end in a trailing slash, then the multi match
+ // is non-empty.
+ if len(path) > 0 && path[len(path)-1] != '/' {
+ return false
+ }
+ // Only patterns ending in {$} or a multi wildcard can
+ // match a path with a trailing slash.
+ // For the match to be exact, the number of pattern
+ // segments should be the same as the number of slashes in the path.
+ // E.g. "/a/b/{$}" and "/a/b/{...}" exactly match "/a/b/", but "/a/" does not.
+ return len(n.pattern.segments) == strings.Count(path, "/")
+}
+
+// matchingMethods returns a sorted list of all methods that would match the given host and path.
+func (mux *ServeMux) matchingMethods(host, path string) []string {
+ // Hold the read lock for the entire method so that the two matches are done
+ // on the same set of registered patterns.
+ mux.mu.RLock()
+ defer mux.mu.RUnlock()
+ ms := map[string]bool{}
+ mux.tree.matchingMethods(host, path, ms)
+ // matchOrRedirect will try appending a trailing slash if there is no match.
+ mux.tree.matchingMethods(host, path+"/", ms)
+ methods := mapKeys(ms)
+ sort.Strings(methods)
+ return methods
+}
+
+// TODO(jba): replace with maps.Keys when it is defined.
+func mapKeys[K comparable, V any](m map[K]V) []K {
+ var ks []K
+ for k := range m {
+ ks = append(ks, k)
+ }
+ return ks
+}
+
+// ServeHTTP dispatches the request to the handler whose
+// pattern most closely matches the request URL.
+func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) {
+ if r.RequestURI == "*" {
+ if r.ProtoAtLeast(1, 1) {
+ w.Header().Set("Connection", "close")
+ }
+ w.WriteHeader(StatusBadRequest)
+ return
+ }
+ var h Handler
+ if use121 {
+ h, _ = mux.mux121.findHandler(r)
+ } else {
+ h, _, r.pat, r.matches = mux.findHandler(r)
+ }
+ h.ServeHTTP(w, r)
+}
+
+// The four functions below all call ServeMux.register so that callerLocation
+// always refers to user code.
+
+// Handle registers the handler for the given pattern.
+// If the given pattern conflicts with one that is already registered, Handle
+// panics.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ if use121 {
+ mux.mux121.handle(pattern, handler)
+ } else {
+ mux.register(pattern, handler)
+ }
+}
+
+// HandleFunc registers the handler function for the given pattern.
+// If the given pattern conflicts with one that is already registered, HandleFunc
+// panics.
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+ if use121 {
+ mux.mux121.handleFunc(pattern, handler)
+ } else {
+ mux.register(pattern, HandlerFunc(handler))
+ }
+}
+
+// Handle registers the handler for the given pattern in [DefaultServeMux].
+// The documentation for [ServeMux] explains how patterns are matched.
+func Handle(pattern string, handler Handler) {
+ if use121 {
+ DefaultServeMux.mux121.handle(pattern, handler)
+ } else {
+ DefaultServeMux.register(pattern, handler)
+ }
+}
+
+// HandleFunc registers the handler function for the given pattern in [DefaultServeMux].
+// The documentation for [ServeMux] explains how patterns are matched.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) {
+ if use121 {
+ DefaultServeMux.mux121.handleFunc(pattern, handler)
+ } else {
+ DefaultServeMux.register(pattern, HandlerFunc(handler))
+ }
+}
+
+func (mux *ServeMux) register(pattern string, handler Handler) {
+ if err := mux.registerErr(pattern, handler); err != nil {
+ panic(err)
+ }
+}
+
+func (mux *ServeMux) registerErr(patstr string, handler Handler) error {
+ if patstr == "" {
+ return errors.New("http: invalid pattern")
+ }
+ if handler == nil {
+ return errors.New("http: nil handler")
+ }
+ if f, ok := handler.(HandlerFunc); ok && f == nil {
+ return errors.New("http: nil handler")
+ }
+
+ pat, err := parsePattern(patstr)
+ if err != nil {
+ return fmt.Errorf("parsing %q: %w", patstr, err)
+ }
+
+ // Get the caller's location, for better conflict error messages.
+ // Skip register and whatever calls it.
+ _, file, line, ok := runtime.Caller(3)
+ if !ok {
+ pat.loc = "unknown location"
+ } else {
+ pat.loc = fmt.Sprintf("%s:%d", file, line)
+ }
+
+ mux.mu.Lock()
+ defer mux.mu.Unlock()
+ // Check for conflict.
+ if err := mux.index.possiblyConflictingPatterns(pat, func(pat2 *pattern) error {
+ if pat.conflictsWith(pat2) {
+ d := describeConflict(pat, pat2)
+ return fmt.Errorf("pattern %q (registered at %s) conflicts with pattern %q (registered at %s):\n%s",
+ pat, pat.loc, pat2, pat2.loc, d)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ mux.tree.addPattern(pat, handler)
+ mux.index.addPattern(pat)
+ mux.patterns = append(mux.patterns, pat)
+ return nil
+}
+
+// Serve accepts incoming HTTP connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+//
+// The handler is typically nil, in which case [DefaultServeMux] is used.
+//
+// HTTP/2 support is only enabled if the Listener returns [*tls.Conn]
+// connections and they were configured with "h2" in the TLS
+// Config.NextProtos.
+//
+// Serve always returns a non-nil error.
+func Serve(l net.Listener, handler Handler) error {
+ srv := &Server{Handler: handler}
+ return srv.Serve(l)
+}
+
+// ServeTLS accepts incoming HTTPS connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+//
+// The handler is typically nil, in which case [DefaultServeMux] is used.
+//
+// Additionally, files containing a certificate and matching private key
+// for the server must be provided. If the certificate is signed by a
+// certificate authority, the certFile should be the concatenation
+// of the server's certificate, any intermediates, and the CA's certificate.
+//
+// ServeTLS always returns a non-nil error.
+func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
+ srv := &Server{Handler: handler}
+ return srv.ServeTLS(l, certFile, keyFile)
+}
+
+// A Server defines parameters for running an HTTP server.
+// The zero value for Server is a valid configuration.
+type Server struct {
+ // Addr optionally specifies the TCP address for the server to listen on,
+ // in the form "host:port". If empty, ":http" (port 80) is used.
+ // The service names are defined in RFC 6335 and assigned by IANA.
+ // See net.Dial for details of the address format.
+ Addr string
+
+ Handler Handler // handler to invoke, http.DefaultServeMux if nil
+
+ // DisableGeneralOptionsHandler, if true, passes "OPTIONS *" requests to the Handler,
+ // otherwise responds with 200 OK and Content-Length: 0.
+ DisableGeneralOptionsHandler bool
+
+ // TLSConfig optionally provides a TLS configuration for use
+ // by ServeTLS and ListenAndServeTLS. Note that this value is
+ // cloned by ServeTLS and ListenAndServeTLS, so it's not
+ // possible to modify the configuration with methods like
+ // tls.Config.SetSessionTicketKeys. To use
+ // SetSessionTicketKeys, use Server.Serve with a TLS Listener
+ // instead.
+ TLSConfig *tls.Config
+
+ // ReadTimeout is the maximum duration for reading the entire
+ // request, including the body. A zero or negative value means
+ // there will be no timeout.
+ //
+ // Because ReadTimeout does not let Handlers make per-request
+ // decisions on each request body's acceptable deadline or
+ // upload rate, most users will prefer to use
+ // ReadHeaderTimeout. It is valid to use them both.
+ ReadTimeout time.Duration
+
+ // ReadHeaderTimeout is the amount of time allowed to read
+ // request headers. The connection's read deadline is reset
+ // after reading the headers and the Handler can decide what
+ // is considered too slow for the body. If ReadHeaderTimeout
+ // is zero, the value of ReadTimeout is used. If both are
+ // zero, there is no timeout.
+ ReadHeaderTimeout time.Duration
+
+ // WriteTimeout is the maximum duration before timing out
+ // writes of the response. It is reset whenever a new
+ // request's header is read. Like ReadTimeout, it does not
+ // let Handlers make decisions on a per-request basis.
+ // A zero or negative value means there will be no timeout.
+ WriteTimeout time.Duration
+
+ // IdleTimeout is the maximum amount of time to wait for the
+ // next request when keep-alives are enabled. If IdleTimeout
+ // is zero, the value of ReadTimeout is used. If both are
+ // zero, there is no timeout.
+ IdleTimeout time.Duration
+
+ // MaxHeaderBytes controls the maximum number of bytes the
+ // server will read parsing the request header's keys and
+ // values, including the request line. It does not limit the
+ // size of the request body.
+ // If zero, DefaultMaxHeaderBytes is used.
+ MaxHeaderBytes int
+
+ // TLSNextProto optionally specifies a function to take over
+ // ownership of the provided TLS connection when an ALPN
+ // protocol upgrade has occurred. The map key is the protocol
+ // name negotiated. The Handler argument should be used to
+ // handle HTTP requests and will initialize the Request's TLS
+ // and RemoteAddr if not already set. The connection is
+ // automatically closed when the function returns.
+ // If TLSNextProto is not nil, HTTP/2 support is not enabled
+ // automatically.
+ TLSNextProto map[string]func(*Server, *tls.Conn, Handler)
+
+ // ConnState specifies an optional callback function that is
+ // called when a client connection changes state. See the
+ // ConnState type and associated constants for details.
+ ConnState func(net.Conn, ConnState)
+
+ // ErrorLog specifies an optional logger for errors accepting
+ // connections, unexpected behavior from handlers, and
+ // underlying FileSystem errors.
+ // If nil, logging is done via the log package's standard logger.
+ ErrorLog *log.Logger
+
+ // BaseContext optionally specifies a function that returns
+ // the base context for incoming requests on this server.
+ // The provided Listener is the specific Listener that's
+ // about to start accepting requests.
+ // If BaseContext is nil, the default is context.Background().
+ // If non-nil, it must return a non-nil context.
+ BaseContext func(net.Listener) context.Context
+
+ // ConnContext optionally specifies a function that modifies
+ // the context used for a new connection c. The provided ctx
+ // is derived from the base context and has a ServerContextKey
+ // value.
+ ConnContext func(ctx context.Context, c net.Conn) context.Context
+
+ inShutdown atomic.Bool // true when server is in shutdown
+
+ disableKeepAlives atomic.Bool
+ nextProtoOnce sync.Once // guards setupHTTP2_* init
+ nextProtoErr error // result of http2.ConfigureServer if used
+
+ mu sync.Mutex
+ listeners map[*net.Listener]struct{}
+ activeConn map[*conn]struct{}
+ onShutdown []func()
+
+ listenerGroup sync.WaitGroup
+}
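+
+// A configuration sketch (editor's addition): the zero value works, but
+// production servers usually set explicit timeouts. The values, address,
+// and certificate file names below are placeholders.
+//
+//	srv := &http.Server{
+//		Addr:              ":8443",
+//		Handler:           mux, // some http.Handler
+//		ReadHeaderTimeout: 5 * time.Second,
+//		ReadTimeout:       30 * time.Second,
+//		WriteTimeout:      30 * time.Second,
+//		IdleTimeout:       120 * time.Second,
+//	}
+//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))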
+
+// Close immediately closes all active net.Listeners and any
+// connections in state [StateNew], [StateActive], or [StateIdle]. For a
+// graceful shutdown, use [Server.Shutdown].
+//
+// Close does not attempt to close (and does not even know about)
+// any hijacked connections, such as WebSockets.
+//
+// Close returns any error returned from closing the [Server]'s
+// underlying Listener(s).
+func (srv *Server) Close() error {
+ srv.inShutdown.Store(true)
+ srv.mu.Lock()
+ defer srv.mu.Unlock()
+ err := srv.closeListenersLocked()
+
+ // Unlock srv.mu while waiting for listenerGroup.
+ // The group Add and Done calls are made with srv.mu held,
+ // to avoid adding a new listener in the window between
+ // us setting inShutdown above and waiting here.
+ srv.mu.Unlock()
+ srv.listenerGroup.Wait()
+ srv.mu.Lock()
+
+ for c := range srv.activeConn {
+ c.rwc.Close()
+ delete(srv.activeConn, c)
+ }
+ return err
+}
+
+// shutdownPollIntervalMax is the max polling interval when checking
+// quiescence during Server.Shutdown. Polling starts with a small
+// interval and backs off to the max.
+// Ideally we could find a solution that doesn't involve polling and
+// doesn't have a high runtime cost (or contentious mutexes), but that
+// is left as an exercise for the reader.
+const shutdownPollIntervalMax = 500 * time.Millisecond
+
+// Shutdown gracefully shuts down the server without interrupting any
+// active connections. Shutdown works by first closing all open
+// listeners, then closing all idle connections, and then waiting
+// indefinitely for connections to return to idle and then shut down.
+// If the provided context expires before the shutdown is complete,
+// Shutdown returns the context's error, otherwise it returns any
+// error returned from closing the [Server]'s underlying Listener(s).
+//
+// When Shutdown is called, [Serve], [ListenAndServe], and
+// [ListenAndServeTLS] immediately return [ErrServerClosed]. Make sure the
+// program doesn't exit and waits instead for Shutdown to return.
+//
+// Shutdown does not attempt to close nor wait for hijacked
+// connections such as WebSockets. The caller of Shutdown should
+// separately notify such long-lived connections of shutdown and wait
+// for them to close, if desired. See [Server.RegisterOnShutdown] for a way to
+// register shutdown notification functions.
+//
+// Once Shutdown has been called on a server, it may not be reused;
+// future calls to methods such as Serve will return ErrServerClosed.
+func (srv *Server) Shutdown(ctx context.Context) error {
+ srv.inShutdown.Store(true)
+
+ srv.mu.Lock()
+ lnerr := srv.closeListenersLocked()
+ for _, f := range srv.onShutdown {
+ go f()
+ }
+ srv.mu.Unlock()
+ srv.listenerGroup.Wait()
+
+ pollIntervalBase := time.Millisecond
+ nextPollInterval := func() time.Duration {
+ // Add 10% jitter.
+ interval := pollIntervalBase + time.Duration(rand.Intn(int(pollIntervalBase/10)))
+ // Double and clamp for next time.
+ pollIntervalBase *= 2
+ if pollIntervalBase > shutdownPollIntervalMax {
+ pollIntervalBase = shutdownPollIntervalMax
+ }
+ return interval
+ }
+
+ timer := time.NewTimer(nextPollInterval())
+ defer timer.Stop()
+ for {
+ if srv.closeIdleConns() {
+ return lnerr
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-timer.C:
+ timer.Reset(nextPollInterval())
+ }
+ }
+}
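+
+// The usual coordination pattern (editor's sketch): run Serve in one
+// goroutine, call Shutdown from another, and keep the program alive until
+// Shutdown returns, per the note above.
+//
+//	srv := &http.Server{Addr: ":8080"}
+//	idleConnsClosed := make(chan struct{})
+//	go func() {
+//		sigint := make(chan os.Signal, 1)
+//		signal.Notify(sigint, os.Interrupt)
+//		<-sigint
+//		if err := srv.Shutdown(context.Background()); err != nil {
+//			log.Printf("HTTP server Shutdown: %v", err)
+//		}
+//		close(idleConnsClosed)
+//	}()
+//	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
+//		log.Fatalf("HTTP server ListenAndServe: %v", err)
+//	}
+//	<-idleConnsClosed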
+
+// RegisterOnShutdown registers a function to call on [Server.Shutdown].
+// This can be used to gracefully shutdown connections that have
+// undergone ALPN protocol upgrade or that have been hijacked.
+// This function should start protocol-specific graceful shutdown,
+// but should not wait for shutdown to complete.
+func (srv *Server) RegisterOnShutdown(f func()) {
+ srv.mu.Lock()
+ srv.onShutdown = append(srv.onShutdown, f)
+ srv.mu.Unlock()
+}
+
+// closeIdleConns closes all idle connections and reports whether the
+// server is quiescent.
+func (s *Server) closeIdleConns() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ quiescent := true
+ for c := range s.activeConn {
+ st, unixSec := c.getState()
+ // Issue 22682: treat StateNew connections as if
+ // they're idle if we haven't read the first request's
+ // header in over 5 seconds.
+ if st == StateNew && unixSec < time.Now().Unix()-5 {
+ st = StateIdle
+ }
+ if st != StateIdle || unixSec == 0 {
+ // Assume unixSec == 0 means it's a very new
+ // connection, without state set yet.
+ quiescent = false
+ continue
+ }
+ c.rwc.Close()
+ delete(s.activeConn, c)
+ }
+ return quiescent
+}
+
+func (s *Server) closeListenersLocked() error {
+ var err error
+ for ln := range s.listeners {
+ if cerr := (*ln).Close(); cerr != nil && err == nil {
+ err = cerr
+ }
+ }
+ return err
+}
+
+// A ConnState represents the state of a client connection to a server.
+// It's used by the optional [Server.ConnState] hook.
+type ConnState int
+
+const (
+ // StateNew represents a new connection that is expected to
+ // send a request immediately. Connections begin at this
+ // state and then transition to either StateActive or
+ // StateClosed.
+ StateNew ConnState = iota
+
+ // StateActive represents a connection that has read 1 or more
+ // bytes of a request. The Server.ConnState hook for
+ // StateActive fires before the request has entered a handler
+ // and doesn't fire again until the request has been
+ // handled. After the request is handled, the state
+ // transitions to StateClosed, StateHijacked, or StateIdle.
+ // For HTTP/2, StateActive fires on the transition from zero
+ // to one active request, and only transitions away once all
+ // active requests are complete. That means that ConnState
+ // cannot be used to do per-request work; ConnState only notes
+ // the overall state of the connection.
+ StateActive
+
+ // StateIdle represents a connection that has finished
+ // handling a request and is in the keep-alive state, waiting
+ // for a new request. Connections transition from StateIdle
+ // to either StateActive or StateClosed.
+ StateIdle
+
+ // StateHijacked represents a hijacked connection.
+ // This is a terminal state. It does not transition to StateClosed.
+ StateHijacked
+
+ // StateClosed represents a closed connection.
+ // This is a terminal state. Hijacked connections do not
+ // transition to StateClosed.
+ StateClosed
+)
+
+var stateName = map[ConnState]string{
+ StateNew: "new",
+ StateActive: "active",
+ StateIdle: "idle",
+ StateHijacked: "hijacked",
+ StateClosed: "closed",
+}
+
+func (c ConnState) String() string {
+ return stateName[c]
+}
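+
+// A hook sketch (editor's addition): logging every state transition. As the
+// constants above note, ConnState reflects whole-connection state, not
+// per-request activity.
+//
+//	srv := &http.Server{
+//		Addr: ":8080",
+//		ConnState: func(c net.Conn, state http.ConnState) {
+//			log.Printf("%s -> %s", c.RemoteAddr(), state)
+//		},
+//	}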
+
+// serverHandler delegates to either the server's Handler or
+// DefaultServeMux and also handles "OPTIONS *" requests.
+type serverHandler struct {
+ srv *Server
+}
+
+func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) {
+ handler := sh.srv.Handler
+ if handler == nil {
+ handler = DefaultServeMux
+ }
+ if !sh.srv.DisableGeneralOptionsHandler && req.RequestURI == "*" && req.Method == "OPTIONS" {
+ handler = globalOptionsHandler{}
+ }
+
+ handler.ServeHTTP(rw, req)
+}
+
+// AllowQuerySemicolons returns a handler that serves requests by converting any
+// unescaped semicolons in the URL query to ampersands, and invoking the handler h.
+//
+// This restores the pre-Go 1.17 behavior of splitting query parameters on both
+// semicolons and ampersands. (See golang.org/issue/25192). Note that this
+// behavior doesn't match that of many proxies, and the mismatch can lead to
+// security issues.
+//
+// AllowQuerySemicolons should be invoked before [Request.ParseForm] is called.
+func AllowQuerySemicolons(h Handler) Handler {
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ if strings.Contains(r.URL.RawQuery, ";") {
+ r2 := new(Request)
+ *r2 = *r
+ r2.URL = new(url.URL)
+ *r2.URL = *r.URL
+ r2.URL.RawQuery = strings.ReplaceAll(r.URL.RawQuery, ";", "&")
+ h.ServeHTTP(w, r2)
+ } else {
+ h.ServeHTTP(w, r)
+ }
+ })
+}
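+
+// Wrapping a mux (editor's sketch); the wrapper must sit outside anything
+// that calls ParseForm, so it goes at the outermost layer:
+//
+//	mux := http.NewServeMux()
+//	mux.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) {
+//		q := r.URL.Query() // "a=1;b=2" now parses as two parameters
+//		fmt.Fprintln(w, q)
+//	})
+//	log.Fatal(http.ListenAndServe(":8080", http.AllowQuerySemicolons(mux)))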
+
+// ListenAndServe listens on the TCP network address srv.Addr and then
+// calls [Serve] to handle requests on incoming connections.
+// Accepted connections are configured to enable TCP keep-alives.
+//
+// If srv.Addr is blank, ":http" is used.
+//
+// ListenAndServe always returns a non-nil error. After [Server.Shutdown] or [Server.Close],
+// the returned error is [ErrServerClosed].
+func (srv *Server) ListenAndServe() error {
+ if srv.shuttingDown() {
+ return ErrServerClosed
+ }
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":http"
+ }
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ return err
+ }
+ return srv.Serve(ln)
+}
+
+var testHookServerServe func(*Server, net.Listener) // used if non-nil
+
+// shouldConfigureHTTP2ForServe reports whether Server.Serve should configure
+// automatic HTTP/2 (which sets up the srv.TLSNextProto map).
+func (srv *Server) shouldConfigureHTTP2ForServe() bool {
+ if srv.TLSConfig == nil {
+ // Compatibility with Go 1.6:
+ // If there's no TLSConfig, it's possible that the user just
+ // didn't set it on the http.Server, but did pass it to
+ // tls.NewListener and passed that listener to Serve.
+ // So we should configure HTTP/2 (to set up srv.TLSNextProto)
+ // in case the listener returns an "h2" *tls.Conn.
+ return true
+ }
+ // The user specified a TLSConfig on their http.Server.
+	// In this case, only configure HTTP/2 if their tls.Config
+ // explicitly mentions "h2". Otherwise http2.ConfigureServer
+ // would modify the tls.Config to add it, but they probably already
+ // passed this tls.Config to tls.NewListener. And if they did,
+ // it's too late anyway to fix it. It would only be potentially racy.
+ // See Issue 15908.
+ return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
+}
+
+// ErrServerClosed is returned by the [Server.Serve], [ServeTLS], [ListenAndServe],
+// and [ListenAndServeTLS] methods after a call to [Server.Shutdown] or [Server.Close].
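+//
+// Callers performing a graceful shutdown typically treat it as a
+// non-error; for example (sketch; srv is an initialized [Server]):
+//
+//	if err := srv.ListenAndServe(); err != nil && err != ErrServerClosed {
+//		log.Fatal(err)
+//	}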
+var ErrServerClosed = errors.New("http: Server closed")
+
+// Serve accepts incoming connections on the Listener l, creating a
+// new service goroutine for each. The service goroutines read requests and
+// then call srv.Handler to reply to them.
+//
+// HTTP/2 support is only enabled if the Listener returns [*tls.Conn]
+// connections and they were configured with "h2" in the TLS
+// Config.NextProtos.
+//
+// Serve always returns a non-nil error and closes l.
+// After [Server.Shutdown] or [Server.Close], the returned error is [ErrServerClosed].
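+//
+// A minimal sketch, assuming srv is an initialized [Server]:
+//
+//	ln, err := net.Listen("tcp", ":8080")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.Serve(ln))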
+func (srv *Server) Serve(l net.Listener) error {
+ if fn := testHookServerServe; fn != nil {
+ fn(srv, l) // call hook with unwrapped listener
+ }
+
+ origListener := l
+ l = &onceCloseListener{Listener: l}
+ defer l.Close()
+
+ if err := srv.setupHTTP2_Serve(); err != nil {
+ return err
+ }
+
+ if !srv.trackListener(&l, true) {
+ return ErrServerClosed
+ }
+ defer srv.trackListener(&l, false)
+
+ baseCtx := context.Background()
+ if srv.BaseContext != nil {
+ baseCtx = srv.BaseContext(origListener)
+ if baseCtx == nil {
+ panic("BaseContext returned a nil context")
+ }
+ }
+
+ var tempDelay time.Duration // how long to sleep on accept failure
+
+ ctx := context.WithValue(baseCtx, ServerContextKey, srv)
+ for {
+ rw, err := l.Accept()
+ if err != nil {
+ if srv.shuttingDown() {
+ return ErrServerClosed
+ }
+ if ne, ok := err.(net.Error); ok && ne.Temporary() {
+ if tempDelay == 0 {
+ tempDelay = 5 * time.Millisecond
+ } else {
+ tempDelay *= 2
+ }
+ if max := 1 * time.Second; tempDelay > max {
+ tempDelay = max
+ }
+ srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
+ time.Sleep(tempDelay)
+ continue
+ }
+ return err
+ }
+ connCtx := ctx
+ if cc := srv.ConnContext; cc != nil {
+ connCtx = cc(connCtx, rw)
+ if connCtx == nil {
+ panic("ConnContext returned nil")
+ }
+ }
+ tempDelay = 0
+ c := srv.newConn(rw)
+ c.setState(c.rwc, StateNew, runHooks) // before Serve can return
+ go c.serve(connCtx)
+ }
+}
+
+// ServeTLS accepts incoming connections on the Listener l, creating a
+// new service goroutine for each. The service goroutines perform TLS
+// setup and then read requests, calling srv.Handler to reply to them.
+//
+// Files containing a certificate and matching private key for the
+// server must be provided if neither the [Server]'s
+// TLSConfig.Certificates nor TLSConfig.GetCertificate are populated.
+// If the certificate is signed by a certificate authority, the
+// certFile should be the concatenation of the server's certificate,
+// any intermediates, and the CA's certificate.
+//
+// ServeTLS always returns a non-nil error. After [Server.Shutdown] or [Server.Close], the
+// returned error is [ErrServerClosed].
+func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
+ // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
+ // before we clone it and create the TLS Listener.
+ if err := srv.setupHTTP2_ServeTLS(); err != nil {
+ return err
+ }
+
+ config := cloneTLSConfig(srv.TLSConfig)
+ if !strSliceContains(config.NextProtos, "http/1.1") {
+ config.NextProtos = append(config.NextProtos, "http/1.1")
+ }
+
+ configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
+ if !configHasCert || certFile != "" || keyFile != "" {
+ var err error
+ config.Certificates = make([]tls.Certificate, 1)
+ config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+ }
+
+ tlsListener := tls.NewListener(l, config)
+ return srv.Serve(tlsListener)
+}
+
+// trackListener adds or removes a net.Listener to or from the set of
+// tracked listeners.
+//
+// We store a pointer to the interface value in the map set, in case
+// the net.Listener is not comparable. This is safe because we only call
+// trackListener via Serve and can track+defer untrack the same
+// pointer to a local variable there. We never need to compare a
+// Listener from another caller.
+//
+// It reports whether the server is still up (not Shutdown or Closed).
+func (s *Server) trackListener(ln *net.Listener, add bool) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.listeners == nil {
+ s.listeners = make(map[*net.Listener]struct{})
+ }
+ if add {
+ if s.shuttingDown() {
+ return false
+ }
+ s.listeners[ln] = struct{}{}
+ s.listenerGroup.Add(1)
+ } else {
+ delete(s.listeners, ln)
+ s.listenerGroup.Done()
+ }
+ return true
+}
+
+func (s *Server) trackConn(c *conn, add bool) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.activeConn == nil {
+ s.activeConn = make(map[*conn]struct{})
+ }
+ if add {
+ s.activeConn[c] = struct{}{}
+ } else {
+ delete(s.activeConn, c)
+ }
+}
+
+func (s *Server) idleTimeout() time.Duration {
+ if s.IdleTimeout != 0 {
+ return s.IdleTimeout
+ }
+ return s.ReadTimeout
+}
+
+func (s *Server) readHeaderTimeout() time.Duration {
+ if s.ReadHeaderTimeout != 0 {
+ return s.ReadHeaderTimeout
+ }
+ return s.ReadTimeout
+}
+
+func (s *Server) doKeepAlives() bool {
+ return !s.disableKeepAlives.Load() && !s.shuttingDown()
+}
+
+func (s *Server) shuttingDown() bool {
+ return s.inShutdown.Load()
+}
+
+// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled.
+// By default, keep-alives are always enabled. Only very
+// resource-constrained environments or servers in the process of
+// shutting down should disable them.
+func (srv *Server) SetKeepAlivesEnabled(v bool) {
+ if v {
+ srv.disableKeepAlives.Store(false)
+ return
+ }
+ srv.disableKeepAlives.Store(true)
+
+ // Close idle HTTP/1 conns:
+ srv.closeIdleConns()
+
+ // TODO: Issue 26303: close HTTP/2 conns as soon as they become idle.
+}
+
+func (s *Server) logf(format string, args ...any) {
+ if s.ErrorLog != nil {
+ s.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// logf prints to the ErrorLog of the *Server associated with request r
+// via ServerContextKey. If there's no associated server, or if ErrorLog
+// is nil, logging is done via the log package's standard logger.
+func logf(r *Request, format string, args ...any) {
+ s, _ := r.Context().Value(ServerContextKey).(*Server)
+ if s != nil && s.ErrorLog != nil {
+ s.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// ListenAndServe listens on the TCP network address addr and then calls
+// [Serve] with handler to handle requests on incoming connections.
+// Accepted connections are configured to enable TCP keep-alives.
+//
+// The handler is typically nil, in which case [DefaultServeMux] is used.
+//
+// ListenAndServe always returns a non-nil error.
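+//
+// A minimal sketch:
+//
+//	HandleFunc("/", func(w ResponseWriter, r *Request) {
+//		io.WriteString(w, "hello")
+//	})
+//	log.Fatal(ListenAndServe(":8080", nil))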
+func ListenAndServe(addr string, handler Handler) error {
+ server := &Server{Addr: addr, Handler: handler}
+ return server.ListenAndServe()
+}
+
+// ListenAndServeTLS acts identically to [ListenAndServe], except that it
+// expects HTTPS connections. Additionally, files containing a certificate and
+// matching private key for the server must be provided. If the certificate
+// is signed by a certificate authority, the certFile should be the concatenation
+// of the server's certificate, any intermediates, and the CA's certificate.
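+//
+// A minimal sketch (the certificate and key file names are illustrative):
+//
+//	log.Fatal(ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))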
+func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
+ server := &Server{Addr: addr, Handler: handler}
+ return server.ListenAndServeTLS(certFile, keyFile)
+}
+
+// ListenAndServeTLS listens on the TCP network address srv.Addr and
+// then calls [ServeTLS] to handle requests on incoming TLS connections.
+// Accepted connections are configured to enable TCP keep-alives.
+//
+// Filenames containing a certificate and matching private key for the
+// server must be provided if neither the [Server]'s TLSConfig.Certificates
+// nor TLSConfig.GetCertificate are populated. If the certificate is
+// signed by a certificate authority, the certFile should be the
+// concatenation of the server's certificate, any intermediates, and
+// the CA's certificate.
+//
+// If srv.Addr is blank, ":https" is used.
+//
+// ListenAndServeTLS always returns a non-nil error. After [Server.Shutdown] or
+// [Server.Close], the returned error is [ErrServerClosed].
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
+ if srv.shuttingDown() {
+ return ErrServerClosed
+ }
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":https"
+ }
+
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ return err
+ }
+
+ defer ln.Close()
+
+ return srv.ServeTLS(ln, certFile, keyFile)
+}
+
+// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
+// srv and returns any error encountered while setting it up. If HTTP/2
+// is not configured for policy reasons, nil is returned.
+func (srv *Server) setupHTTP2_ServeTLS() error {
+ srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
+ return srv.nextProtoErr
+}
+
+// setupHTTP2_Serve is called from (*Server).Serve and conditionally
+// configures HTTP/2 on srv using a more conservative policy than
+// setupHTTP2_ServeTLS because Serve is called after tls.Listen,
+// and may be called concurrently. See shouldConfigureHTTP2ForServe.
+//
+// The tests named TestTransportAutomaticHTTP2* and
+// TestConcurrentServerServe in server_test.go demonstrate some
+// of the supported use cases and motivations.
+func (srv *Server) setupHTTP2_Serve() error {
+ srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve)
+ return srv.nextProtoErr
+}
+
+func (srv *Server) onceSetNextProtoDefaults_Serve() {
+ if srv.shouldConfigureHTTP2ForServe() {
+ srv.onceSetNextProtoDefaults()
+ }
+}
+
+var http2server = godebug.New("http2server")
+
+// onceSetNextProtoDefaults configures HTTP/2 if the user hasn't
+// configured otherwise (by setting srv.TLSNextProto non-nil).
+// It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*).
+func (srv *Server) onceSetNextProtoDefaults() {
+ if omitBundledHTTP2 {
+ return
+ }
+ if http2server.Value() == "0" {
+ http2server.IncNonDefault()
+ return
+ }
+ // Enable HTTP/2 by default if the user hasn't otherwise
+ // configured their TLSNextProto map.
+ if srv.TLSNextProto == nil {
+ conf := &http2Server{
+ NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
+ }
+ srv.nextProtoErr = http2ConfigureServer(srv, conf)
+ }
+}
+
+// TimeoutHandler returns a [Handler] that runs h with the given time limit.
+//
+// The new Handler calls h.ServeHTTP to handle each request, but if a
+// call runs for longer than its time limit, the handler responds with
+// a 503 Service Unavailable error and the given message in its body.
+// (If msg is empty, a suitable default message will be sent.)
+// After such a timeout, writes by h to its [ResponseWriter] will return
+// [ErrHandlerTimeout].
+//
+// TimeoutHandler supports the [Pusher] interface but does not support
+// the [Hijacker] or [Flusher] interfaces.
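+//
+// For example (sketch; slowHandler is an illustrative [Handler]):
+//
+//	h := TimeoutHandler(slowHandler, 2*time.Second, "request timed out")
+//	log.Fatal(ListenAndServe(":8080", h))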
+func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
+ return &timeoutHandler{
+ handler: h,
+ body: msg,
+ dt: dt,
+ }
+}
+
+// ErrHandlerTimeout is returned on [ResponseWriter] Write calls
+// in handlers which have timed out.
+var ErrHandlerTimeout = errors.New("http: Handler timeout")
+
+type timeoutHandler struct {
+ handler Handler
+ body string
+ dt time.Duration
+
+ // When set, no context will be created and this context will
+ // be used instead.
+ testContext context.Context
+}
+
+func (h *timeoutHandler) errorBody() string {
+ if h.body != "" {
+ return h.body
+ }
+ return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
+}
+
+func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ ctx := h.testContext
+ if ctx == nil {
+ var cancelCtx context.CancelFunc
+ ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt)
+ defer cancelCtx()
+ }
+ r = r.WithContext(ctx)
+ done := make(chan struct{})
+ tw := &timeoutWriter{
+ w: w,
+ h: make(Header),
+ req: r,
+ }
+ panicChan := make(chan any, 1)
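+	// Run the wrapped handler in a separate goroutine, relaying any
+	// panic back to this goroutine so it propagates as usual.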
+ go func() {
+ defer func() {
+ if p := recover(); p != nil {
+ panicChan <- p
+ }
+ }()
+ h.handler.ServeHTTP(tw, r)
+ close(done)
+ }()
+ select {
+ case p := <-panicChan:
+ panic(p)
+ case <-done:
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ dst := w.Header()
+ for k, vv := range tw.h {
+ dst[k] = vv
+ }
+ if !tw.wroteHeader {
+ tw.code = StatusOK
+ }
+ w.WriteHeader(tw.code)
+ w.Write(tw.wbuf.Bytes())
+ case <-ctx.Done():
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ switch err := ctx.Err(); err {
+ case context.DeadlineExceeded:
+ w.WriteHeader(StatusServiceUnavailable)
+ io.WriteString(w, h.errorBody())
+ tw.err = ErrHandlerTimeout
+ default:
+ w.WriteHeader(StatusServiceUnavailable)
+ tw.err = err
+ }
+ }
+}
+
+type timeoutWriter struct {
+ w ResponseWriter
+ h Header
+ wbuf bytes.Buffer
+ req *Request
+
+ mu sync.Mutex
+ err error
+ wroteHeader bool
+ code int
+}
+
+var _ Pusher = (*timeoutWriter)(nil)
+
+// Push implements the [Pusher] interface.
+func (tw *timeoutWriter) Push(target string, opts *PushOptions) error {
+ if pusher, ok := tw.w.(Pusher); ok {
+ return pusher.Push(target, opts)
+ }
+ return ErrNotSupported
+}
+
+func (tw *timeoutWriter) Header() Header { return tw.h }
+
+func (tw *timeoutWriter) Write(p []byte) (int, error) {
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ if tw.err != nil {
+ return 0, tw.err
+ }
+ if !tw.wroteHeader {
+ tw.writeHeaderLocked(StatusOK)
+ }
+ return tw.wbuf.Write(p)
+}
+
+func (tw *timeoutWriter) writeHeaderLocked(code int) {
+ checkWriteHeaderCode(code)
+
+ switch {
+ case tw.err != nil:
+ return
+ case tw.wroteHeader:
+ if tw.req != nil {
+ caller := relevantCaller()
+ logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
+ }
+ default:
+ tw.wroteHeader = true
+ tw.code = code
+ }
+}
+
+func (tw *timeoutWriter) WriteHeader(code int) {
+ tw.mu.Lock()
+ defer tw.mu.Unlock()
+ tw.writeHeaderLocked(code)
+}
+
+// onceCloseListener wraps a net.Listener, protecting it from
+// multiple Close calls.
+type onceCloseListener struct {
+ net.Listener
+ once sync.Once
+ closeErr error
+}
+
+func (oc *onceCloseListener) Close() error {
+ oc.once.Do(oc.close)
+ return oc.closeErr
+}
+
+func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() }
+
+// globalOptionsHandler responds to "OPTIONS *" requests.
+type globalOptionsHandler struct{}
+
+func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Length", "0")
+ if r.ContentLength != 0 {
+		// Read up to 4KB of OPTIONS body (the spec reserves the
+		// OPTIONS body for future use). Anything over that is
+		// considered a waste of server resources (or an attack),
+		// so we abort and close the connection, courtesy of
+		// MaxBytesReader's EOF behavior.
+ mb := MaxBytesReader(w, r.Body, 4<<10)
+ io.Copy(io.Discard, mb)
+ }
+}
+
+// initALPNRequest is an HTTP handler that initializes certain
+// uninitialized fields in its *Request. Such partially-initialized
+// Requests come from ALPN protocol handlers.
+type initALPNRequest struct {
+ ctx context.Context
+ c *tls.Conn
+ h serverHandler
+}
+
+// BaseContext is an exported but unadvertised [http.Handler] method
+// recognized by x/net/http2 to pass down a context; the TLSNextProto
+// API predates context support so we shoehorn through the only
+// interface we have available.
+func (h initALPNRequest) BaseContext() context.Context { return h.ctx }
+
+func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
+ if req.TLS == nil {
+ req.TLS = &tls.ConnectionState{}
+ *req.TLS = h.c.ConnectionState()
+ }
+ if req.Body == nil {
+ req.Body = NoBody
+ }
+ if req.RemoteAddr == "" {
+ req.RemoteAddr = h.c.RemoteAddr().String()
+ }
+ h.h.ServeHTTP(rw, req)
+}
+
+// loggingConn is used for debugging.
+type loggingConn struct {
+ name string
+ net.Conn
+}
+
+var (
+ uniqNameMu sync.Mutex
+ uniqNameNext = make(map[string]int)
+)
+
+func newLoggingConn(baseName string, c net.Conn) net.Conn {
+ uniqNameMu.Lock()
+ defer uniqNameMu.Unlock()
+ uniqNameNext[baseName]++
+ return &loggingConn{
+ name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
+ Conn: c,
+ }
+}
+
+func (c *loggingConn) Write(p []byte) (n int, err error) {
+ log.Printf("%s.Write(%d) = ....", c.name, len(p))
+ n, err = c.Conn.Write(p)
+ log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
+ return
+}
+
+func (c *loggingConn) Read(p []byte) (n int, err error) {
+ log.Printf("%s.Read(%d) = ....", c.name, len(p))
+ n, err = c.Conn.Read(p)
+ log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
+ return
+}
+
+func (c *loggingConn) Close() (err error) {
+ log.Printf("%s.Close() = ...", c.name)
+ err = c.Conn.Close()
+ log.Printf("%s.Close() = %v", c.name, err)
+ return
+}
+
+// checkConnErrorWriter writes to c.rwc and records any write errors to c.werr.
+// It only contains one field (and a pointer field at that), so it
+// fits in an interface value without an extra allocation.
+type checkConnErrorWriter struct {
+ c *conn
+}
+
+func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
+ n, err = w.c.rwc.Write(p)
+ if err != nil && w.c.werr == nil {
+ w.c.werr = err
+ w.c.cancelCtx()
+ }
+ return
+}
+
+func numLeadingCRorLF(v []byte) (n int) {
+ for _, b := range v {
+ if b == '\r' || b == '\n' {
+ n++
+ continue
+ }
+ break
+ }
+ return
+}
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header
+// looks like it might've been a misdirected plaintext HTTP request.
+func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool {
+ switch string(hdr[:]) {
+ case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
+ return true
+ }
+ return false
+}
+
+// MaxBytesHandler returns a [Handler] that runs h with its [ResponseWriter] and [Request.Body] wrapped by a MaxBytesReader.
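+//
+// For example, to cap request bodies at 1 MiB (sketch; mux is illustrative):
+//
+//	log.Fatal(ListenAndServe(":8080", MaxBytesHandler(mux, 1<<20)))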
+func MaxBytesHandler(h Handler, n int64) Handler {
+ return HandlerFunc(func(w ResponseWriter, r *Request) {
+ r2 := *r
+ r2.Body = MaxBytesReader(w, r.Body, n)
+ h.ServeHTTP(w, &r2)
+ })
+}
diff --git a/contrib/go/_std_1.22/src/net/http/sniff.go b/contrib/go/_std_1.22/src/net/http/sniff.go
new file mode 100644
index 0000000000..ac18ab979d
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/sniff.go
@@ -0,0 +1,304 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// The algorithm uses at most sniffLen bytes to make its decision.
+const sniffLen = 512
+
+// DetectContentType implements the algorithm described
+// at https://mimesniff.spec.whatwg.org/ to determine the
+// Content-Type of the given data. It considers at most the
+// first 512 bytes of data. DetectContentType always returns
+// a valid MIME type: if it cannot determine a more specific one, it
+// returns "application/octet-stream".
+func DetectContentType(data []byte) string {
+ if len(data) > sniffLen {
+ data = data[:sniffLen]
+ }
+
+ // Index of the first non-whitespace byte in data.
+ firstNonWS := 0
+ for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
+ }
+
+ for _, sig := range sniffSignatures {
+ if ct := sig.match(data, firstNonWS); ct != "" {
+ return ct
+ }
+ }
+
+ return "application/octet-stream" // fallback
+}
+
+// isWS reports whether the provided byte is a whitespace byte (0xWS)
+// as defined in https://mimesniff.spec.whatwg.org/#terminology.
+func isWS(b byte) bool {
+ switch b {
+ case '\t', '\n', '\x0c', '\r', ' ':
+ return true
+ }
+ return false
+}
+
+// isTT reports whether the provided byte is a tag-terminating byte (0xTT)
+// as defined in https://mimesniff.spec.whatwg.org/#terminology.
+func isTT(b byte) bool {
+ switch b {
+ case ' ', '>':
+ return true
+ }
+ return false
+}
+
+type sniffSig interface {
+ // match returns the MIME type of the data, or "" if unknown.
+ match(data []byte, firstNonWS int) string
+}
+
+// Data matching the table in section 6.
+var sniffSignatures = []sniffSig{
+ htmlSig("<!DOCTYPE HTML"),
+ htmlSig("<HTML"),
+ htmlSig("<HEAD"),
+ htmlSig("<SCRIPT"),
+ htmlSig("<IFRAME"),
+ htmlSig("<H1"),
+ htmlSig("<DIV"),
+ htmlSig("<FONT"),
+ htmlSig("<TABLE"),
+ htmlSig("<A"),
+ htmlSig("<STYLE"),
+ htmlSig("<TITLE"),
+ htmlSig("<B"),
+ htmlSig("<BODY"),
+ htmlSig("<BR"),
+ htmlSig("<P"),
+ htmlSig("<!--"),
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("<?xml"),
+ skipWS: true,
+ ct: "text/xml; charset=utf-8"},
+ &exactSig{[]byte("%PDF-"), "application/pdf"},
+ &exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},
+
+ // UTF BOMs.
+ &maskedSig{
+ mask: []byte("\xFF\xFF\x00\x00"),
+ pat: []byte("\xFE\xFF\x00\x00"),
+ ct: "text/plain; charset=utf-16be",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\x00\x00"),
+ pat: []byte("\xFF\xFE\x00\x00"),
+ ct: "text/plain; charset=utf-16le",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\x00"),
+ pat: []byte("\xEF\xBB\xBF\x00"),
+ ct: "text/plain; charset=utf-8",
+ },
+
+ // Image types
+ // For posterity, we originally returned "image/vnd.microsoft.icon" from
+ // https://tools.ietf.org/html/draft-ietf-websec-mime-sniff-03#section-7
+ // https://codereview.appspot.com/4746042
+ // but that has since been replaced with "image/x-icon" in Section 6.2
+ // of https://mimesniff.spec.whatwg.org/#matching-an-image-type-pattern
+ &exactSig{[]byte("\x00\x00\x01\x00"), "image/x-icon"},
+ &exactSig{[]byte("\x00\x00\x02\x00"), "image/x-icon"},
+ &exactSig{[]byte("BM"), "image/bmp"},
+ &exactSig{[]byte("GIF87a"), "image/gif"},
+ &exactSig{[]byte("GIF89a"), "image/gif"},
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00WEBPVP"),
+ ct: "image/webp",
+ },
+ &exactSig{[]byte("\x89PNG\x0D\x0A\x1A\x0A"), "image/png"},
+ &exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
+
+ // Audio and Video types
+ // Enforce the pattern match ordering as prescribed in
+ // https://mimesniff.spec.whatwg.org/#matching-an-audio-or-video-type-pattern
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+ pat: []byte("FORM\x00\x00\x00\x00AIFF"),
+ ct: "audio/aiff",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF"),
+ pat: []byte("ID3"),
+ ct: "audio/mpeg",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("OggS\x00"),
+ ct: "application/ogg",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("MThd\x00\x00\x00\x06"),
+ ct: "audio/midi",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00AVI "),
+ ct: "video/avi",
+ },
+ &maskedSig{
+ mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+ pat: []byte("RIFF\x00\x00\x00\x00WAVE"),
+ ct: "audio/wave",
+ },
+ // 6.2.0.2. video/mp4
+ mp4Sig{},
+ // 6.2.0.3. video/webm
+ &exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},
+
+ // Font types
+ &maskedSig{
+ // 34 NULL bytes followed by the string "LP"
+ pat: []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LP"),
+		// 34 NULL bytes followed by \xFF\xFF
+ mask: []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF\xFF"),
+ ct: "application/vnd.ms-fontobject",
+ },
+ &exactSig{[]byte("\x00\x01\x00\x00"), "font/ttf"},
+ &exactSig{[]byte("OTTO"), "font/otf"},
+ &exactSig{[]byte("ttcf"), "font/collection"},
+ &exactSig{[]byte("wOFF"), "font/woff"},
+ &exactSig{[]byte("wOF2"), "font/woff2"},
+
+ // Archive types
+ &exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},
+ &exactSig{[]byte("PK\x03\x04"), "application/zip"},
+ // RAR's signatures are incorrectly defined by the MIME spec as per
+ // https://github.com/whatwg/mimesniff/issues/63
+ // However, RAR Labs correctly defines it at:
+ // https://www.rarlab.com/technote.htm#rarsign
+ // so we use the definition from RAR Labs.
+ // TODO: do whatever the spec ends up doing.
+ &exactSig{[]byte("Rar!\x1A\x07\x00"), "application/x-rar-compressed"}, // RAR v1.5-v4.0
+ &exactSig{[]byte("Rar!\x1A\x07\x01\x00"), "application/x-rar-compressed"}, // RAR v5+
+
+ &exactSig{[]byte("\x00\x61\x73\x6D"), "application/wasm"},
+
+ textSig{}, // should be last
+}
+
+type exactSig struct {
+ sig []byte
+ ct string
+}
+
+func (e *exactSig) match(data []byte, firstNonWS int) string {
+ if bytes.HasPrefix(data, e.sig) {
+ return e.ct
+ }
+ return ""
+}
+
+type maskedSig struct {
+ mask, pat []byte
+ skipWS bool
+ ct string
+}
+
+func (m *maskedSig) match(data []byte, firstNonWS int) string {
+ // pattern matching algorithm section 6
+ // https://mimesniff.spec.whatwg.org/#pattern-matching-algorithm
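+	//
+	// Each input byte is ANDed with the corresponding mask byte and
+	// compared against the pattern byte; any mismatch means no match.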
+
+ if m.skipWS {
+ data = data[firstNonWS:]
+ }
+ if len(m.pat) != len(m.mask) {
+ return ""
+ }
+ if len(data) < len(m.pat) {
+ return ""
+ }
+ for i, pb := range m.pat {
+ maskedData := data[i] & m.mask[i]
+ if maskedData != pb {
+ return ""
+ }
+ }
+ return m.ct
+}
+
+type htmlSig []byte
+
+func (h htmlSig) match(data []byte, firstNonWS int) string {
+ data = data[firstNonWS:]
+ if len(data) < len(h)+1 {
+ return ""
+ }
+ for i, b := range h {
+ db := data[i]
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return ""
+ }
+ }
+	// Next byte must be a tag-terminating byte (0xTT).
+ if !isTT(data[len(h)]) {
+ return ""
+ }
+ return "text/html; charset=utf-8"
+}
+
+var mp4ftype = []byte("ftyp")
+var mp4 = []byte("mp4")
+
+type mp4Sig struct{}
+
+func (mp4Sig) match(data []byte, firstNonWS int) string {
+ // https://mimesniff.spec.whatwg.org/#signature-for-mp4
+ // c.f. section 6.2.1
+ if len(data) < 12 {
+ return ""
+ }
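+	// The first four bytes hold the big-endian size of the "ftyp" box.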
+ boxSize := int(binary.BigEndian.Uint32(data[:4]))
+ if len(data) < boxSize || boxSize%4 != 0 {
+ return ""
+ }
+ if !bytes.Equal(data[4:8], mp4ftype) {
+ return ""
+ }
+ for st := 8; st < boxSize; st += 4 {
+ if st == 12 {
+			// Skip the four bytes that hold the version number of the "major brand".
+ continue
+ }
+ if bytes.Equal(data[st:st+3], mp4) {
+ return "video/mp4"
+ }
+ }
+ return ""
+}
+
+type textSig struct{}
+
+func (textSig) match(data []byte, firstNonWS int) string {
+ // c.f. section 5, step 4.
+ for _, b := range data[firstNonWS:] {
+ switch {
+ case b <= 0x08,
+ b == 0x0B,
+ 0x0E <= b && b <= 0x1A,
+ 0x1C <= b && b <= 0x1F:
+ return ""
+ }
+ }
+ return "text/plain; charset=utf-8"
+}
diff --git a/contrib/go/_std_1.22/src/net/http/socks_bundle.go b/contrib/go/_std_1.22/src/net/http/socks_bundle.go
new file mode 100644
index 0000000000..776b03d941
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/socks_bundle.go
@@ -0,0 +1,473 @@
+// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
+//go:generate bundle -o socks_bundle.go -prefix socks golang.org/x/net/internal/socks
+
+// Package socks provides a SOCKS version 5 client implementation.
+//
+// SOCKS protocol version 5 is defined in RFC 1928.
+// Username/Password authentication for SOCKS version 5 is defined in
+// RFC 1929.
+//
+
+package http
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+ "time"
+)
+
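+// socksnoDeadline clears a connection's deadline, while socksaLongTimeAgo
+// is a time in the distant past used to force in-flight reads and writes
+// to fail immediately on cancelation.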
+var (
+ socksnoDeadline = time.Time{}
+ socksaLongTimeAgo = time.Unix(1, 0)
+)
+
+func (d *socksDialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) {
+ host, port, err := sockssplitHostPort(address)
+ if err != nil {
+ return nil, err
+ }
+ if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
+ c.SetDeadline(deadline)
+ defer c.SetDeadline(socksnoDeadline)
+ }
+ if ctx != context.Background() {
+ errCh := make(chan error, 1)
+ done := make(chan struct{})
+ defer func() {
+ close(done)
+ if ctxErr == nil {
+ ctxErr = <-errCh
+ }
+ }()
+ go func() {
+ select {
+ case <-ctx.Done():
+ c.SetDeadline(socksaLongTimeAgo)
+ errCh <- ctx.Err()
+ case <-done:
+ errCh <- nil
+ }
+ }()
+ }
+
+ b := make([]byte, 0, 6+len(host)) // the size here is just an estimate
+ b = append(b, socksVersion5)
+ if len(d.AuthMethods) == 0 || d.Authenticate == nil {
+ b = append(b, 1, byte(socksAuthMethodNotRequired))
+ } else {
+ ams := d.AuthMethods
+ if len(ams) > 255 {
+ return nil, errors.New("too many authentication methods")
+ }
+ b = append(b, byte(len(ams)))
+ for _, am := range ams {
+ b = append(b, byte(am))
+ }
+ }
+ if _, ctxErr = c.Write(b); ctxErr != nil {
+ return
+ }
+
+ if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil {
+ return
+ }
+ if b[0] != socksVersion5 {
+ return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
+ }
+ am := socksAuthMethod(b[1])
+ if am == socksAuthMethodNoAcceptableMethods {
+ return nil, errors.New("no acceptable authentication methods")
+ }
+ if d.Authenticate != nil {
+ if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil {
+ return
+ }
+ }
+
+ b = b[:0]
+ b = append(b, socksVersion5, byte(d.cmd), 0)
+ if ip := net.ParseIP(host); ip != nil {
+ if ip4 := ip.To4(); ip4 != nil {
+ b = append(b, socksAddrTypeIPv4)
+ b = append(b, ip4...)
+ } else if ip6 := ip.To16(); ip6 != nil {
+ b = append(b, socksAddrTypeIPv6)
+ b = append(b, ip6...)
+ } else {
+ return nil, errors.New("unknown address type")
+ }
+ } else {
+ if len(host) > 255 {
+ return nil, errors.New("FQDN too long")
+ }
+ b = append(b, socksAddrTypeFQDN)
+ b = append(b, byte(len(host)))
+ b = append(b, host...)
+ }
+ b = append(b, byte(port>>8), byte(port))
+ if _, ctxErr = c.Write(b); ctxErr != nil {
+ return
+ }
+
+ if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil {
+ return
+ }
+ if b[0] != socksVersion5 {
+ return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
+ }
+ if cmdErr := socksReply(b[1]); cmdErr != socksStatusSucceeded {
+ return nil, errors.New("unknown error " + cmdErr.String())
+ }
+ if b[2] != 0 {
+ return nil, errors.New("non-zero reserved field")
+ }
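+	// The reply ends with the bound address: an address body whose
+	// length depends on the address type byte, followed by a two-byte
+	// port. l accumulates how many bytes remain to be read.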
+ l := 2
+ var a socksAddr
+ switch b[3] {
+ case socksAddrTypeIPv4:
+ l += net.IPv4len
+ a.IP = make(net.IP, net.IPv4len)
+ case socksAddrTypeIPv6:
+ l += net.IPv6len
+ a.IP = make(net.IP, net.IPv6len)
+ case socksAddrTypeFQDN:
+ if _, err := io.ReadFull(c, b[:1]); err != nil {
+ return nil, err
+ }
+ l += int(b[0])
+ default:
+ return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3])))
+ }
+ if cap(b) < l {
+ b = make([]byte, l)
+ } else {
+ b = b[:l]
+ }
+ if _, ctxErr = io.ReadFull(c, b); ctxErr != nil {
+ return
+ }
+ if a.IP != nil {
+ copy(a.IP, b)
+ } else {
+ a.Name = string(b[:len(b)-2])
+ }
+ a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1])
+ return &a, nil
+}
+
+func sockssplitHostPort(address string) (string, int, error) {
+ host, port, err := net.SplitHostPort(address)
+ if err != nil {
+ return "", 0, err
+ }
+ portnum, err := strconv.Atoi(port)
+ if err != nil {
+ return "", 0, err
+ }
+ if 1 > portnum || portnum > 0xffff {
+ return "", 0, errors.New("port number out of range " + port)
+ }
+ return host, portnum, nil
+}
+
+// A Command represents a SOCKS command.
+type socksCommand int
+
+func (cmd socksCommand) String() string {
+ switch cmd {
+ case socksCmdConnect:
+ return "socks connect"
+ case sockscmdBind:
+ return "socks bind"
+ default:
+ return "socks " + strconv.Itoa(int(cmd))
+ }
+}
+
+// An AuthMethod represents a SOCKS authentication method.
+type socksAuthMethod int
+
+// A Reply represents a SOCKS command reply code.
+type socksReply int
+
+func (code socksReply) String() string {
+ switch code {
+ case socksStatusSucceeded:
+ return "succeeded"
+ case 0x01:
+ return "general SOCKS server failure"
+ case 0x02:
+ return "connection not allowed by ruleset"
+ case 0x03:
+ return "network unreachable"
+ case 0x04:
+ return "host unreachable"
+ case 0x05:
+ return "connection refused"
+ case 0x06:
+ return "TTL expired"
+ case 0x07:
+ return "command not supported"
+ case 0x08:
+ return "address type not supported"
+ default:
+ return "unknown code: " + strconv.Itoa(int(code))
+ }
+}
+
+// Wire protocol constants.
+const (
+ socksVersion5 = 0x05
+
+ socksAddrTypeIPv4 = 0x01
+ socksAddrTypeFQDN = 0x03
+ socksAddrTypeIPv6 = 0x04
+
+ socksCmdConnect socksCommand = 0x01 // establishes an active-open forward proxy connection
+ sockscmdBind socksCommand = 0x02 // establishes a passive-open forward proxy connection
+
+ socksAuthMethodNotRequired socksAuthMethod = 0x00 // no authentication required
+ socksAuthMethodUsernamePassword socksAuthMethod = 0x02 // use username/password
+ socksAuthMethodNoAcceptableMethods socksAuthMethod = 0xff // no acceptable authentication methods
+
+ socksStatusSucceeded socksReply = 0x00
+)
+
+// An Addr represents a SOCKS-specific address.
+// Either Name or IP is used exclusively.
+type socksAddr struct {
+ Name string // fully-qualified domain name
+ IP net.IP
+ Port int
+}
+
+func (a *socksAddr) Network() string { return "socks" }
+
+func (a *socksAddr) String() string {
+ if a == nil {
+ return "<nil>"
+ }
+ port := strconv.Itoa(a.Port)
+ if a.IP == nil {
+ return net.JoinHostPort(a.Name, port)
+ }
+ return net.JoinHostPort(a.IP.String(), port)
+}
+
+// A Conn represents a forward proxy connection.
+type socksConn struct {
+ net.Conn
+
+ boundAddr net.Addr
+}
+
+// BoundAddr returns the address assigned by the proxy server for
+// connecting to the command target address from the proxy server.
+func (c *socksConn) BoundAddr() net.Addr {
+ if c == nil {
+ return nil
+ }
+ return c.boundAddr
+}
+
+// A Dialer holds SOCKS-specific options.
+type socksDialer struct {
+ cmd socksCommand // either CmdConnect or cmdBind
+ proxyNetwork string // network between a proxy server and a client
+ proxyAddress string // proxy server address
+
+ // ProxyDial specifies the optional dial function for
+ // establishing the transport connection.
+ ProxyDial func(context.Context, string, string) (net.Conn, error)
+
+ // AuthMethods specifies the list of request authentication
+ // methods.
+ // If empty, SOCKS client requests only AuthMethodNotRequired.
+ AuthMethods []socksAuthMethod
+
+ // Authenticate specifies the optional authentication
+ // function. It must be non-nil when AuthMethods is not empty.
+	// It must return an error when authentication fails.
+ Authenticate func(context.Context, io.ReadWriter, socksAuthMethod) error
+}
+
+// DialContext connects to the provided address on the provided
+// network.
+//
+// The returned error value may be a net.OpError. When the Op field of
+// net.OpError contains "socks", the Source field contains a proxy
+// server address and the Addr field contains a command target
+// address.
+//
+// See func Dial of the net package of standard library for a
+// description of the network and address parameters.
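+//
+// Within this bundled copy, a dialer is constructed with socksNewDialer;
+// for example (sketch; the proxy and target addresses are illustrative):
+//
+//	d := socksNewDialer("tcp", "127.0.0.1:1080")
+//	c, err := d.DialContext(context.Background(), "tcp", "example.com:80")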
+func (d *socksDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if ctx == nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+ }
+ var err error
+ var c net.Conn
+ if d.ProxyDial != nil {
+ c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress)
+ } else {
+ var dd net.Dialer
+ c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress)
+ }
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ a, err := d.connect(ctx, c, address)
+ if err != nil {
+ c.Close()
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ return &socksConn{Conn: c, boundAddr: a}, nil
+}
+
+// DialWithConn initiates a connection from the SOCKS server to the target
+// network and address using the connection c that is already
+// connected to the SOCKS server.
+//
+// It returns the connection's local address assigned by the SOCKS
+// server.
+func (d *socksDialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if ctx == nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
+ }
+ a, err := d.connect(ctx, c, address)
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ return a, nil
+}
+
+// Dial connects to the provided address on the provided network.
+//
+// Unlike DialContext, it returns a raw transport connection instead
+// of a forward proxy connection.
+//
+// Deprecated: Use DialContext or DialWithConn instead.
+func (d *socksDialer) Dial(network, address string) (net.Conn, error) {
+ if err := d.validateTarget(network, address); err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ var err error
+ var c net.Conn
+ if d.ProxyDial != nil {
+ c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress)
+ } else {
+ c, err = net.Dial(d.proxyNetwork, d.proxyAddress)
+ }
+ if err != nil {
+ proxy, dst, _ := d.pathAddrs(address)
+ return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
+ }
+ if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil {
+ c.Close()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (d *socksDialer) validateTarget(network, address string) error {
+ switch network {
+ case "tcp", "tcp6", "tcp4":
+ default:
+ return errors.New("network not implemented")
+ }
+ switch d.cmd {
+ case socksCmdConnect, sockscmdBind:
+ default:
+ return errors.New("command not implemented")
+ }
+ return nil
+}
+
+func (d *socksDialer) pathAddrs(address string) (proxy, dst net.Addr, err error) {
+ for i, s := range []string{d.proxyAddress, address} {
+ host, port, err := sockssplitHostPort(s)
+ if err != nil {
+ return nil, nil, err
+ }
+ a := &socksAddr{Port: port}
+ a.IP = net.ParseIP(host)
+ if a.IP == nil {
+ a.Name = host
+ }
+ if i == 0 {
+ proxy = a
+ } else {
+ dst = a
+ }
+ }
+ return
+}
+
+// NewDialer returns a new Dialer that dials through the provided
+// proxy server's network and address.
+func socksNewDialer(network, address string) *socksDialer {
+ return &socksDialer{proxyNetwork: network, proxyAddress: address, cmd: socksCmdConnect}
+}
+
+const (
+ socksauthUsernamePasswordVersion = 0x01
+ socksauthStatusSucceeded = 0x00
+)
+
+// UsernamePassword are the credentials for the username/password
+// authentication method.
+type socksUsernamePassword struct {
+ Username string
+ Password string
+}
+
+// Authenticate authenticates a pair of username and password with the
+// proxy server.
+func (up *socksUsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth socksAuthMethod) error {
+ switch auth {
+ case socksAuthMethodNotRequired:
+ return nil
+ case socksAuthMethodUsernamePassword:
+ if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 {
+ return errors.New("invalid username/password")
+ }
+ b := []byte{socksauthUsernamePasswordVersion}
+ b = append(b, byte(len(up.Username)))
+ b = append(b, up.Username...)
+ b = append(b, byte(len(up.Password)))
+ b = append(b, up.Password...)
+ // TODO(mikio): handle IO deadlines and cancelation if
+ // necessary
+ if _, err := rw.Write(b); err != nil {
+ return err
+ }
+ if _, err := io.ReadFull(rw, b[:2]); err != nil {
+ return err
+ }
+ if b[0] != socksauthUsernamePasswordVersion {
+ return errors.New("invalid username/password version")
+ }
+ if b[1] != socksauthStatusSucceeded {
+ return errors.New("username/password authentication failed")
+ }
+ return nil
+ }
+ return errors.New("unsupported authentication method " + strconv.Itoa(int(auth)))
+}
diff --git a/contrib/go/_std_1.22/src/net/http/status.go b/contrib/go/_std_1.22/src/net/http/status.go
new file mode 100644
index 0000000000..cd90877ef0
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/status.go
@@ -0,0 +1,210 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+// HTTP status codes as registered with IANA.
+// See: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
+const (
+ StatusContinue = 100 // RFC 9110, 15.2.1
+ StatusSwitchingProtocols = 101 // RFC 9110, 15.2.2
+ StatusProcessing = 102 // RFC 2518, 10.1
+ StatusEarlyHints = 103 // RFC 8297
+
+ StatusOK = 200 // RFC 9110, 15.3.1
+ StatusCreated = 201 // RFC 9110, 15.3.2
+ StatusAccepted = 202 // RFC 9110, 15.3.3
+ StatusNonAuthoritativeInfo = 203 // RFC 9110, 15.3.4
+ StatusNoContent = 204 // RFC 9110, 15.3.5
+ StatusResetContent = 205 // RFC 9110, 15.3.6
+ StatusPartialContent = 206 // RFC 9110, 15.3.7
+ StatusMultiStatus = 207 // RFC 4918, 11.1
+ StatusAlreadyReported = 208 // RFC 5842, 7.1
+ StatusIMUsed = 226 // RFC 3229, 10.4.1
+
+ StatusMultipleChoices = 300 // RFC 9110, 15.4.1
+ StatusMovedPermanently = 301 // RFC 9110, 15.4.2
+ StatusFound = 302 // RFC 9110, 15.4.3
+ StatusSeeOther = 303 // RFC 9110, 15.4.4
+ StatusNotModified = 304 // RFC 9110, 15.4.5
+ StatusUseProxy = 305 // RFC 9110, 15.4.6
+ _ = 306 // RFC 9110, 15.4.7 (Unused)
+ StatusTemporaryRedirect = 307 // RFC 9110, 15.4.8
+ StatusPermanentRedirect = 308 // RFC 9110, 15.4.9
+
+ StatusBadRequest = 400 // RFC 9110, 15.5.1
+ StatusUnauthorized = 401 // RFC 9110, 15.5.2
+ StatusPaymentRequired = 402 // RFC 9110, 15.5.3
+ StatusForbidden = 403 // RFC 9110, 15.5.4
+ StatusNotFound = 404 // RFC 9110, 15.5.5
+ StatusMethodNotAllowed = 405 // RFC 9110, 15.5.6
+ StatusNotAcceptable = 406 // RFC 9110, 15.5.7
+ StatusProxyAuthRequired = 407 // RFC 9110, 15.5.8
+ StatusRequestTimeout = 408 // RFC 9110, 15.5.9
+ StatusConflict = 409 // RFC 9110, 15.5.10
+ StatusGone = 410 // RFC 9110, 15.5.11
+ StatusLengthRequired = 411 // RFC 9110, 15.5.12
+ StatusPreconditionFailed = 412 // RFC 9110, 15.5.13
+ StatusRequestEntityTooLarge = 413 // RFC 9110, 15.5.14
+ StatusRequestURITooLong = 414 // RFC 9110, 15.5.15
+ StatusUnsupportedMediaType = 415 // RFC 9110, 15.5.16
+ StatusRequestedRangeNotSatisfiable = 416 // RFC 9110, 15.5.17
+ StatusExpectationFailed = 417 // RFC 9110, 15.5.18
+ StatusTeapot = 418 // RFC 9110, 15.5.19 (Unused)
+ StatusMisdirectedRequest = 421 // RFC 9110, 15.5.20
+ StatusUnprocessableEntity = 422 // RFC 9110, 15.5.21
+ StatusLocked = 423 // RFC 4918, 11.3
+ StatusFailedDependency = 424 // RFC 4918, 11.4
+ StatusTooEarly = 425 // RFC 8470, 5.2.
+ StatusUpgradeRequired = 426 // RFC 9110, 15.5.22
+ StatusPreconditionRequired = 428 // RFC 6585, 3
+ StatusTooManyRequests = 429 // RFC 6585, 4
+ StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5
+ StatusUnavailableForLegalReasons = 451 // RFC 7725, 3
+
+ StatusInternalServerError = 500 // RFC 9110, 15.6.1
+ StatusNotImplemented = 501 // RFC 9110, 15.6.2
+ StatusBadGateway = 502 // RFC 9110, 15.6.3
+ StatusServiceUnavailable = 503 // RFC 9110, 15.6.4
+ StatusGatewayTimeout = 504 // RFC 9110, 15.6.5
+ StatusHTTPVersionNotSupported = 505 // RFC 9110, 15.6.6
+ StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1
+ StatusInsufficientStorage = 507 // RFC 4918, 11.5
+ StatusLoopDetected = 508 // RFC 5842, 7.2
+ StatusNotExtended = 510 // RFC 2774, 7
+ StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6
+)
+
+// StatusText returns a text for the HTTP status code. It returns the empty
+// string if the code is unknown.
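+//
+// For example, StatusText(StatusNotFound) returns "Not Found".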
+func StatusText(code int) string {
+ switch code {
+ case StatusContinue:
+ return "Continue"
+ case StatusSwitchingProtocols:
+ return "Switching Protocols"
+ case StatusProcessing:
+ return "Processing"
+ case StatusEarlyHints:
+ return "Early Hints"
+ case StatusOK:
+ return "OK"
+ case StatusCreated:
+ return "Created"
+ case StatusAccepted:
+ return "Accepted"
+ case StatusNonAuthoritativeInfo:
+ return "Non-Authoritative Information"
+ case StatusNoContent:
+ return "No Content"
+ case StatusResetContent:
+ return "Reset Content"
+ case StatusPartialContent:
+ return "Partial Content"
+ case StatusMultiStatus:
+ return "Multi-Status"
+ case StatusAlreadyReported:
+ return "Already Reported"
+ case StatusIMUsed:
+ return "IM Used"
+ case StatusMultipleChoices:
+ return "Multiple Choices"
+ case StatusMovedPermanently:
+ return "Moved Permanently"
+ case StatusFound:
+ return "Found"
+ case StatusSeeOther:
+ return "See Other"
+ case StatusNotModified:
+ return "Not Modified"
+ case StatusUseProxy:
+ return "Use Proxy"
+ case StatusTemporaryRedirect:
+ return "Temporary Redirect"
+ case StatusPermanentRedirect:
+ return "Permanent Redirect"
+ case StatusBadRequest:
+ return "Bad Request"
+ case StatusUnauthorized:
+ return "Unauthorized"
+ case StatusPaymentRequired:
+ return "Payment Required"
+ case StatusForbidden:
+ return "Forbidden"
+ case StatusNotFound:
+ return "Not Found"
+ case StatusMethodNotAllowed:
+ return "Method Not Allowed"
+ case StatusNotAcceptable:
+ return "Not Acceptable"
+ case StatusProxyAuthRequired:
+ return "Proxy Authentication Required"
+ case StatusRequestTimeout:
+ return "Request Timeout"
+ case StatusConflict:
+ return "Conflict"
+ case StatusGone:
+ return "Gone"
+ case StatusLengthRequired:
+ return "Length Required"
+ case StatusPreconditionFailed:
+ return "Precondition Failed"
+ case StatusRequestEntityTooLarge:
+ return "Request Entity Too Large"
+ case StatusRequestURITooLong:
+ return "Request URI Too Long"
+ case StatusUnsupportedMediaType:
+ return "Unsupported Media Type"
+ case StatusRequestedRangeNotSatisfiable:
+ return "Requested Range Not Satisfiable"
+ case StatusExpectationFailed:
+ return "Expectation Failed"
+ case StatusTeapot:
+ return "I'm a teapot"
+ case StatusMisdirectedRequest:
+ return "Misdirected Request"
+ case StatusUnprocessableEntity:
+ return "Unprocessable Entity"
+ case StatusLocked:
+ return "Locked"
+ case StatusFailedDependency:
+ return "Failed Dependency"
+ case StatusTooEarly:
+ return "Too Early"
+ case StatusUpgradeRequired:
+ return "Upgrade Required"
+ case StatusPreconditionRequired:
+ return "Precondition Required"
+ case StatusTooManyRequests:
+ return "Too Many Requests"
+ case StatusRequestHeaderFieldsTooLarge:
+ return "Request Header Fields Too Large"
+ case StatusUnavailableForLegalReasons:
+ return "Unavailable For Legal Reasons"
+ case StatusInternalServerError:
+ return "Internal Server Error"
+ case StatusNotImplemented:
+ return "Not Implemented"
+ case StatusBadGateway:
+ return "Bad Gateway"
+ case StatusServiceUnavailable:
+ return "Service Unavailable"
+ case StatusGatewayTimeout:
+ return "Gateway Timeout"
+ case StatusHTTPVersionNotSupported:
+ return "HTTP Version Not Supported"
+ case StatusVariantAlsoNegotiates:
+ return "Variant Also Negotiates"
+ case StatusInsufficientStorage:
+ return "Insufficient Storage"
+ case StatusLoopDetected:
+ return "Loop Detected"
+ case StatusNotExtended:
+ return "Not Extended"
+ case StatusNetworkAuthenticationRequired:
+ return "Network Authentication Required"
+ default:
+ return ""
+ }
+}
diff --git a/contrib/go/_std_1.22/src/net/http/transfer.go b/contrib/go/_std_1.22/src/net/http/transfer.go
new file mode 100644
index 0000000000..315c6e2723
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/transfer.go
@@ -0,0 +1,1137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "net/http/httptrace"
+ "net/http/internal"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// ErrLineTooLong is returned when reading request or response bodies
+// with malformed chunked encoding.
+var ErrLineTooLong = internal.ErrLineTooLong
+
+type errorReader struct {
+ err error
+}
+
+func (r errorReader) Read(p []byte) (n int, err error) {
+ return 0, r.err
+}
+
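+// byteReader yields a single byte followed by io.EOF; it is used to
+// re-prepend a byte that was consumed while probing a request body.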
+type byteReader struct {
+ b byte
+ done bool
+}
+
+func (br *byteReader) Read(p []byte) (n int, err error) {
+ if br.done {
+ return 0, io.EOF
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+ br.done = true
+ p[0] = br.b
+ return 1, io.EOF
+}
+
+// transferWriter inspects the fields of a user-supplied Request or Response,
+// sanitizes them without changing the user object and provides methods for
+// writing the respective header, body and trailer in wire format.
+type transferWriter struct {
+ Method string
+ Body io.Reader
+ BodyCloser io.Closer
+ ResponseToHEAD bool
+ ContentLength int64 // -1 means unknown, 0 means exactly none
+ Close bool
+ TransferEncoding []string
+ Header Header
+ Trailer Header
+ IsResponse bool
+ bodyReadError error // any non-EOF error from reading Body
+
+ FlushHeaders bool // flush headers to network before body
+ ByteReadCh chan readResult // non-nil if probeRequestBody called
+}
+
+func newTransferWriter(r any) (t *transferWriter, err error) {
+ t = &transferWriter{}
+
+ // Extract relevant fields
+ atLeastHTTP11 := false
+ switch rr := r.(type) {
+ case *Request:
+ if rr.ContentLength != 0 && rr.Body == nil {
+ return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength)
+ }
+ t.Method = valueOrDefault(rr.Method, "GET")
+ t.Close = rr.Close
+ t.TransferEncoding = rr.TransferEncoding
+ t.Header = rr.Header
+ t.Trailer = rr.Trailer
+ t.Body = rr.Body
+ t.BodyCloser = rr.Body
+ t.ContentLength = rr.outgoingLength()
+ if t.ContentLength < 0 && len(t.TransferEncoding) == 0 && t.shouldSendChunkedRequestBody() {
+ t.TransferEncoding = []string{"chunked"}
+ }
+ // If there's a body, conservatively flush the headers
+ // to any bufio.Writer we're writing to, just in case
+ // the server needs the headers early, before we copy
+ // the body and possibly block. We make an exception
+ // for the common standard library in-memory types,
+ // though, to avoid unnecessary TCP packets on the
+ // wire. (Issue 22088.)
+ if t.ContentLength != 0 && !isKnownInMemoryReader(t.Body) {
+ t.FlushHeaders = true
+ }
+
+ atLeastHTTP11 = true // Transport requests are always 1.1 or 2.0
+ case *Response:
+ t.IsResponse = true
+ if rr.Request != nil {
+ t.Method = rr.Request.Method
+ }
+ t.Body = rr.Body
+ t.BodyCloser = rr.Body
+ t.ContentLength = rr.ContentLength
+ t.Close = rr.Close
+ t.TransferEncoding = rr.TransferEncoding
+ t.Header = rr.Header
+ t.Trailer = rr.Trailer
+ atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
+ t.ResponseToHEAD = noResponseBodyExpected(t.Method)
+ }
+
+	// Sanitize Body, ContentLength, TransferEncoding
+ if t.ResponseToHEAD {
+ t.Body = nil
+ if chunked(t.TransferEncoding) {
+ t.ContentLength = -1
+ }
+ } else {
+ if !atLeastHTTP11 || t.Body == nil {
+ t.TransferEncoding = nil
+ }
+ if chunked(t.TransferEncoding) {
+ t.ContentLength = -1
+ } else if t.Body == nil { // no chunking, no body
+ t.ContentLength = 0
+ }
+ }
+
+ // Sanitize Trailer
+ if !chunked(t.TransferEncoding) {
+ t.Trailer = nil
+ }
+
+ return t, nil
+}
+
+// shouldSendChunkedRequestBody reports whether we should try to send a
+// chunked request body to the server. In particular, the case we really
+// want to prevent is sending a GET or other typically-bodyless request to a
+// server with a chunked body when the body has zero bytes, since GETs with
+// bodies (while acceptable according to specs), even zero-byte chunked
+// bodies, are approximately never seen in the wild and confuse most
+// servers. See Issue 18257, as one example.
+//
+// The only reason we'd send such a request is if the user set the Body to a
+// non-nil value (say, io.NopCloser(bytes.NewReader(nil))) and didn't
+// set ContentLength, or NewRequest set it to -1 (unknown); in that case we
+// assume there are bytes to send.
+//
+// This code tries to read a byte from the Request.Body in such cases to see
+// whether the body actually has content (super rare) or is actually just
+// a non-nil content-less ReadCloser (the more common case). In that more
+// common case, we act as if their Body were nil instead, and don't send
+// a body.
+func (t *transferWriter) shouldSendChunkedRequestBody() bool {
+ // Note that t.ContentLength is the corrected content length
+ // from rr.outgoingLength, so 0 actually means zero, not unknown.
+ if t.ContentLength >= 0 || t.Body == nil { // redundant checks; caller did them
+ return false
+ }
+ if t.Method == "CONNECT" {
+ return false
+ }
+ if requestMethodUsuallyLacksBody(t.Method) {
+ // Only probe the Request.Body for GET/HEAD/DELETE/etc
+ // requests, because it's only those types of requests
+ // that confuse servers.
+ t.probeRequestBody() // adjusts t.Body, t.ContentLength
+ return t.Body != nil
+ }
+ // For all other request types (PUT, POST, PATCH, or anything
+ // made-up we've never heard of), assume it's normal and the server
+ // can deal with a chunked request body. Maybe we'll adjust this
+ // later.
+ return true
+}
+
+// probeRequestBody reads a byte from t.Body to see whether it's empty
+// (returns io.EOF right away).
+//
+// But because we've had problems with this blocking users in the past
+// (issue 17480) when the body is a pipe (perhaps waiting on the response
+// headers before the pipe is fed data), we need to be careful and bound how
+// long we wait for it. This delay will only affect users if all the following
+// are true:
+// - the request body blocks
+// - the content length is not set (or set to -1)
+// - the method doesn't usually have a body (GET, HEAD, DELETE, ...)
+// - there is no transfer-encoding=chunked already set.
+//
+// In other words, this delay will not normally affect anybody, and there
+// are workarounds if it does.
+func (t *transferWriter) probeRequestBody() {
+ t.ByteReadCh = make(chan readResult, 1)
+ go func(body io.Reader) {
+ var buf [1]byte
+ var rres readResult
+ rres.n, rres.err = body.Read(buf[:])
+ if rres.n == 1 {
+ rres.b = buf[0]
+ }
+ t.ByteReadCh <- rres
+ close(t.ByteReadCh)
+ }(t.Body)
+ timer := time.NewTimer(200 * time.Millisecond)
+ select {
+ case rres := <-t.ByteReadCh:
+ timer.Stop()
+ if rres.n == 0 && rres.err == io.EOF {
+ // It was empty.
+ t.Body = nil
+ t.ContentLength = 0
+ } else if rres.n == 1 {
+ if rres.err != nil {
+ t.Body = io.MultiReader(&byteReader{b: rres.b}, errorReader{rres.err})
+ } else {
+ t.Body = io.MultiReader(&byteReader{b: rres.b}, t.Body)
+ }
+ } else if rres.err != nil {
+ t.Body = errorReader{rres.err}
+ }
+ case <-timer.C:
+ // Too slow. Don't wait. Read it later, and keep
+ // assuming that this is ContentLength == -1
+ // (unknown), which means we'll send a
+ // "Transfer-Encoding: chunked" header.
+ t.Body = io.MultiReader(finishAsyncByteRead{t}, t.Body)
+ // Request that Request.Write flush the headers to the
+ // network before writing the body, since our body may not
+ // become readable until it's seen the response headers.
+ t.FlushHeaders = true
+ }
+}
+
+func noResponseBodyExpected(requestMethod string) bool {
+ return requestMethod == "HEAD"
+}
+
+func (t *transferWriter) shouldSendContentLength() bool {
+ if chunked(t.TransferEncoding) {
+ return false
+ }
+ if t.ContentLength > 0 {
+ return true
+ }
+ if t.ContentLength < 0 {
+ return false
+ }
+ // Many servers expect a Content-Length for these methods
+ if t.Method == "POST" || t.Method == "PUT" || t.Method == "PATCH" {
+ return true
+ }
+ if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
+ if t.Method == "GET" || t.Method == "HEAD" {
+ return false
+ }
+ return true
+ }
+
+ return false
+}
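+
+// Illustrative sketch (not part of the original source): the framing header
+// chosen for a few representative inputs, with no explicit Transfer-Encoding
+// set by the caller:
+//
+//	POST, ContentLength 0  -> "Content-Length: 0" (many servers expect it)
+//	GET,  ContentLength 0  -> neither framing header is sent
+//	PUT,  ContentLength 5  -> "Content-Length: 5"
+//	POST, ContentLength -1 -> no Content-Length; the transfer writer sends
+//	                          "Transfer-Encoding: chunked" instead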
+
+func (t *transferWriter) writeHeader(w io.Writer, trace *httptrace.ClientTrace) error {
+ if t.Close && !hasToken(t.Header.get("Connection"), "close") {
+ if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Connection", []string{"close"})
+ }
+ }
+
+ // Write Content-Length and/or Transfer-Encoding whose values are a
+ // function of the sanitized field triple (Body, ContentLength,
+ // TransferEncoding)
+ if t.shouldSendContentLength() {
+ if _, err := io.WriteString(w, "Content-Length: "); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Content-Length", []string{strconv.FormatInt(t.ContentLength, 10)})
+ }
+ } else if chunked(t.TransferEncoding) {
+ if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Transfer-Encoding", []string{"chunked"})
+ }
+ }
+
+ // Write Trailer header
+ if t.Trailer != nil {
+ keys := make([]string, 0, len(t.Trailer))
+ for k := range t.Trailer {
+ k = CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return badStringError("invalid Trailer key", k)
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ // TODO: could do better allocation-wise here, but trailers are rare,
+ // so being lazy for now.
+ if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil {
+ return err
+ }
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField("Trailer", keys)
+ }
+ }
+ }
+
+ return nil
+}
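+
+// Illustrative sketch (not part of the original source): for a chunked
+// message that declares trailers, writeHeader emits lines such as:
+//
+//	Transfer-Encoding: chunked\r\n
+//	Trailer: Content-Md5,X-Checksum\r\n
+//
+// Keys are canonicalized and sorted, and "Transfer-Encoding", "Trailer",
+// and "Content-Length" are rejected as trailer keys.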
+
+// always closes t.BodyCloser
+func (t *transferWriter) writeBody(w io.Writer) (err error) {
+ var ncopy int64
+ closed := false
+ defer func() {
+ if closed || t.BodyCloser == nil {
+ return
+ }
+ if closeErr := t.BodyCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ }()
+
+ // Write body. We "unwrap" the body first if it was wrapped in a
+ // nopCloser or readTrackingBody. This is to ensure that we can take advantage of
+ // OS-level optimizations in the event that the body is an
+ // *os.File.
+ if t.Body != nil {
+ var body = t.unwrapBody()
+ if chunked(t.TransferEncoding) {
+ if bw, ok := w.(*bufio.Writer); ok && !t.IsResponse {
+ w = &internal.FlushAfterChunkWriter{Writer: bw}
+ }
+ cw := internal.NewChunkedWriter(w)
+ _, err = t.doBodyCopy(cw, body)
+ if err == nil {
+ err = cw.Close()
+ }
+ } else if t.ContentLength == -1 {
+ dst := w
+ if t.Method == "CONNECT" {
+ dst = bufioFlushWriter{dst}
+ }
+ ncopy, err = t.doBodyCopy(dst, body)
+ } else {
+ ncopy, err = t.doBodyCopy(w, io.LimitReader(body, t.ContentLength))
+ if err != nil {
+ return err
+ }
+ var nextra int64
+ nextra, err = t.doBodyCopy(io.Discard, body)
+ ncopy += nextra
+ }
+ if err != nil {
+ return err
+ }
+ }
+ if t.BodyCloser != nil {
+ closed = true
+ if err := t.BodyCloser.Close(); err != nil {
+ return err
+ }
+ }
+
+ if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy {
+ return fmt.Errorf("http: ContentLength=%d with Body length %d",
+ t.ContentLength, ncopy)
+ }
+
+ if chunked(t.TransferEncoding) {
+ // Write Trailer header
+ if t.Trailer != nil {
+ if err := t.Trailer.Write(w); err != nil {
+ return err
+ }
+ }
+ // Last chunk, empty trailer
+ _, err = io.WriteString(w, "\r\n")
+ }
+ return err
+}
+
+// doBodyCopy wraps a copy operation, with any resulting error also
+// being saved in bodyReadError.
+//
+// This function is only intended for use in writeBody.
+func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) {
+ buf := getCopyBuf()
+ defer putCopyBuf(buf)
+
+ n, err = io.CopyBuffer(dst, src, buf)
+ if err != nil && err != io.EOF {
+ t.bodyReadError = err
+ }
+ return
+}
+
+// unwrapBody unwraps the body's inner reader if it's a
+// nopCloser. This is to ensure that body writes sourced from local
+// files (*os.File types) are properly optimized.
+//
+// This function is only intended for use in writeBody.
+func (t *transferWriter) unwrapBody() io.Reader {
+ if r, ok := unwrapNopCloser(t.Body); ok {
+ return r
+ }
+ if r, ok := t.Body.(*readTrackingBody); ok {
+ r.didRead = true
+ return r.ReadCloser
+ }
+ return t.Body
+}
+
+type transferReader struct {
+ // Input
+ Header Header
+ StatusCode int
+ RequestMethod string
+ ProtoMajor int
+ ProtoMinor int
+ // Output
+ Body io.ReadCloser
+ ContentLength int64
+ Chunked bool
+ Close bool
+ Trailer Header
+}
+
+func (t *transferReader) protoAtLeast(m, n int) bool {
+ return t.ProtoMajor > m || (t.ProtoMajor == m && t.ProtoMinor >= n)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+var (
+ suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"}
+ suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"}
+ excludedHeadersNoBody = map[string]bool{"Content-Length": true, "Transfer-Encoding": true}
+)
+
+func suppressedHeaders(status int) []string {
+ switch {
+ case status == 304:
+ // RFC 7232 section 4.1
+ return suppressedHeaders304
+ case !bodyAllowedForStatus(status):
+ return suppressedHeadersNoBody
+ }
+ return nil
+}
+
+// msg is *Request or *Response.
+func readTransfer(msg any, r *bufio.Reader) (err error) {
+ t := &transferReader{RequestMethod: "GET"}
+
+ // Unify input
+ isResponse := false
+ switch rr := msg.(type) {
+ case *Response:
+ t.Header = rr.Header
+ t.StatusCode = rr.StatusCode
+ t.ProtoMajor = rr.ProtoMajor
+ t.ProtoMinor = rr.ProtoMinor
+ t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header, true)
+ isResponse = true
+ if rr.Request != nil {
+ t.RequestMethod = rr.Request.Method
+ }
+ case *Request:
+ t.Header = rr.Header
+ t.RequestMethod = rr.Method
+ t.ProtoMajor = rr.ProtoMajor
+ t.ProtoMinor = rr.ProtoMinor
+ // Transfer semantics for Requests are exactly like those for
+ // Responses with status code 200, responding to a GET method
+ t.StatusCode = 200
+ t.Close = rr.Close
+ default:
+ panic("unexpected type")
+ }
+
+ // Default to HTTP/1.1
+ if t.ProtoMajor == 0 && t.ProtoMinor == 0 {
+ t.ProtoMajor, t.ProtoMinor = 1, 1
+ }
+
+ // Transfer-Encoding: chunked, and overriding Content-Length.
+ if err := t.parseTransferEncoding(); err != nil {
+ return err
+ }
+
+ realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.Chunked)
+ if err != nil {
+ return err
+ }
+ if isResponse && t.RequestMethod == "HEAD" {
+ if n, err := parseContentLength(t.Header["Content-Length"]); err != nil {
+ return err
+ } else {
+ t.ContentLength = n
+ }
+ } else {
+ t.ContentLength = realLength
+ }
+
+ // Trailer
+ t.Trailer, err = fixTrailer(t.Header, t.Chunked)
+ if err != nil {
+ return err
+ }
+
+ // If there is no Content-Length or chunked Transfer-Encoding on a *Response
+ // and the status is not 1xx, 204 or 304, then the body is unbounded.
+ // See RFC 7230, section 3.3.
+ switch msg.(type) {
+ case *Response:
+ if realLength == -1 && !t.Chunked && bodyAllowedForStatus(t.StatusCode) {
+ // Unbounded body.
+ t.Close = true
+ }
+ }
+
+ // Prepare body reader. ContentLength < 0 means chunked encoding
+ // or close connection when finished, since multipart is not supported yet
+ switch {
+ case t.Chunked:
+ if isResponse && (noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode)) {
+ t.Body = NoBody
+ } else {
+ t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
+ }
+ case realLength == 0:
+ t.Body = NoBody
+ case realLength > 0:
+ t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close}
+ default:
+ // realLength < 0, i.e. "Content-Length" not mentioned in header
+ if t.Close {
+ // Close semantics (i.e. HTTP/1.0)
+ t.Body = &body{src: r, closing: t.Close}
+ } else {
+ // Persistent connection (i.e. HTTP/1.1)
+ t.Body = NoBody
+ }
+ }
+
+ // Unify output
+ switch rr := msg.(type) {
+ case *Request:
+ rr.Body = t.Body
+ rr.ContentLength = t.ContentLength
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
+ rr.Close = t.Close
+ rr.Trailer = t.Trailer
+ case *Response:
+ rr.Body = t.Body
+ rr.ContentLength = t.ContentLength
+ if t.Chunked {
+ rr.TransferEncoding = []string{"chunked"}
+ }
+ rr.Close = t.Close
+ rr.Trailer = t.Trailer
+ }
+
+ return nil
+}
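+
+// Illustrative sketch (not part of the original source): readTransfer is
+// what gives ReadResponse its body semantics. Parsing a raw chunked
+// response, for example:
+//
+//	raw := "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n" +
+//		"5\r\nhello\r\n0\r\n\r\n"
+//	resp, _ := ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
+//	body, _ := io.ReadAll(resp.Body)
+//	// body == "hello", resp.ContentLength == -1,
+//	// resp.TransferEncoding == []string{"chunked"}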
+
+// Reports whether "chunked" is the first element of the encodings stack.
+func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
+
+// Checks whether the encoding is explicitly "identity".
+func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
+
+// unsupportedTEError reports unsupported transfer-encodings.
+type unsupportedTEError struct {
+ err string
+}
+
+func (uste *unsupportedTEError) Error() string {
+ return uste.err
+}
+
+// isUnsupportedTEError checks if the error is of type
+// unsupportedTEError. It is usually invoked with a non-nil err.
+func isUnsupportedTEError(err error) bool {
+ _, ok := err.(*unsupportedTEError)
+ return ok
+}
+
+// parseTransferEncoding sets t.Chunked based on the Transfer-Encoding header.
+func (t *transferReader) parseTransferEncoding() error {
+ raw, present := t.Header["Transfer-Encoding"]
+ if !present {
+ return nil
+ }
+ delete(t.Header, "Transfer-Encoding")
+
+ // Issue 12785; ignore Transfer-Encoding on HTTP/1.0 requests.
+ if !t.protoAtLeast(1, 1) {
+ return nil
+ }
+
+ // Like nginx, we only support a single Transfer-Encoding header field, and
+ // only if set to "chunked". This is one of the most security sensitive
+ // surfaces in HTTP/1.1 due to the risk of request smuggling, so we keep it
+ // strict and simple.
+ if len(raw) != 1 {
+ return &unsupportedTEError{fmt.Sprintf("too many transfer encodings: %q", raw)}
+ }
+ if !ascii.EqualFold(raw[0], "chunked") {
+ return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])}
+ }
+
+ // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field
+ // in any message that contains a Transfer-Encoding header field."
+ //
+ // but also: "If a message is received with both a Transfer-Encoding and a
+ // Content-Length header field, the Transfer-Encoding overrides the
+ // Content-Length. Such a message might indicate an attempt to perform
+ // request smuggling (Section 9.5) or response splitting (Section 9.4) and
+ // ought to be handled as an error. A sender MUST remove the received
+ // Content-Length field prior to forwarding such a message downstream."
+ //
+ // Reportedly, these appear in the wild.
+ delete(t.Header, "Content-Length")
+
+ t.Chunked = true
+ return nil
+}
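+
+// Illustrative sketch (not part of the original source): how a few
+// Transfer-Encoding values are handled on an HTTP/1.1 message:
+//
+//	Transfer-Encoding: chunked        -> t.Chunked = true
+//	Transfer-Encoding: ChUnKeD        -> t.Chunked = true (case-insensitive)
+//	Transfer-Encoding: gzip, chunked  -> unsupportedTEError
+//	Transfer-Encoding: identity       -> unsupportedTEError
+//
+// On an HTTP/1.0 message the header is deleted and ignored entirely.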
+
+// Determine the expected body length, using RFC 7230 Section 3.3. This
+// function is not a method, because ultimately it should be shared by
+// ReadResponse and ReadRequest.
+func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) {
+ isRequest := !isResponse
+ contentLens := header["Content-Length"]
+
+ // Hardening against HTTP request smuggling
+ if len(contentLens) > 1 {
+ // Per RFC 7230 Section 3.3.2, prevent multiple
+ // Content-Length headers if they differ in value.
+ // If there are dups of the value, remove the dups.
+ // See Issue 16490.
+ first := textproto.TrimString(contentLens[0])
+ for _, ct := range contentLens[1:] {
+ if first != textproto.TrimString(ct) {
+ return 0, fmt.Errorf("http: message cannot contain multiple Content-Length headers; got %q", contentLens)
+ }
+ }
+
+ // deduplicate Content-Length
+ header.Del("Content-Length")
+ header.Add("Content-Length", first)
+
+ contentLens = header["Content-Length"]
+ }
+
+ // Logic based on response type or status
+ if isResponse && noResponseBodyExpected(requestMethod) {
+ return 0, nil
+ }
+ if status/100 == 1 {
+ return 0, nil
+ }
+ switch status {
+ case 204, 304:
+ return 0, nil
+ }
+
+ // Logic based on Transfer-Encoding
+ if chunked {
+ return -1, nil
+ }
+
+ if len(contentLens) > 0 {
+ // Logic based on Content-Length
+ n, err := parseContentLength(contentLens)
+ if err != nil {
+ return -1, err
+ }
+ return n, nil
+ }
+
+ header.Del("Content-Length")
+
+ if isRequest {
+ // RFC 7230 neither explicitly permits nor forbids an
+ // entity-body on a GET request so we permit one if
+ // declared, but we default to 0 here (not -1 below)
+ // if there's no mention of a body.
+ // Likewise, all other request methods are assumed to have
+ // no body if neither Transfer-Encoding chunked nor a
+ // Content-Length are set.
+ return 0, nil
+ }
+
+ // Body-EOF logic based on other methods (like closing, or chunked coding)
+ return -1, nil
+}
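+
+// Illustrative sketch (not part of the original source): the length
+// fixLength reports for a few common cases:
+//
+//	response to a HEAD request        -> 0 (no body follows the header)
+//	status 1xx, 204, or 304           -> 0
+//	Transfer-Encoding: chunked        -> -1 (unknown until the 0-chunk)
+//	Content-Length: 42                -> 42
+//	request with no framing headers   -> 0 (requests default to no body)
+//	response with no framing headers  -> -1 (read until connection close)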
+
+// Determine whether to hang up after sending a request and body, or
+// receiving a response and body.
+// 'header' is the request headers.
+func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
+ if major < 1 {
+ return true
+ }
+
+ conv := header["Connection"]
+ hasClose := httpguts.HeaderValuesContainsToken(conv, "close")
+ if major == 1 && minor == 0 {
+ return hasClose || !httpguts.HeaderValuesContainsToken(conv, "keep-alive")
+ }
+
+ if hasClose && removeCloseHeader {
+ header.Del("Connection")
+ }
+
+ return hasClose
+}
+
+// Parse the trailer header.
+func fixTrailer(header Header, chunked bool) (Header, error) {
+ vv, ok := header["Trailer"]
+ if !ok {
+ return nil, nil
+ }
+ if !chunked {
+ // Trailer and no chunking:
+ // this is an invalid use case for trailer header.
+ // Nevertheless, no error will be returned and we
+ // let users decide if this is a valid HTTP message.
+ // The Trailer header will be kept in Response.Header
+ // but not populate Response.Trailer.
+ // See issue #27197.
+ return nil, nil
+ }
+ header.Del("Trailer")
+
+ trailer := make(Header)
+ var err error
+ for _, v := range vv {
+ foreachHeaderElement(v, func(key string) {
+ key = CanonicalHeaderKey(key)
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ if err == nil {
+ err = badStringError("bad trailer key", key)
+ return
+ }
+ }
+ trailer[key] = nil
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ if len(trailer) == 0 {
+ return nil, nil
+ }
+ return trailer, nil
+}
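+
+// Illustrative sketch (not part of the original source): given a chunked
+// message declaring
+//
+//	Trailer: X-Checksum, X-Size
+//
+// fixTrailer returns Header{"X-Checksum": nil, "X-Size": nil}; the nil
+// values are filled in by readTrailer once the final 0-length chunk has
+// been consumed. Declaring "Content-Length" (or "Trailer" or
+// "Transfer-Encoding") as a trailer key is an error.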
+
+// body turns a Reader into a ReadCloser.
+// Close ensures that the body has been fully read
+// and then reads the trailer if necessary.
+type body struct {
+ src io.Reader
+ hdr any // non-nil (Response or Request) value means read trailer
+ r *bufio.Reader // underlying wire-format reader for the trailer
+ closing bool // is the connection to be closed after reading body?
+ doEarlyClose bool // whether Close should stop early
+
+ mu sync.Mutex // guards following, and calls to Read and Close
+ sawEOF bool
+ closed bool
+ earlyClose bool // Close called and we didn't read to the end of src
+ onHitEOF func() // if non-nil, func to call when EOF is Read
+}
+
+// ErrBodyReadAfterClose is returned when reading a [Request] or [Response]
+// Body after the body has been closed. This typically happens when the body is
+// read after an HTTP [Handler] calls WriteHeader or Write on its
+// [ResponseWriter].
+var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body")
+
+func (b *body) Read(p []byte) (n int, err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return 0, ErrBodyReadAfterClose
+ }
+ return b.readLocked(p)
+}
+
+// Must hold b.mu.
+func (b *body) readLocked(p []byte) (n int, err error) {
+ if b.sawEOF {
+ return 0, io.EOF
+ }
+ n, err = b.src.Read(p)
+
+ if err == io.EOF {
+ b.sawEOF = true
+ // Chunked case. Read the trailer.
+ if b.hdr != nil {
+ if e := b.readTrailer(); e != nil {
+ err = e
+ // Something went wrong in the trailer, we must not allow any
+ // further reads of any kind to succeed from body, nor any
+ // subsequent requests on the server connection. See
+ // golang.org/issue/12027
+ b.sawEOF = false
+ b.closed = true
+ }
+ b.hdr = nil
+ } else {
+ // If the server declared the Content-Length, our body is a LimitedReader
+ // and we need to check whether this EOF arrived early.
+ if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ }
+
+ // If we can return an EOF here along with the read data, do
+ // so. This is optional per the io.Reader contract, but doing
+ // so helps the HTTP transport code recycle its connection
+ // earlier (since it will see this EOF itself), even if the
+ // client doesn't do future reads or Close.
+ if err == nil && n > 0 {
+ if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 {
+ err = io.EOF
+ b.sawEOF = true
+ }
+ }
+
+ if b.sawEOF && b.onHitEOF != nil {
+ b.onHitEOF()
+ }
+
+ return n, err
+}
+
+var (
+ singleCRLF = []byte("\r\n")
+ doubleCRLF = []byte("\r\n\r\n")
+)
+
+func seeUpcomingDoubleCRLF(r *bufio.Reader) bool {
+ for peekSize := 4; ; peekSize++ {
+ // This loop stops when Peek returns an error,
+ // which it does when r's buffer has been filled.
+ buf, err := r.Peek(peekSize)
+ if bytes.HasSuffix(buf, doubleCRLF) {
+ return true
+ }
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+var errTrailerEOF = errors.New("http: unexpected EOF reading trailer")
+
+func (b *body) readTrailer() error {
+ // The common case, since nobody uses trailers.
+ buf, err := b.r.Peek(2)
+ if bytes.Equal(buf, singleCRLF) {
+ b.r.Discard(2)
+ return nil
+ }
+ if len(buf) < 2 {
+ return errTrailerEOF
+ }
+ if err != nil {
+ return err
+ }
+
+ // Make sure there's a header terminator coming up, to prevent
+ // a DoS with an unbounded size Trailer. It's not easy to
+ // slip in a LimitReader here, as textproto.NewReader requires
+ // a concrete *bufio.Reader. Also, we can't get all the way
+ // back up to our conn's LimitedReader that *might* be backing
+ // this bufio.Reader. Instead, a hack: we iteratively Peek up
+ // to the bufio.Reader's max size, looking for a double CRLF.
+ // This limits the trailer to the underlying buffer size, typically 4kB.
+ if !seeUpcomingDoubleCRLF(b.r) {
+ return errors.New("http: suspiciously long trailer after chunked body")
+ }
+
+ hdr, err := textproto.NewReader(b.r).ReadMIMEHeader()
+ if err != nil {
+ if err == io.EOF {
+ return errTrailerEOF
+ }
+ return err
+ }
+ switch rr := b.hdr.(type) {
+ case *Request:
+ mergeSetHeader(&rr.Trailer, Header(hdr))
+ case *Response:
+ mergeSetHeader(&rr.Trailer, Header(hdr))
+ }
+ return nil
+}
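+
+// Illustrative sketch (not part of the original source): the wire bytes
+// readTrailer consumes. By the time it runs, the chunked reader has already
+// consumed through the terminating 0-length chunk:
+//
+//	4\r\nwiki\r\n            <- final data chunk (already read as body)
+//	0\r\n                    <- zero-length chunk ends the body
+//	X-Checksum: abc123\r\n   <- optional trailer fields start here
+//	\r\n                     <- blank line terminates the trailer
+//
+// With no trailer fields, only the bare "\r\n" remains, which is the fast
+// path at the top of readTrailer.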
+
+func mergeSetHeader(dst *Header, src Header) {
+ if *dst == nil {
+ *dst = src
+ return
+ }
+ for k, vv := range src {
+ (*dst)[k] = vv
+ }
+}
+
+// unreadDataSizeLocked returns the number of bytes of unread input.
+// It returns -1 if unknown.
+// b.mu must be held.
+func (b *body) unreadDataSizeLocked() int64 {
+ if lr, ok := b.src.(*io.LimitedReader); ok {
+ return lr.N
+ }
+ return -1
+}
+
+func (b *body) Close() error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return nil
+ }
+ var err error
+ switch {
+ case b.sawEOF:
+ // Already saw EOF, so no need to go looking for it.
+ case b.hdr == nil && b.closing:
+ // no trailer and closing the connection next.
+ // no point in reading to EOF.
+ case b.doEarlyClose:
+ // Read up to maxPostHandlerReadBytes bytes of the body, looking
+ // for EOF (and trailers), so we can re-use this connection.
+ if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
+ // There was a declared Content-Length, and we have more bytes remaining
+ // than our maxPostHandlerReadBytes tolerance. So, give up.
+ b.earlyClose = true
+ } else {
+ var n int64
+ // Consume the body, which will also lead to us reading
+ // the trailer headers after the body, if present.
+ n, err = io.CopyN(io.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
+ if err == io.EOF {
+ err = nil
+ }
+ if n == maxPostHandlerReadBytes {
+ b.earlyClose = true
+ }
+ }
+ default:
+ // Fully consume the body, which will also lead to us reading
+ // the trailer headers after the body, if present.
+ _, err = io.Copy(io.Discard, bodyLocked{b})
+ }
+ b.closed = true
+ return err
+}
+
+func (b *body) didEarlyClose() bool {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.earlyClose
+}
+
+// bodyRemains reports whether future Read calls might
+// yield data.
+func (b *body) bodyRemains() bool {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return !b.sawEOF
+}
+
+func (b *body) registerOnHitEOF(fn func()) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.onHitEOF = fn
+}
+
+// bodyLocked is an io.Reader reading from a *body when its mutex is
+// already held.
+type bodyLocked struct {
+ b *body
+}
+
+func (bl bodyLocked) Read(p []byte) (n int, err error) {
+ if bl.b.closed {
+ return 0, ErrBodyReadAfterClose
+ }
+ return bl.b.readLocked(p)
+}
+
+var laxContentLength = godebug.New("httplaxcontentlength")
+
+// parseContentLength trims whitespace from the header value and checks
+// that it is valid. It returns -1 if no value is set; otherwise it
+// returns the value, which must be >= 0.
+func parseContentLength(clHeaders []string) (int64, error) {
+ if len(clHeaders) == 0 {
+ return -1, nil
+ }
+ cl := textproto.TrimString(clHeaders[0])
+
+ // The Content-Length must be a valid numeric value.
+ // See: https://datatracker.ietf.org/doc/html/rfc2616/#section-14.13
+ if cl == "" {
+ if laxContentLength.Value() == "1" {
+ laxContentLength.IncNonDefault()
+ return -1, nil
+ }
+ return 0, badStringError("invalid empty Content-Length", cl)
+ }
+ n, err := strconv.ParseUint(cl, 10, 63)
+ if err != nil {
+ return 0, badStringError("bad Content-Length", cl)
+ }
+ return int64(n), nil
+}
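+
+// Illustrative sketch (not part of the original source): inputs and results:
+//
+//	[]string{"42"}   -> 42
+//	[]string{" 7 "}  -> 7 (surrounding whitespace is trimmed)
+//	nil              -> -1 (header absent)
+//	[]string{"-1"}   -> error (must be an unsigned decimal)
+//	[]string{""}     -> error, or -1 when GODEBUG=httplaxcontentlength=1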
+
+// finishAsyncByteRead finishes reading the 1-byte sniff
+// from the ContentLength==0, Body!=nil case.
+type finishAsyncByteRead struct {
+ tw *transferWriter
+}
+
+func (fr finishAsyncByteRead) Read(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return
+ }
+ rres := <-fr.tw.ByteReadCh
+ n, err = rres.n, rres.err
+ if n == 1 {
+ p[0] = rres.b
+ }
+ if err == nil {
+ err = io.EOF
+ }
+ return
+}
+
+var nopCloserType = reflect.TypeOf(io.NopCloser(nil))
+var nopCloserWriterToType = reflect.TypeOf(io.NopCloser(struct {
+ io.Reader
+ io.WriterTo
+}{}))
+
+// unwrapNopCloser returns the underlying reader and true if r is a NopCloser;
+// otherwise it returns nil and false.
+func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) {
+ switch reflect.TypeOf(r) {
+ case nopCloserType, nopCloserWriterToType:
+ return reflect.ValueOf(r).Field(0).Interface().(io.Reader), true
+ default:
+ return nil, false
+ }
+}
+
+// isKnownInMemoryReader reports whether r is a type known to not
+// block on Read. Its caller uses this as an optional optimization to
+// send fewer TCP packets.
+func isKnownInMemoryReader(r io.Reader) bool {
+ switch r.(type) {
+ case *bytes.Reader, *bytes.Buffer, *strings.Reader:
+ return true
+ }
+ if r, ok := unwrapNopCloser(r); ok {
+ return isKnownInMemoryReader(r)
+ }
+ if r, ok := r.(*readTrackingBody); ok {
+ return isKnownInMemoryReader(r.ReadCloser)
+ }
+ return false
+}
+
+// bufioFlushWriter is an io.Writer wrapper that flushes all writes
+// on its wrapped writer if it's a *bufio.Writer.
+type bufioFlushWriter struct{ w io.Writer }
+
+func (fw bufioFlushWriter) Write(p []byte) (n int, err error) {
+ n, err = fw.w.Write(p)
+ if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok {
+ ferr := bw.Flush()
+ if ferr != nil && err == nil {
+ err = ferr
+ }
+ }
+ return
+}
diff --git a/contrib/go/_std_1.22/src/net/http/transport.go b/contrib/go/_std_1.22/src/net/http/transport.go
new file mode 100644
index 0000000000..17067ac07c
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/transport.go
@@ -0,0 +1,2965 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP client implementation. See RFC 7230 through 7235.
+//
+// This is the low-level Transport implementation of RoundTripper.
+// The high-level interface is in client.go.
+
+package http
+
+import (
+ "bufio"
+ "compress/gzip"
+ "container/list"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "internal/godebug"
+ "io"
+ "log"
+ "net"
+ "net/http/httptrace"
+ "net/http/internal/ascii"
+ "net/textproto"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http/httpproxy"
+)
+
+// DefaultTransport is the default implementation of [Transport] and is
+// used by [DefaultClient]. It establishes network connections as needed
+// and caches them for reuse by subsequent calls. It uses HTTP proxies
+// as directed by the environment variables HTTP_PROXY, HTTPS_PROXY
+// and NO_PROXY (or the lowercase versions thereof).
+var DefaultTransport RoundTripper = &Transport{
+ Proxy: ProxyFromEnvironment,
+ DialContext: defaultTransportDialContext(&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }),
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+}
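+
+// Illustrative sketch (not part of the original source): the usual way to
+// customize these defaults without mutating the shared DefaultTransport is
+// to clone it first:
+//
+//	t := DefaultTransport.(*Transport).Clone()
+//	t.MaxIdleConnsPerHost = 32
+//	client := &Client{Transport: t}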
+
+// DefaultMaxIdleConnsPerHost is the default value of [Transport]'s
+// MaxIdleConnsPerHost.
+const DefaultMaxIdleConnsPerHost = 2
+
+// Transport is an implementation of [RoundTripper] that supports HTTP,
+// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
+//
+// By default, Transport caches connections for future re-use.
+// This may leave many open connections when accessing many hosts.
+// This behavior can be managed using the [Transport.CloseIdleConnections] method
+// and the [Transport.MaxIdleConnsPerHost] and [Transport.DisableKeepAlives] fields.
+//
+// Transports should be reused instead of created as needed.
+// Transports are safe for concurrent use by multiple goroutines.
+//
+// A Transport is a low-level primitive for making HTTP and HTTPS requests.
+// For high-level functionality, such as cookies and redirects, see [Client].
+//
+// Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2
+// for HTTPS URLs, depending on whether the server supports HTTP/2,
+// and how the Transport is configured. The [DefaultTransport] supports HTTP/2.
+// To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2
+// and call ConfigureTransport. See the package docs for more about HTTP/2.
+//
+// Responses with status codes in the 1xx range are either handled
+// automatically (100 expect-continue) or ignored. The one
+// exception is HTTP status code 101 (Switching Protocols), which is
+// considered a terminal status and returned by [Transport.RoundTrip]. To see the
+// ignored 1xx responses, use the httptrace package's
+// ClientTrace.Got1xxResponse.
+//
+// Transport only retries a request upon encountering a network error
+// if the connection has already been used successfully and if the
+// request is idempotent and either has no body or has its [Request.GetBody]
+// defined. HTTP requests are considered idempotent if they have HTTP methods
+// GET, HEAD, OPTIONS, or TRACE; or if their [Header] map contains an
+// "Idempotency-Key" or "X-Idempotency-Key" entry. If the idempotency key
+// value is a zero-length slice, the request is treated as idempotent but the
+// header is not sent on the wire.
+type Transport struct {
+ idleMu sync.Mutex
+ closeIdle bool // user has requested to close all idle conns
+ idleConn map[connectMethodKey][]*persistConn // most recently used at end
+ idleConnWait map[connectMethodKey]wantConnQueue // waiting getConns
+ idleLRU connLRU
+
+ reqMu sync.Mutex
+ reqCanceler map[cancelKey]func(error)
+
+ altMu sync.Mutex // guards changing altProto only
+ altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme
+
+ connsPerHostMu sync.Mutex
+ connsPerHost map[connectMethodKey]int
+ connsPerHostWait map[connectMethodKey]wantConnQueue // waiting getConns
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ //
+ // The proxy type is determined by the URL scheme. "http",
+ // "https", and "socks5" are supported. If the scheme is empty,
+ // "http" is assumed.
+ //
+ // If the proxy URL contains a userinfo subcomponent,
+ // the proxy request will pass the username and password
+ // in a Proxy-Authorization header.
+ //
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*Request) (*url.URL, error)
+
+ // OnProxyConnectResponse is called when the Transport gets an HTTP response from
+ // a proxy for a CONNECT request. It's called before the check for a 200 OK response.
+ // If it returns an error, the request fails with that error.
+ OnProxyConnectResponse func(ctx context.Context, proxyURL *url.URL, connectReq *Request, connectRes *Response) error
+
+ // DialContext specifies the dial function for creating unencrypted TCP connections.
+ // If DialContext is nil (and the deprecated Dial below is also nil),
+ // then the transport dials using package net.
+ //
+ // DialContext runs concurrently with calls to RoundTrip.
+ // A RoundTrip call that initiates a dial may end up using
+ // a connection dialed previously when the earlier connection
+ // becomes idle before the later DialContext completes.
+ DialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Dial specifies the dial function for creating unencrypted TCP connections.
+ //
+ // Dial runs concurrently with calls to RoundTrip.
+ // A RoundTrip call that initiates a dial may end up using
+ // a connection dialed previously when the earlier connection
+ // becomes idle before the later Dial completes.
+ //
+ // Deprecated: Use DialContext instead, which allows the transport
+ // to cancel dials as soon as they are no longer needed.
+ // If both are set, DialContext takes priority.
+ Dial func(network, addr string) (net.Conn, error)
+
+ // DialTLSContext specifies an optional dial function for creating
+ // TLS connections for non-proxied HTTPS requests.
+ //
+ // If DialTLSContext is nil (and the deprecated DialTLS below is also nil),
+ // DialContext and TLSClientConfig are used.
+ //
+ // If DialTLSContext is set, the Dial and DialContext hooks are not used for HTTPS
+ // requests and the TLSClientConfig and TLSHandshakeTimeout
+ // are ignored. The returned net.Conn is assumed to already be
+ // past the TLS handshake.
+ DialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for non-proxied HTTPS requests.
+ //
+ // Deprecated: Use DialTLSContext instead, which allows the transport
+ // to cancel dials as soon as they are no longer needed.
+ // If both are set, DialTLSContext takes priority.
+ DialTLS func(network, addr string) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client.
+ // If nil, the default configuration is used.
+ // If non-nil, HTTP/2 support may not be enabled by default.
+ TLSClientConfig *tls.Config
+
+ // TLSHandshakeTimeout specifies the maximum amount of time to
+ // wait for a TLS handshake. Zero means no timeout.
+ TLSHandshakeTimeout time.Duration
+
+ // DisableKeepAlives, if true, disables HTTP keep-alives and
+ // will only use the connection to the server for a single
+ // HTTP request.
+ //
+ // This is unrelated to the similarly named TCP keep-alives.
+ DisableKeepAlives bool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // MaxIdleConns controls the maximum number of idle (keep-alive)
+ // connections across all hosts. Zero means no limit.
+ MaxIdleConns int
+
+ // MaxIdleConnsPerHost, if non-zero, controls the maximum idle
+ // (keep-alive) connections to keep per-host. If zero,
+ // DefaultMaxIdleConnsPerHost is used.
+ MaxIdleConnsPerHost int
+
+ // MaxConnsPerHost optionally limits the total number of
+ // connections per host, including connections in the dialing,
+ // active, and idle states. On limit violation, dials will block.
+ //
+ // Zero means no limit.
+ MaxConnsPerHost int
+
+ // IdleConnTimeout is the maximum amount of time an idle
+ // (keep-alive) connection will remain idle before closing
+ // itself.
+ // Zero means no limit.
+ IdleConnTimeout time.Duration
+
+ // ResponseHeaderTimeout, if non-zero, specifies the amount of
+ // time to wait for a server's response headers after fully
+ // writing the request (including its body, if any). This
+ // time does not include the time to read the response body.
+ ResponseHeaderTimeout time.Duration
+
+ // ExpectContinueTimeout, if non-zero, specifies the amount of
+ // time to wait for a server's first response headers after fully
+ // writing the request headers if the request has an
+ // "Expect: 100-continue" header. Zero means no timeout and
+ // causes the body to be sent immediately, without
+ // waiting for the server to approve.
+ // This time does not include the time to send the request header.
+ ExpectContinueTimeout time.Duration
+
+ // TLSNextProto specifies how the Transport switches to an
+ // alternate protocol (such as HTTP/2) after a TLS ALPN
+ // protocol negotiation. If Transport dials a TLS connection
+ // with a non-empty protocol name and TLSNextProto contains a
+ // map entry for that key (such as "h2"), then the func is
+ // called with the request's authority (such as "example.com"
+ // or "example.com:1234") and the TLS connection. The function
+ // must return a RoundTripper that then handles the request.
+ // If TLSNextProto is not nil, HTTP/2 support is not enabled
+ // automatically.
+ TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper
+
+ // ProxyConnectHeader optionally specifies headers to send to
+ // proxies during CONNECT requests.
+ // To set the header dynamically, see GetProxyConnectHeader.
+ ProxyConnectHeader Header
+
+ // GetProxyConnectHeader optionally specifies a func to return
+ // headers to send to proxyURL during a CONNECT request to the
+ // ip:port target.
+ // If it returns an error, the Transport's RoundTrip fails with
+ // that error. It can return (nil, nil) to not add headers.
+ // If GetProxyConnectHeader is non-nil, ProxyConnectHeader is
+ // ignored.
+ GetProxyConnectHeader func(ctx context.Context, proxyURL *url.URL, target string) (Header, error)
+
+ // MaxResponseHeaderBytes specifies a limit on how many
+ // response bytes are allowed in the server's response
+ // header.
+ //
+ // Zero means to use a default limit.
+ MaxResponseHeaderBytes int64
+
+ // WriteBufferSize specifies the size of the write buffer used
+ // when writing to the transport.
+ // If zero, a default (currently 4KB) is used.
+ WriteBufferSize int
+
+ // ReadBufferSize specifies the size of the read buffer used
+ // when reading from the transport.
+ // If zero, a default (currently 4KB) is used.
+ ReadBufferSize int
+
+ // nextProtoOnce guards initialization of TLSNextProto and
+ // h2transport (via onceSetNextProtoDefaults)
+ nextProtoOnce sync.Once
+ h2transport h2Transport // non-nil if http2 wired up
+ tlsNextProtoWasNil bool // whether TLSNextProto was nil when the Once fired
+
+ // ForceAttemptHTTP2 controls whether HTTP/2 is enabled when a non-zero
+ // Dial, DialTLS, or DialContext func or TLSClientConfig is provided.
+// By default, use of any of those fields conservatively disables HTTP/2.
+ // To use a custom dialer or TLS config and still attempt HTTP/2
+ // upgrades, set this to true.
+ ForceAttemptHTTP2 bool
+}
+
+// A cancelKey is the key of the reqCanceler map.
+// We wrap the *Request in this type since we want to use the original request,
+// not any transient one created by roundTrip.
+type cancelKey struct {
+ req *Request
+}
+
+func (t *Transport) writeBufferSize() int {
+ if t.WriteBufferSize > 0 {
+ return t.WriteBufferSize
+ }
+ return 4 << 10
+}
+
+func (t *Transport) readBufferSize() int {
+ if t.ReadBufferSize > 0 {
+ return t.ReadBufferSize
+ }
+ return 4 << 10
+}
+
+// Clone returns a deep copy of t's exported fields.
+func (t *Transport) Clone() *Transport {
+ t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
+ t2 := &Transport{
+ Proxy: t.Proxy,
+ OnProxyConnectResponse: t.OnProxyConnectResponse,
+ DialContext: t.DialContext,
+ Dial: t.Dial,
+ DialTLS: t.DialTLS,
+ DialTLSContext: t.DialTLSContext,
+ TLSHandshakeTimeout: t.TLSHandshakeTimeout,
+ DisableKeepAlives: t.DisableKeepAlives,
+ DisableCompression: t.DisableCompression,
+ MaxIdleConns: t.MaxIdleConns,
+ MaxIdleConnsPerHost: t.MaxIdleConnsPerHost,
+ MaxConnsPerHost: t.MaxConnsPerHost,
+ IdleConnTimeout: t.IdleConnTimeout,
+ ResponseHeaderTimeout: t.ResponseHeaderTimeout,
+ ExpectContinueTimeout: t.ExpectContinueTimeout,
+ ProxyConnectHeader: t.ProxyConnectHeader.Clone(),
+ GetProxyConnectHeader: t.GetProxyConnectHeader,
+ MaxResponseHeaderBytes: t.MaxResponseHeaderBytes,
+ ForceAttemptHTTP2: t.ForceAttemptHTTP2,
+ WriteBufferSize: t.WriteBufferSize,
+ ReadBufferSize: t.ReadBufferSize,
+ }
+ if t.TLSClientConfig != nil {
+ t2.TLSClientConfig = t.TLSClientConfig.Clone()
+ }
+ if !t.tlsNextProtoWasNil {
+ npm := map[string]func(authority string, c *tls.Conn) RoundTripper{}
+ for k, v := range t.TLSNextProto {
+ npm[k] = v
+ }
+ t2.TLSNextProto = npm
+ }
+ return t2
+}
+
+// h2Transport is the interface we expect to be able to call from
+// net/http against an *http2.Transport that's either bundled into
+// h2_bundle.go or supplied by the user via x/net/http2.
+//
+// We name it with the "h2" prefix to stay out of the "http2" prefix
+// namespace used by x/tools/cmd/bundle for h2_bundle.go.
+type h2Transport interface {
+ CloseIdleConnections()
+}
+
+func (t *Transport) hasCustomTLSDialer() bool {
+ return t.DialTLS != nil || t.DialTLSContext != nil
+}
+
+var http2client = godebug.New("http2client")
+
+// onceSetNextProtoDefaults initializes TLSNextProto.
+// It must be called via t.nextProtoOnce.Do.
+func (t *Transport) onceSetNextProtoDefaults() {
+ t.tlsNextProtoWasNil = (t.TLSNextProto == nil)
+ if http2client.Value() == "0" {
+ http2client.IncNonDefault()
+ return
+ }
+
+ // If they've already configured http2 with
+ // golang.org/x/net/http2 instead of the bundled copy, try to
+ // get at its http2.Transport value (via the "https"
+ // altproto map) so we can call CloseIdleConnections on it if
+ // requested. (Issue 22891)
+ altProto, _ := t.altProto.Load().(map[string]RoundTripper)
+ if rv := reflect.ValueOf(altProto["https"]); rv.IsValid() && rv.Type().Kind() == reflect.Struct && rv.Type().NumField() == 1 {
+ if v := rv.Field(0); v.CanInterface() {
+ if h2i, ok := v.Interface().(h2Transport); ok {
+ t.h2transport = h2i
+ return
+ }
+ }
+ }
+
+ if t.TLSNextProto != nil {
+ // This is the documented way to disable http2 on a
+ // Transport.
+ return
+ }
+ if !t.ForceAttemptHTTP2 && (t.TLSClientConfig != nil || t.Dial != nil || t.DialContext != nil || t.hasCustomTLSDialer()) {
+ // Be conservative and don't automatically enable
+ // http2 if they've specified a custom TLS config or
+ // custom dialers. Let them opt-in themselves via
+ // http2.ConfigureTransport so we don't surprise them
+ // by modifying their tls.Config. Issue 14275.
+ // However, if ForceAttemptHTTP2 is true, it overrides the above checks.
+ return
+ }
+ if omitBundledHTTP2 {
+ return
+ }
+ t2, err := http2configureTransports(t)
+ if err != nil {
+ log.Printf("Error enabling Transport HTTP/2 support: %v", err)
+ return
+ }
+ t.h2transport = t2
+
+ // Auto-configure the http2.Transport's MaxHeaderListSize from
+ // the http.Transport's MaxResponseHeaderBytes. They don't
+ // exactly mean the same thing, but they're close.
+ //
+// TODO: also add this to x/net/http2.ConfigureTransports, behind
+ // a +build go1.7 build tag:
+ if limit1 := t.MaxResponseHeaderBytes; limit1 != 0 && t2.MaxHeaderListSize == 0 {
+ const h2max = 1<<32 - 1
+ if limit1 >= h2max {
+ t2.MaxHeaderListSize = h2max
+ } else {
+ t2.MaxHeaderListSize = uint32(limit1)
+ }
+ }
+}
+
+// ProxyFromEnvironment returns the URL of the proxy to use for a
+// given request, as indicated by the environment variables
+// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions
+// thereof). Requests use the proxy from the environment variable
+// matching their scheme, unless excluded by NO_PROXY.
+//
+// The environment values may be either a complete URL or a
+// "host[:port]", in which case the "http" scheme is assumed.
+// The schemes "http", "https", and "socks5" are supported.
+// An error is returned if the value is a different form.
+//
+// A nil URL and nil error are returned if no proxy is defined in the
+// environment, or a proxy should not be used for the given request,
+// as defined by NO_PROXY.
+//
+// As a special case, if req.URL.Host is "localhost" (with or without
+// a port number), then a nil URL and nil error will be returned.
+func ProxyFromEnvironment(req *Request) (*url.URL, error) {
+ return envProxyFunc()(req.URL)
+}
+
+// ProxyURL returns a proxy function (for use in a [Transport])
+// that always returns the same URL.
+func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
+ return func(*Request) (*url.URL, error) {
+ return fixedURL, nil
+ }
+}
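+
+// Illustrative sketch (not part of the original source): pinning every
+// request made through a Transport to a single proxy, assuming a
+// hypothetical proxy address:
+//
+//	proxyURL, _ := url.Parse("http://proxy.example.com:3128")
+//	t := &Transport{Proxy: ProxyURL(proxyURL)}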
+
+// transportRequest is a wrapper around a *Request that adds
+// optional extra headers to write and stores any error to return
+// from roundTrip.
+type transportRequest struct {
+ *Request // original request, not to be mutated
+ extra Header // extra headers to write, or nil
+ trace *httptrace.ClientTrace // optional
+ cancelKey cancelKey
+
+ mu sync.Mutex // guards err
+ err error // first setError value for mapRoundTripError to consider
+}
+
+func (tr *transportRequest) extraHeaders() Header {
+ if tr.extra == nil {
+ tr.extra = make(Header)
+ }
+ return tr.extra
+}
+
+func (tr *transportRequest) setError(err error) {
+ tr.mu.Lock()
+ if tr.err == nil {
+ tr.err = err
+ }
+ tr.mu.Unlock()
+}
+
+// useRegisteredProtocol reports whether an alternate protocol (as registered
+// with Transport.RegisterProtocol) should be respected for this request.
+func (t *Transport) useRegisteredProtocol(req *Request) bool {
+ if req.URL.Scheme == "https" && req.requiresHTTP1() {
+ // If this request requires HTTP/1, don't use the
+ // "https" alternate protocol, which is used by the
+ // HTTP/2 code to take over requests if there's an
+ // existing cached HTTP/2 connection.
+ return false
+ }
+ return true
+}
+
+// alternateRoundTripper returns the alternate RoundTripper to use
+// for this request if the Request's URL scheme requires one,
+// or nil for the normal case of using the Transport.
+func (t *Transport) alternateRoundTripper(req *Request) RoundTripper {
+ if !t.useRegisteredProtocol(req) {
+ return nil
+ }
+ altProto, _ := t.altProto.Load().(map[string]RoundTripper)
+ return altProto[req.URL.Scheme]
+}
+
+// roundTrip implements a RoundTripper over HTTP.
+func (t *Transport) roundTrip(req *Request) (*Response, error) {
+ t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
+ ctx := req.Context()
+ trace := httptrace.ContextClientTrace(ctx)
+
+ if req.URL == nil {
+ req.closeBody()
+ return nil, errors.New("http: nil Request.URL")
+ }
+ if req.Header == nil {
+ req.closeBody()
+ return nil, errors.New("http: nil Request.Header")
+ }
+ scheme := req.URL.Scheme
+ isHTTP := scheme == "http" || scheme == "https"
+ if isHTTP {
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ req.closeBody()
+ return nil, fmt.Errorf("net/http: invalid header field name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ req.closeBody()
+ // Don't include the value in the error, because it may be sensitive.
+ return nil, fmt.Errorf("net/http: invalid header field value for %q", k)
+ }
+ }
+ }
+ }
+
+ origReq := req
+ cancelKey := cancelKey{origReq}
+ req = setupRewindBody(req)
+
+ if altRT := t.alternateRoundTripper(req); altRT != nil {
+ if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol {
+ return resp, err
+ }
+ var err error
+ req, err = rewindBody(req)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if !isHTTP {
+ req.closeBody()
+ return nil, badStringError("unsupported protocol scheme", scheme)
+ }
+ if req.Method != "" && !validMethod(req.Method) {
+ req.closeBody()
+ return nil, fmt.Errorf("net/http: invalid method %q", req.Method)
+ }
+ if req.URL.Host == "" {
+ req.closeBody()
+ return nil, errors.New("http: no Host in request URL")
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ req.closeBody()
+ return nil, ctx.Err()
+ default:
+ }
+
+ // treq gets modified by roundTrip, so we need to recreate for each retry.
+ treq := &transportRequest{Request: req, trace: trace, cancelKey: cancelKey}
+ cm, err := t.connectMethodForRequest(treq)
+ if err != nil {
+ req.closeBody()
+ return nil, err
+ }
+
+ // Get the cached or newly-created connection to either the
+ // host (for http or https), the http proxy, or the http proxy
+ // pre-CONNECTed to https server. In any case, we'll be ready
+ // to send it requests.
+ pconn, err := t.getConn(treq, cm)
+ if err != nil {
+ t.setReqCanceler(cancelKey, nil)
+ req.closeBody()
+ return nil, err
+ }
+
+ var resp *Response
+ if pconn.alt != nil {
+ // HTTP/2 path.
+ t.setReqCanceler(cancelKey, nil) // not cancelable with CancelRequest
+ resp, err = pconn.alt.RoundTrip(req)
+ } else {
+ resp, err = pconn.roundTrip(treq)
+ }
+ if err == nil {
+ resp.Request = origReq
+ return resp, nil
+ }
+
+ // Failed. Clean up and determine whether to retry.
+ if http2isNoCachedConnError(err) {
+ if t.removeIdleConn(pconn) {
+ t.decConnsPerHost(pconn.cacheKey)
+ }
+ } else if !pconn.shouldRetryRequest(req, err) {
+ // Issue 16465: return underlying net.Conn.Read error from peek,
+ // as we've historically done.
+ if e, ok := err.(nothingWrittenError); ok {
+ err = e.error
+ }
+ if e, ok := err.(transportReadFromServerError); ok {
+ err = e.err
+ }
+ if b, ok := req.Body.(*readTrackingBody); ok && !b.didClose {
+ // Issue 49621: Close the request body if pconn.roundTrip
+ // didn't do so already. This can happen if the pconn
+ // write loop exits without reading the write request.
+ req.closeBody()
+ }
+ return nil, err
+ }
+ testHookRoundTripRetried()
+
+ // Rewind the body if we're able to.
+ req, err = rewindBody(req)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+var errCannotRewind = errors.New("net/http: cannot rewind body after connection loss")
+
+type readTrackingBody struct {
+ io.ReadCloser
+ didRead bool
+ didClose bool
+}
+
+func (r *readTrackingBody) Read(data []byte) (int, error) {
+ r.didRead = true
+ return r.ReadCloser.Read(data)
+}
+
+func (r *readTrackingBody) Close() error {
+ r.didClose = true
+ return r.ReadCloser.Close()
+}
+
+// setupRewindBody returns a new request with a custom body wrapper
+// that can report whether the body needs rewinding.
+// This lets rewindBody avoid an error result when the request
+// does not have GetBody but the body hasn't been read at all yet.
+func setupRewindBody(req *Request) *Request {
+ if req.Body == nil || req.Body == NoBody {
+ return req
+ }
+ newReq := *req
+ newReq.Body = &readTrackingBody{ReadCloser: req.Body}
+ return &newReq
+}
+
+// rewindBody returns a new request with the body rewound.
+// It returns req unmodified if the body does not need rewinding.
+// rewindBody takes care of closing req.Body when appropriate
+// (in all cases except when rewindBody returns req unmodified).
+func rewindBody(req *Request) (rewound *Request, err error) {
+ if req.Body == nil || req.Body == NoBody || (!req.Body.(*readTrackingBody).didRead && !req.Body.(*readTrackingBody).didClose) {
+ return req, nil // nothing to rewind
+ }
+ if !req.Body.(*readTrackingBody).didClose {
+ req.closeBody()
+ }
+ if req.GetBody == nil {
+ return nil, errCannotRewind
+ }
+ body, err := req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = &readTrackingBody{ReadCloser: body}
+ return &newReq, nil
+}
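+
+// Illustrative sketch (not part of the original source): a retry can only
+// rewind bodies that are replayable. NewRequest sets GetBody automatically
+// for *bytes.Buffer, *bytes.Reader, and *strings.Reader bodies:
+//
+//	req, _ := NewRequest("POST", "http://example.com/", strings.NewReader("payload"))
+//	// req.GetBody != nil, so rewindBody can fetch a fresh reader after a
+//	// connection loss. With an arbitrary io.Reader whose bytes have already
+//	// been partially read, rewindBody fails with errCannotRewind.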
+
+// shouldRetryRequest reports whether we should retry sending a failed
+// HTTP request on a new connection. The non-nil input error is the
+// error from roundTrip.
+func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
+ if http2isNoCachedConnError(err) {
+ // Issue 16582: if the user started a bunch of
+ // requests at once, they can all pick the same conn
+ // and violate the server's max concurrent streams.
+ // Instead, match the HTTP/1 behavior for now and dial
+ // again to get a new TCP connection, rather than failing
+ // this request.
+ return true
+ }
+ if err == errMissingHost {
+ // User error.
+ return false
+ }
+ if !pc.isReused() {
+ // This was a fresh connection. There's no reason the server
+ // should've hung up on us.
+ //
+ // Also, if we retried now, we could loop forever
+ // creating new connections and retrying if the server
+ // is just hanging up on us because it doesn't like
+ // our request (as opposed to sending an error).
+ return false
+ }
+ if _, ok := err.(nothingWrittenError); ok {
+ // We never wrote anything, so it's safe to retry, if there's no body or we
+ // can "rewind" the body with GetBody.
+ return req.outgoingLength() == 0 || req.GetBody != nil
+ }
+ if !req.isReplayable() {
+ // Don't retry non-idempotent requests.
+ return false
+ }
+ if _, ok := err.(transportReadFromServerError); ok {
+ // We got some non-EOF net.Conn.Read failure reading
+ // the 1st response byte from the server.
+ return true
+ }
+ if err == errServerClosedIdle {
+ // The server replied with io.EOF while we were trying to
+ // read the response. Probably an unfortunately timed keep-alive
+ // timeout, just as the client was writing a request.
+ return true
+ }
+ return false // conservatively
+}
+
+// ErrSkipAltProtocol is a sentinel error value defined by Transport.RegisterProtocol.
+var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol")
+
+// RegisterProtocol registers a new protocol with scheme.
+// The [Transport] will pass requests using the given scheme to rt.
+// It is rt's responsibility to simulate HTTP request semantics.
+//
+// RegisterProtocol can be used by other packages to provide
+// implementations of protocol schemes like "ftp" or "file".
+//
+// If rt.RoundTrip returns [ErrSkipAltProtocol], the Transport will
+// handle the [Transport.RoundTrip] itself for that one request, as if the
+// protocol were not registered.
+func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) {
+ t.altMu.Lock()
+ defer t.altMu.Unlock()
+ oldMap, _ := t.altProto.Load().(map[string]RoundTripper)
+ if _, exists := oldMap[scheme]; exists {
+ panic("protocol " + scheme + " already registered")
+ }
+ newMap := make(map[string]RoundTripper)
+ for k, v := range oldMap {
+ newMap[k] = v
+ }
+ newMap[scheme] = rt
+ t.altProto.Store(newMap)
+}
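+
+// Illustrative sketch (not part of the original source): serving "file://"
+// URLs through a Transport via NewFileTransport from this package:
+//
+//	t := &Transport{}
+//	t.RegisterProtocol("file", NewFileTransport(Dir("/")))
+//	c := &Client{Transport: t}
+//	res, _ := c.Get("file:///etc/passwd")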
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle in
+// a "keep-alive" state. It does not interrupt any connections currently
+// in use.
+func (t *Transport) CloseIdleConnections() {
+ t.nextProtoOnce.Do(t.onceSetNextProtoDefaults)
+ t.idleMu.Lock()
+ m := t.idleConn
+ t.idleConn = nil
+ t.closeIdle = true // close newly idle connections
+ t.idleLRU = connLRU{}
+ t.idleMu.Unlock()
+ for _, conns := range m {
+ for _, pconn := range conns {
+ pconn.close(errCloseIdleConns)
+ }
+ }
+ if t2 := t.h2transport; t2 != nil {
+ t2.CloseIdleConnections()
+ }
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+// CancelRequest should only be called after [Transport.RoundTrip] has returned.
+//
+// Deprecated: Use [Request.WithContext] to create a request with a
+// cancelable context instead. CancelRequest cannot cancel HTTP/2
+// requests.
+func (t *Transport) CancelRequest(req *Request) {
+ t.cancelRequest(cancelKey{req}, errRequestCanceled)
+}
+
+// Cancel an in-flight request, recording the error value.
+// Returns whether the request was canceled.
+func (t *Transport) cancelRequest(key cancelKey, err error) bool {
+ // This function must not return until the cancel func has completed.
+ // See: https://golang.org/issue/34658
+ t.reqMu.Lock()
+ defer t.reqMu.Unlock()
+ cancel := t.reqCanceler[key]
+ delete(t.reqCanceler, key)
+ if cancel != nil {
+ cancel(err)
+ }
+
+ return cancel != nil
+}
+
+//
+// Private implementation past this point.
+//
+
+var (
+ envProxyOnce sync.Once
+ envProxyFuncValue func(*url.URL) (*url.URL, error)
+)
+
+// envProxyFunc returns a function that reads the
+// environment variable to determine the proxy address.
+func envProxyFunc() func(*url.URL) (*url.URL, error) {
+ envProxyOnce.Do(func() {
+ envProxyFuncValue = httpproxy.FromEnvironment().ProxyFunc()
+ })
+ return envProxyFuncValue
+}
+
+// resetProxyConfig is used by tests.
+func resetProxyConfig() {
+ envProxyOnce = sync.Once{}
+ envProxyFuncValue = nil
+}
+
+func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) {
+ cm.targetScheme = treq.URL.Scheme
+ cm.targetAddr = canonicalAddr(treq.URL)
+ if t.Proxy != nil {
+ cm.proxyURL, err = t.Proxy(treq.Request)
+ }
+ cm.onlyH1 = treq.requiresHTTP1()
+ return cm, err
+}
+
+// proxyAuth returns the Proxy-Authorization header to set
+// on requests, if applicable.
+func (cm *connectMethod) proxyAuth() string {
+ if cm.proxyURL == nil {
+ return ""
+ }
+ if u := cm.proxyURL.User; u != nil {
+ username := u.Username()
+ password, _ := u.Password()
+ return "Basic " + basicAuth(username, password)
+ }
+ return ""
+}
+
+// error values for debugging and testing, not seen by users.
+var (
+ errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled")
+ errConnBroken = errors.New("http: putIdleConn: connection is in bad state")
+ errCloseIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
+ errTooManyIdle = errors.New("http: putIdleConn: too many idle connections")
+ errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host")
+ errCloseIdleConns = errors.New("http: CloseIdleConnections called")
+ errReadLoopExiting = errors.New("http: persistConn.readLoop exiting")
+ errIdleConnTimeout = errors.New("http: idle connection timeout")
+
+ // errServerClosedIdle is not seen by users for idempotent requests, but may be
+ // seen by a user if the server shuts down an idle connection and sends its FIN
+ // in flight with already-written POST body bytes from the client.
+ // See https://github.com/golang/go/issues/19943#issuecomment-355607646
+ errServerClosedIdle = errors.New("http: server closed idle connection")
+)
+
+// transportReadFromServerError is used by Transport.readLoop when the
+// 1 byte peek read fails and we're actually anticipating a response.
+// Usually this is just due to the inherent keep-alive shut down race,
+// where the server closed the connection at the same time the client
+// wrote. The underlying err field is usually io.EOF or some
+// ECONNRESET sort of thing which varies by platform. But it might be
+// the user's custom net.Conn.Read error too, so we carry it along for
+// them to return from Transport.RoundTrip.
+type transportReadFromServerError struct {
+ err error
+}
+
+func (e transportReadFromServerError) Unwrap() error { return e.err }
+
+func (e transportReadFromServerError) Error() string {
+ return fmt.Sprintf("net/http: Transport failed to read from server: %v", e.err)
+}
+
+func (t *Transport) putOrCloseIdleConn(pconn *persistConn) {
+ if err := t.tryPutIdleConn(pconn); err != nil {
+ pconn.close(err)
+ }
+}
+
+func (t *Transport) maxIdleConnsPerHost() int {
+ if v := t.MaxIdleConnsPerHost; v != 0 {
+ return v
+ }
+ return DefaultMaxIdleConnsPerHost
+}
+
+// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting
+// a new request.
+// If pconn is no longer needed or not in a good state, tryPutIdleConn returns
+// an error explaining why it wasn't registered.
+// tryPutIdleConn does not close pconn. Use putOrCloseIdleConn instead for that.
+func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
+ if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 {
+ return errKeepAlivesDisabled
+ }
+ if pconn.isBroken() {
+ return errConnBroken
+ }
+ pconn.markReused()
+
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+
+ // HTTP/2 (pconn.alt != nil) connections do not come out of the idle list,
+ // because multiple goroutines can use them simultaneously.
+ // If this is an HTTP/2 connection being “returned,” we're done.
+ if pconn.alt != nil && t.idleLRU.m[pconn] != nil {
+ return nil
+ }
+
+ // Deliver pconn to goroutine waiting for idle connection, if any.
+ // (They may be actively dialing, but this conn is ready first.
+ // Chrome calls this socket late binding.
+ // See https://www.chromium.org/developers/design-documents/network-stack#TOC-Connection-Management.)
+ key := pconn.cacheKey
+ if q, ok := t.idleConnWait[key]; ok {
+ done := false
+ if pconn.alt == nil {
+ // HTTP/1.
+ // Loop over the waiting list until we find a w that isn't done already, and hand it pconn.
+ for q.len() > 0 {
+ w := q.popFront()
+ if w.tryDeliver(pconn, nil) {
+ done = true
+ break
+ }
+ }
+ } else {
+ // HTTP/2.
+ // Can hand the same pconn to everyone in the waiting list,
+ // and we still won't be done: we want to put it in the idle
+ // list unconditionally, for any future clients too.
+ for q.len() > 0 {
+ w := q.popFront()
+ w.tryDeliver(pconn, nil)
+ }
+ }
+ if q.len() == 0 {
+ delete(t.idleConnWait, key)
+ } else {
+ t.idleConnWait[key] = q
+ }
+ if done {
+ return nil
+ }
+ }
+
+ if t.closeIdle {
+ return errCloseIdle
+ }
+ if t.idleConn == nil {
+ t.idleConn = make(map[connectMethodKey][]*persistConn)
+ }
+ idles := t.idleConn[key]
+ if len(idles) >= t.maxIdleConnsPerHost() {
+ return errTooManyIdleHost
+ }
+ for _, exist := range idles {
+ if exist == pconn {
+ log.Fatalf("dup idle pconn %p in freelist", pconn)
+ }
+ }
+ t.idleConn[key] = append(idles, pconn)
+ t.idleLRU.add(pconn)
+ if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns {
+ oldest := t.idleLRU.removeOldest()
+ oldest.close(errTooManyIdle)
+ t.removeIdleConnLocked(oldest)
+ }
+
+ // Set idle timer, but only for HTTP/1 (pconn.alt == nil).
+ // The HTTP/2 implementation manages the idle timer itself
+ // (see idleConnTimeout in h2_bundle.go).
+ if t.IdleConnTimeout > 0 && pconn.alt == nil {
+ if pconn.idleTimer != nil {
+ pconn.idleTimer.Reset(t.IdleConnTimeout)
+ } else {
+ pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
+ }
+ }
+ pconn.idleAt = time.Now()
+ return nil
+}
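+
+// Illustrative sketch (not upstream code): the pooling behavior above is
+// tuned through three exported Transport fields; the values are arbitrary
+// examples:
+//
+//	tr := &Transport{
+//		MaxIdleConns:        100,              // cap across all hosts, LRU-evicted
+//		MaxIdleConnsPerHost: 10,               // default is DefaultMaxIdleConnsPerHost (2)
+//		IdleConnTimeout:     90 * time.Second, // closeConnIfStillIdle fires after this
+//	}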
+
+// queueForIdleConn queues w to receive the next idle connection for w.cm.
+// As an optimization hint to the caller, queueForIdleConn reports whether
+// it successfully delivered an already-idle connection.
+func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) {
+ if t.DisableKeepAlives {
+ return false
+ }
+
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+
+ // Stop closing connections that become idle - we might want one.
+ // (That is, undo the effect of t.CloseIdleConnections.)
+ t.closeIdle = false
+
+ if w == nil {
+ // Happens in test hook.
+ return false
+ }
+
+ // If IdleConnTimeout is set, calculate the oldest
+ // persistConn.idleAt time we're willing to use a cached idle
+ // conn.
+ var oldTime time.Time
+ if t.IdleConnTimeout > 0 {
+ oldTime = time.Now().Add(-t.IdleConnTimeout)
+ }
+
+ // Look for most recently-used idle connection.
+ if list, ok := t.idleConn[w.key]; ok {
+ stop := false
+ delivered := false
+ for len(list) > 0 && !stop {
+ pconn := list[len(list)-1]
+
+ // See whether this connection has been idle too long, considering
+ // only the wall time (the Round(0)), in case this is a laptop or VM
+ // coming out of suspend with previously cached idle connections.
+ tooOld := !oldTime.IsZero() && pconn.idleAt.Round(0).Before(oldTime)
+ if tooOld {
+ // Async cleanup. Launch in its own goroutine (as if a
+ // time.AfterFunc called it); it acquires idleMu, which we're
+ // holding, and does a synchronous net.Conn.Close.
+ go pconn.closeConnIfStillIdle()
+ }
+ if pconn.isBroken() || tooOld {
+ // If either persistConn.readLoop has marked the connection
+ // broken, but Transport.removeIdleConn has not yet removed it
+ // from the idle list, or if this persistConn is too old (it was
+ // idle too long), then ignore it and look for another. In both
+ // cases it's already in the process of being closed.
+ list = list[:len(list)-1]
+ continue
+ }
+ delivered = w.tryDeliver(pconn, nil)
+ if delivered {
+ if pconn.alt != nil {
+ // HTTP/2: multiple clients can share pconn.
+ // Leave it in the list.
+ } else {
+ // HTTP/1: only one client can use pconn.
+ // Remove it from the list.
+ t.idleLRU.remove(pconn)
+ list = list[:len(list)-1]
+ }
+ }
+ stop = true
+ }
+ if len(list) > 0 {
+ t.idleConn[w.key] = list
+ } else {
+ delete(t.idleConn, w.key)
+ }
+ if stop {
+ return delivered
+ }
+ }
+
+ // Register to receive next connection that becomes idle.
+ if t.idleConnWait == nil {
+ t.idleConnWait = make(map[connectMethodKey]wantConnQueue)
+ }
+ q := t.idleConnWait[w.key]
+ q.cleanFront()
+ q.pushBack(w)
+ t.idleConnWait[w.key] = q
+ return false
+}
+
+// removeIdleConn marks pconn as dead.
+func (t *Transport) removeIdleConn(pconn *persistConn) bool {
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+ return t.removeIdleConnLocked(pconn)
+}
+
+// t.idleMu must be held.
+func (t *Transport) removeIdleConnLocked(pconn *persistConn) bool {
+ if pconn.idleTimer != nil {
+ pconn.idleTimer.Stop()
+ }
+ t.idleLRU.remove(pconn)
+ key := pconn.cacheKey
+ pconns := t.idleConn[key]
+ var removed bool
+ switch len(pconns) {
+ case 0:
+ // Nothing
+ case 1:
+ if pconns[0] == pconn {
+ delete(t.idleConn, key)
+ removed = true
+ }
+ default:
+ for i, v := range pconns {
+ if v != pconn {
+ continue
+ }
+ // Slide down, keeping most recently-used
+ // conns at the end.
+ copy(pconns[i:], pconns[i+1:])
+ t.idleConn[key] = pconns[:len(pconns)-1]
+ removed = true
+ break
+ }
+ }
+ return removed
+}
+
+func (t *Transport) setReqCanceler(key cancelKey, fn func(error)) {
+ t.reqMu.Lock()
+ defer t.reqMu.Unlock()
+ if t.reqCanceler == nil {
+ t.reqCanceler = make(map[cancelKey]func(error))
+ }
+ if fn != nil {
+ t.reqCanceler[key] = fn
+ } else {
+ delete(t.reqCanceler, key)
+ }
+}
+
+// replaceReqCanceler replaces an existing cancel function. If there is no cancel function
+// for the request, we don't set the function and return false.
+// Since CancelRequest will clear the canceler, we can use the return value to detect if
+// the request was canceled since the last setReqCanceler call.
+func (t *Transport) replaceReqCanceler(key cancelKey, fn func(error)) bool {
+ t.reqMu.Lock()
+ defer t.reqMu.Unlock()
+ _, ok := t.reqCanceler[key]
+ if !ok {
+ return false
+ }
+ if fn != nil {
+ t.reqCanceler[key] = fn
+ } else {
+ delete(t.reqCanceler, key)
+ }
+ return true
+}
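+
+// Caller-side sketch (not upstream code): the canceler map above backs both
+// the deprecated Transport.CancelRequest and context cancellation; modern
+// code cancels through the request context:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	req, _ := NewRequestWithContext(ctx, "GET", "https://example.com", nil)
+//	resp, err := DefaultClient.Do(req) // on timeout, err wraps context.DeadlineExceeded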
+
+var zeroDialer net.Dialer
+
+func (t *Transport) dial(ctx context.Context, network, addr string) (net.Conn, error) {
+ if t.DialContext != nil {
+ c, err := t.DialContext(ctx, network, addr)
+ if c == nil && err == nil {
+ err = errors.New("net/http: Transport.DialContext hook returned (nil, nil)")
+ }
+ return c, err
+ }
+ if t.Dial != nil {
+ c, err := t.Dial(network, addr)
+ if c == nil && err == nil {
+ err = errors.New("net/http: Transport.Dial hook returned (nil, nil)")
+ }
+ return c, err
+ }
+ return zeroDialer.DialContext(ctx, network, addr)
+}
+
+// A wantConn records state about a wanted connection
+// (that is, an active call to getConn).
+// The conn may be gotten by dialing or by finding an idle connection,
+// or a cancellation may make the conn no longer wanted.
+// These three options are racing against each other and use
+// wantConn to coordinate and agree about the winning outcome.
+type wantConn struct {
+ cm connectMethod
+ key connectMethodKey // cm.key()
+ ready chan struct{} // closed when pc, err pair is delivered
+
+ // hooks for testing to know when dials are done
+ // beforeDial is called in the getConn goroutine when the dial is queued.
+ // afterDial is called when the dial is completed or canceled.
+ beforeDial func()
+ afterDial func()
+
+ mu sync.Mutex // protects ctx, pc, err, close(ready)
+ ctx context.Context // context for dial, cleared after delivered or canceled
+ pc *persistConn
+ err error
+}
+
+// waiting reports whether w is still waiting for an answer (connection or error).
+func (w *wantConn) waiting() bool {
+ select {
+ case <-w.ready:
+ return false
+ default:
+ return true
+ }
+}
+
+// getCtxForDial returns context for dial or nil if connection was delivered or canceled.
+func (w *wantConn) getCtxForDial() context.Context {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.ctx
+}
+
+// tryDeliver attempts to deliver pc, err to w and reports whether it succeeded.
+func (w *wantConn) tryDeliver(pc *persistConn, err error) bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.pc != nil || w.err != nil {
+ return false
+ }
+
+ w.ctx = nil
+ w.pc = pc
+ w.err = err
+ if w.pc == nil && w.err == nil {
+ panic("net/http: internal error: misuse of tryDeliver")
+ }
+ close(w.ready)
+ return true
+}
+
+// cancel marks w as no longer wanting a result (for example, due to cancellation).
+// If a connection has been delivered already, cancel returns it with t.putOrCloseIdleConn.
+func (w *wantConn) cancel(t *Transport, err error) {
+ w.mu.Lock()
+ if w.pc == nil && w.err == nil {
+ close(w.ready) // catch misbehavior in future delivery
+ }
+ pc := w.pc
+ w.ctx = nil
+ w.pc = nil
+ w.err = err
+ w.mu.Unlock()
+
+ if pc != nil {
+ t.putOrCloseIdleConn(pc)
+ }
+}
+
+// A wantConnQueue is a queue of wantConns.
+type wantConnQueue struct {
+ // This is a queue, not a deque.
+ // It is split into two stages - head[headPos:] and tail.
+ // popFront is trivial (headPos++) on the first stage, and
+ // pushBack is trivial (append) on the second stage.
+ // If the first stage is empty, popFront can swap the
+ // first and second stages to remedy the situation.
+ //
+ // This two-stage split is analogous to the use of two lists
+ // in Okasaki's purely functional queue but without the
+ // overhead of reversing the list when swapping stages.
+ head []*wantConn
+ headPos int
+ tail []*wantConn
+}
+
+// len returns the number of items in the queue.
+func (q *wantConnQueue) len() int {
+ return len(q.head) - q.headPos + len(q.tail)
+}
+
+// pushBack adds w to the back of the queue.
+func (q *wantConnQueue) pushBack(w *wantConn) {
+ q.tail = append(q.tail, w)
+}
+
+// popFront removes and returns the wantConn at the front of the queue.
+func (q *wantConnQueue) popFront() *wantConn {
+ if q.headPos >= len(q.head) {
+ if len(q.tail) == 0 {
+ return nil
+ }
+ // Pick up tail as new head, clear tail.
+ q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
+ }
+ w := q.head[q.headPos]
+ q.head[q.headPos] = nil
+ q.headPos++
+ return w
+}
+
+// peekFront returns the wantConn at the front of the queue without removing it.
+func (q *wantConnQueue) peekFront() *wantConn {
+ if q.headPos < len(q.head) {
+ return q.head[q.headPos]
+ }
+ if len(q.tail) > 0 {
+ return q.tail[0]
+ }
+ return nil
+}
+
+// cleanFront pops any wantConns that are no longer waiting from the head of the
+// queue, reporting whether any were popped.
+func (q *wantConnQueue) cleanFront() (cleaned bool) {
+ for {
+ w := q.peekFront()
+ if w == nil || w.waiting() {
+ return cleaned
+ }
+ q.popFront()
+ cleaned = true
+ }
+}
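+
+// Standalone sketch of the two-stage idea (illustrative, with int instead of
+// *wantConn): pushBack appends to tail; popFront consumes head and swaps the
+// stages when head runs dry, so both operations are amortized O(1) with no
+// list reversal:
+//
+//	var head, tail []int
+//	headPos := 0
+//	tail = append(tail, 1, 2, 3) // pushBack
+//	if headPos >= len(head) {    // popFront: swap stages
+//		head, headPos, tail = tail, 0, head[:0]
+//	}
+//	x := head[headPos]
+//	headPos++ // x == 1; the real code also nils out the slot for GC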
+
+func (t *Transport) customDialTLS(ctx context.Context, network, addr string) (conn net.Conn, err error) {
+ if t.DialTLSContext != nil {
+ conn, err = t.DialTLSContext(ctx, network, addr)
+ } else {
+ conn, err = t.DialTLS(network, addr)
+ }
+ if conn == nil && err == nil {
+ err = errors.New("net/http: Transport.DialTLS or DialTLSContext returned (nil, nil)")
+ }
+ return
+}
+
+// getConn dials and creates a new persistConn to the target as
+// specified in the connectMethod. This includes doing a proxy CONNECT
+// and/or setting up TLS. If this doesn't return an error, the persistConn
+// is ready to write requests to.
+func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (pc *persistConn, err error) {
+ req := treq.Request
+ trace := treq.trace
+ ctx := req.Context()
+ if trace != nil && trace.GetConn != nil {
+ trace.GetConn(cm.addr())
+ }
+
+ w := &wantConn{
+ cm: cm,
+ key: cm.key(),
+ ctx: ctx,
+ ready: make(chan struct{}, 1),
+ beforeDial: testHookPrePendingDial,
+ afterDial: testHookPostPendingDial,
+ }
+ defer func() {
+ if err != nil {
+ w.cancel(t, err)
+ }
+ }()
+
+ // Queue for idle connection.
+ if delivered := t.queueForIdleConn(w); delivered {
+ pc := w.pc
+ // Trace only for HTTP/1.
+ // HTTP/2 calls trace.GotConn itself.
+ if pc.alt == nil && trace != nil && trace.GotConn != nil {
+ trace.GotConn(pc.gotIdleConnTrace(pc.idleAt))
+ }
+ // set request canceler to some non-nil function so we
+ // can detect whether it was cleared between now and when
+ // we enter roundTrip
+ t.setReqCanceler(treq.cancelKey, func(error) {})
+ return pc, nil
+ }
+
+ cancelc := make(chan error, 1)
+ t.setReqCanceler(treq.cancelKey, func(err error) { cancelc <- err })
+
+ // Queue for permission to dial.
+ t.queueForDial(w)
+
+ // Wait for completion or cancellation.
+ select {
+ case <-w.ready:
+ // Trace success but only for HTTP/1.
+ // HTTP/2 calls trace.GotConn itself.
+ if w.pc != nil && w.pc.alt == nil && trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{Conn: w.pc.conn, Reused: w.pc.isReused()})
+ }
+ if w.err != nil {
+ // If the request has been canceled, that's probably
+ // what caused w.err; if so, prefer to return the
+ // cancellation error (see golang.org/issue/16049).
+ select {
+ case <-req.Cancel:
+ return nil, errRequestCanceledConn
+ case <-req.Context().Done():
+ return nil, req.Context().Err()
+ case err := <-cancelc:
+ if err == errRequestCanceled {
+ err = errRequestCanceledConn
+ }
+ return nil, err
+ default:
+ // return below
+ }
+ }
+ return w.pc, w.err
+ case <-req.Cancel:
+ return nil, errRequestCanceledConn
+ case <-req.Context().Done():
+ return nil, req.Context().Err()
+ case err := <-cancelc:
+ if err == errRequestCanceled {
+ err = errRequestCanceledConn
+ }
+ return nil, err
+ }
+}
+
+// queueForDial queues w to wait for permission to begin dialing.
+// Once w receives permission to dial, it will do so in a separate goroutine.
+func (t *Transport) queueForDial(w *wantConn) {
+ w.beforeDial()
+ if t.MaxConnsPerHost <= 0 {
+ go t.dialConnFor(w)
+ return
+ }
+
+ t.connsPerHostMu.Lock()
+ defer t.connsPerHostMu.Unlock()
+
+ if n := t.connsPerHost[w.key]; n < t.MaxConnsPerHost {
+ if t.connsPerHost == nil {
+ t.connsPerHost = make(map[connectMethodKey]int)
+ }
+ t.connsPerHost[w.key] = n + 1
+ go t.dialConnFor(w)
+ return
+ }
+
+ if t.connsPerHostWait == nil {
+ t.connsPerHostWait = make(map[connectMethodKey]wantConnQueue)
+ }
+ q := t.connsPerHostWait[w.key]
+ q.cleanFront()
+ q.pushBack(w)
+ t.connsPerHostWait[w.key] = q
+}
+
+// dialConnFor dials on behalf of w and delivers the result to w.
+// dialConnFor has received permission to dial w.cm and is counted in t.connsPerHost[w.cm.key()].
+// If the dial is canceled or unsuccessful, dialConnFor decrements t.connsPerHost[w.cm.key()].
+func (t *Transport) dialConnFor(w *wantConn) {
+ defer w.afterDial()
+ ctx := w.getCtxForDial()
+ if ctx == nil {
+ t.decConnsPerHost(w.key)
+ return
+ }
+
+ pc, err := t.dialConn(ctx, w.cm)
+ delivered := w.tryDeliver(pc, err)
+ if err == nil && (!delivered || pc.alt != nil) {
+ // pconn was not passed to w,
+ // or it is HTTP/2 and can be shared.
+ // Add to the idle connection pool.
+ t.putOrCloseIdleConn(pc)
+ }
+ if err != nil {
+ t.decConnsPerHost(w.key)
+ }
+}
+
+// decConnsPerHost decrements the per-host connection count for key,
+// which may in turn give a different waiting goroutine permission to dial.
+func (t *Transport) decConnsPerHost(key connectMethodKey) {
+ if t.MaxConnsPerHost <= 0 {
+ return
+ }
+
+ t.connsPerHostMu.Lock()
+ defer t.connsPerHostMu.Unlock()
+ n := t.connsPerHost[key]
+ if n == 0 {
+ // Shouldn't happen, but if it does, the counting is buggy and could
+ // easily lead to a silent deadlock, so report the problem loudly.
+ panic("net/http: internal error: connCount underflow")
+ }
+
+ // Can we hand this count to a goroutine still waiting to dial?
+ // (Some goroutines on the wait list may have timed out or
+ // gotten a connection another way. If they're all gone,
+ // we don't want to kick off any spurious dial operations.)
+ if q := t.connsPerHostWait[key]; q.len() > 0 {
+ done := false
+ for q.len() > 0 {
+ w := q.popFront()
+ if w.waiting() {
+ go t.dialConnFor(w)
+ done = true
+ break
+ }
+ }
+ if q.len() == 0 {
+ delete(t.connsPerHostWait, key)
+ } else {
+ // q is a value (like a slice), so we have to store
+ // the updated q back into the map.
+ t.connsPerHostWait[key] = q
+ }
+ if done {
+ return
+ }
+ }
+
+ // Otherwise, decrement the recorded count.
+ if n--; n == 0 {
+ delete(t.connsPerHost, key)
+ } else {
+ t.connsPerHost[key] = n
+ }
+}
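+
+// Illustrative sketch (not upstream code): MaxConnsPerHost is the only knob
+// driving queueForDial and decConnsPerHost. With it set, at most that many
+// connections per host exist (dialing, in use, and idle combined); extra
+// callers wait in connsPerHostWait until a slot is freed here:
+//
+//	tr := &Transport{MaxConnsPerHost: 2} // example value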
+
+// Add TLS to a persistent connection, i.e. negotiate a TLS session. If pconn is already a TLS
+// tunnel, this function establishes a nested TLS session inside the encrypted channel.
+// The remote endpoint's name may be overridden by TLSClientConfig.ServerName.
+func (pconn *persistConn) addTLS(ctx context.Context, name string, trace *httptrace.ClientTrace) error {
+ // Initiate TLS and check remote host name against certificate.
+ cfg := cloneTLSConfig(pconn.t.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = name
+ }
+ if pconn.cacheKey.onlyH1 {
+ cfg.NextProtos = nil
+ }
+ plainConn := pconn.conn
+ tlsConn := tls.Client(plainConn, cfg)
+ errc := make(chan error, 2)
+ var timer *time.Timer // for canceling TLS handshake
+ if d := pconn.t.TLSHandshakeTimeout; d != 0 {
+ timer = time.AfterFunc(d, func() {
+ errc <- tlsHandshakeTimeoutError{}
+ })
+ }
+ go func() {
+ if trace != nil && trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ err := tlsConn.HandshakeContext(ctx)
+ if timer != nil {
+ timer.Stop()
+ }
+ errc <- err
+ }()
+ if err := <-errc; err != nil {
+ plainConn.Close()
+ if err == (tlsHandshakeTimeoutError{}) {
+ // Now that we have closed the connection,
+ // wait for the call to HandshakeContext to return.
+ <-errc
+ }
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tls.ConnectionState{}, err)
+ }
+ return err
+ }
+ cs := tlsConn.ConnectionState()
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(cs, nil)
+ }
+ pconn.tlsState = &cs
+ pconn.conn = tlsConn
+ return nil
+}
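+
+// Illustrative sketch (not upstream code): both knobs consulted above are
+// exported. ServerName only needs to be set when it must differ from the
+// dialed host (the host name here is hypothetical):
+//
+//	tr := &Transport{
+//		TLSHandshakeTimeout: 10 * time.Second,
+//		TLSClientConfig:     &tls.Config{ServerName: "backend.internal.example"},
+//	}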
+
+type erringRoundTripper interface {
+ RoundTripErr() error
+}
+
+func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *persistConn, err error) {
+ pconn = &persistConn{
+ t: t,
+ cacheKey: cm.key(),
+ reqch: make(chan requestAndChan, 1),
+ writech: make(chan writeRequest, 1),
+ closech: make(chan struct{}),
+ writeErrCh: make(chan error, 1),
+ writeLoopDone: make(chan struct{}),
+ }
+ trace := httptrace.ContextClientTrace(ctx)
+ wrapErr := func(err error) error {
+ if cm.proxyURL != nil {
+ // Return a typed error, per Issue 16997
+ return &net.OpError{Op: "proxyconnect", Net: "tcp", Err: err}
+ }
+ return err
+ }
+ if cm.scheme() == "https" && t.hasCustomTLSDialer() {
+ var err error
+ pconn.conn, err = t.customDialTLS(ctx, "tcp", cm.addr())
+ if err != nil {
+ return nil, wrapErr(err)
+ }
+ if tc, ok := pconn.conn.(*tls.Conn); ok {
+ // Handshake here, in case DialTLS didn't. TLSNextProto below
+ // depends on it for knowing the connection state.
+ if trace != nil && trace.TLSHandshakeStart != nil {
+ trace.TLSHandshakeStart()
+ }
+ if err := tc.HandshakeContext(ctx); err != nil {
+ go pconn.conn.Close()
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(tls.ConnectionState{}, err)
+ }
+ return nil, err
+ }
+ cs := tc.ConnectionState()
+ if trace != nil && trace.TLSHandshakeDone != nil {
+ trace.TLSHandshakeDone(cs, nil)
+ }
+ pconn.tlsState = &cs
+ }
+ } else {
+ conn, err := t.dial(ctx, "tcp", cm.addr())
+ if err != nil {
+ return nil, wrapErr(err)
+ }
+ pconn.conn = conn
+ if cm.scheme() == "https" {
+ var firstTLSHost string
+ if firstTLSHost, _, err = net.SplitHostPort(cm.addr()); err != nil {
+ return nil, wrapErr(err)
+ }
+ if err = pconn.addTLS(ctx, firstTLSHost, trace); err != nil {
+ return nil, wrapErr(err)
+ }
+ }
+ }
+
+ // Proxy setup.
+ switch {
+ case cm.proxyURL == nil:
+ // Do nothing. Not using a proxy.
+ case cm.proxyURL.Scheme == "socks5":
+ conn := pconn.conn
+ d := socksNewDialer("tcp", conn.RemoteAddr().String())
+ if u := cm.proxyURL.User; u != nil {
+ auth := &socksUsernamePassword{
+ Username: u.Username(),
+ }
+ auth.Password, _ = u.Password()
+ d.AuthMethods = []socksAuthMethod{
+ socksAuthMethodNotRequired,
+ socksAuthMethodUsernamePassword,
+ }
+ d.Authenticate = auth.Authenticate
+ }
+ if _, err := d.DialWithConn(ctx, conn, "tcp", cm.targetAddr); err != nil {
+ conn.Close()
+ return nil, err
+ }
+ case cm.targetScheme == "http":
+ pconn.isProxy = true
+ if pa := cm.proxyAuth(); pa != "" {
+ pconn.mutateHeaderFunc = func(h Header) {
+ h.Set("Proxy-Authorization", pa)
+ }
+ }
+ case cm.targetScheme == "https":
+ conn := pconn.conn
+ var hdr Header
+ if t.GetProxyConnectHeader != nil {
+ var err error
+ hdr, err = t.GetProxyConnectHeader(ctx, cm.proxyURL, cm.targetAddr)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ } else {
+ hdr = t.ProxyConnectHeader
+ }
+ if hdr == nil {
+ hdr = make(Header)
+ }
+ if pa := cm.proxyAuth(); pa != "" {
+ hdr = hdr.Clone()
+ hdr.Set("Proxy-Authorization", pa)
+ }
+ connectReq := &Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: cm.targetAddr},
+ Host: cm.targetAddr,
+ Header: hdr,
+ }
+
+ // If there's no done channel (no deadline or cancellation
+ // from the caller possible), at least set some (long)
+ // timeout here. This will make sure we don't block forever
+ // and leak a goroutine if the connection stops replying
+ // after the TCP connect.
+ connectCtx := ctx
+ if ctx.Done() == nil {
+ newCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
+ defer cancel()
+ connectCtx = newCtx
+ }
+
+ didReadResponse := make(chan struct{}) // closed after CONNECT write+read is done or fails
+ var (
+ resp *Response
+ err error // write or read error
+ )
+ // Write the CONNECT request & read the response.
+ go func() {
+ defer close(didReadResponse)
+ err = connectReq.Write(conn)
+ if err != nil {
+ return
+ }
+ // Okay to use and discard buffered reader here, because
+ // TLS server will not speak until spoken to.
+ br := bufio.NewReader(conn)
+ resp, err = ReadResponse(br, connectReq)
+ }()
+ select {
+ case <-connectCtx.Done():
+ conn.Close()
+ <-didReadResponse
+ return nil, connectCtx.Err()
+ case <-didReadResponse:
+ // resp or err now set
+ }
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ if t.OnProxyConnectResponse != nil {
+ err = t.OnProxyConnectResponse(ctx, cm.proxyURL, connectReq, resp)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if resp.StatusCode != 200 {
+ _, text, ok := strings.Cut(resp.Status, " ")
+ conn.Close()
+ if !ok {
+ return nil, errors.New("unknown status code")
+ }
+ return nil, errors.New(text)
+ }
+ }
+
+ if cm.proxyURL != nil && cm.targetScheme == "https" {
+ if err := pconn.addTLS(ctx, cm.tlsHost(), trace); err != nil {
+ return nil, err
+ }
+ }
+
+ if s := pconn.tlsState; s != nil && s.NegotiatedProtocolIsMutual && s.NegotiatedProtocol != "" {
+ if next, ok := t.TLSNextProto[s.NegotiatedProtocol]; ok {
+ alt := next(cm.targetAddr, pconn.conn.(*tls.Conn))
+ if e, ok := alt.(erringRoundTripper); ok {
+ // pconn.conn was closed by next (http2configureTransports.upgradeFn).
+ return nil, e.RoundTripErr()
+ }
+ return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt}, nil
+ }
+ }
+
+ pconn.br = bufio.NewReaderSize(pconn, t.readBufferSize())
+ pconn.bw = bufio.NewWriterSize(persistConnWriter{pconn}, t.writeBufferSize())
+
+ go pconn.readLoop()
+ go pconn.writeLoop()
+ return pconn, nil
+}
+
+// persistConnWriter is the io.Writer written to by pc.bw.
+// It accumulates the number of bytes written to the underlying conn,
+// so the retry logic can determine whether any bytes made it across
+// the wire.
+// This is exactly 1 pointer field wide so it can go into an interface
+// without allocation.
+type persistConnWriter struct {
+ pc *persistConn
+}
+
+func (w persistConnWriter) Write(p []byte) (n int, err error) {
+ n, err = w.pc.conn.Write(p)
+ w.pc.nwrite += int64(n)
+ return
+}
+
+// ReadFrom exposes persistConnWriter's underlying Conn to io.Copy and if
+// the Conn implements io.ReaderFrom, it can take advantage of optimizations
+// such as sendfile.
+func (w persistConnWriter) ReadFrom(r io.Reader) (n int64, err error) {
+ n, err = io.Copy(w.pc.conn, r)
+ w.pc.nwrite += n
+ return
+}
+
+var _ io.ReaderFrom = (*persistConnWriter)(nil)
+
+// connectMethod is the map key (in its String form) for keeping persistent
+// TCP connections alive for subsequent HTTP requests.
+//
+// A connect method may be of the following types:
+//
+// connectMethod.key().String() Description
+// ------------------------------ -------------------------
+// |http|foo.com http directly to server, no proxy
+// |https|foo.com https directly to server, no proxy
+// |https,h1|foo.com https directly to server w/o HTTP/2, no proxy
+// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
+// http://proxy.com|http http to proxy, http to anywhere after that
+// socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com
+// socks5://proxy.com|https|foo.com socks5 to proxy, then https to foo.com
+// https://proxy.com|https|foo.com https to proxy, then CONNECT to foo.com
+// https://proxy.com|http https to proxy, http to anywhere after that
+type connectMethod struct {
+ _ incomparable
+ proxyURL *url.URL // nil for no proxy, else full proxy URL
+ targetScheme string // "http" or "https"
+ // If proxyURL specifies an http or https proxy, and targetScheme is http (not https),
+ // then targetAddr is not included in the connect method key, because the socket can
+ // be reused for different targetAddr values.
+ targetAddr string
+ onlyH1 bool // whether to disable HTTP/2 and force HTTP/1
+}
+
+func (cm *connectMethod) key() connectMethodKey {
+ proxyStr := ""
+ targetAddr := cm.targetAddr
+ if cm.proxyURL != nil {
+ proxyStr = cm.proxyURL.String()
+ if (cm.proxyURL.Scheme == "http" || cm.proxyURL.Scheme == "https") && cm.targetScheme == "http" {
+ targetAddr = ""
+ }
+ }
+ return connectMethodKey{
+ proxy: proxyStr,
+ scheme: cm.targetScheme,
+ addr: targetAddr,
+ onlyH1: cm.onlyH1,
+ }
+}
+
+// scheme returns the first hop scheme: http, https, or socks5
+func (cm *connectMethod) scheme() string {
+ if cm.proxyURL != nil {
+ return cm.proxyURL.Scheme
+ }
+ return cm.targetScheme
+}
+
+// addr returns the first hop "host:port" to which we need to TCP connect.
+func (cm *connectMethod) addr() string {
+ if cm.proxyURL != nil {
+ return canonicalAddr(cm.proxyURL)
+ }
+ return cm.targetAddr
+}
+
+// tlsHost returns the host name to match against the peer's
+// TLS certificate.
+func (cm *connectMethod) tlsHost() string {
+ h := cm.targetAddr
+ if hasPort(h) {
+ h = h[:strings.LastIndex(h, ":")]
+ }
+ return h
+}
+
+// connectMethodKey is the map key version of connectMethod, with a
+// stringified proxy URL (or the empty string) instead of a pointer to
+// a URL.
+type connectMethodKey struct {
+ proxy, scheme, addr string
+ onlyH1 bool
+}
+
+func (k connectMethodKey) String() string {
+ // Only used by tests.
+ var h1 string
+ if k.onlyH1 {
+ h1 = ",h1"
+ }
+ return fmt.Sprintf("%s|%s%s|%s", k.proxy, k.scheme, h1, k.addr)
+}
+
+// persistConn wraps a connection, usually a persistent one
+// (but may be used for non-keep-alive requests as well)
+type persistConn struct {
+ // alt optionally specifies the TLS NextProto RoundTripper.
+ // This is used for HTTP/2 today and future protocols later.
+ // If it's non-nil, the rest of the fields are unused.
+ alt RoundTripper
+
+ t *Transport
+ cacheKey connectMethodKey
+ conn net.Conn
+ tlsState *tls.ConnectionState
+ br *bufio.Reader // from conn
+ bw *bufio.Writer // to conn
+ nwrite int64 // bytes written
+ reqch chan requestAndChan // written by roundTrip; read by readLoop
+ writech chan writeRequest // written by roundTrip; read by writeLoop
+ closech chan struct{} // closed when conn closed
+ isProxy bool
+ sawEOF bool // whether we've seen EOF from conn; owned by readLoop
+ readLimit int64 // bytes allowed to be read; owned by readLoop
+ // writeErrCh passes the request write error (usually nil)
+ // from the writeLoop goroutine to the readLoop which passes
+ // it off to the res.Body reader, which then uses it to decide
+ // whether or not a connection can be reused. Issue 7569.
+ writeErrCh chan error
+
+ writeLoopDone chan struct{} // closed when write loop ends
+
+ // Both guarded by Transport.idleMu:
+ idleAt time.Time // time it last became idle
+ idleTimer *time.Timer // holding an AfterFunc to close it
+
+ mu sync.Mutex // guards following fields
+ numExpectedResponses int
+ closed error // set non-nil when conn is closed, before closech is closed
+ canceledErr error // set non-nil if conn is canceled
+ broken bool // an error has happened on this connection; marked broken so it's not reused.
+ reused bool // whether conn has had successful request/response and is being reused.
+ // mutateHeaderFunc is an optional func to modify extra
+ // headers on each outbound request before it's written. (the
+ // original Request given to RoundTrip is not modified)
+ mutateHeaderFunc func(Header)
+}
+
+func (pc *persistConn) maxHeaderResponseSize() int64 {
+ if v := pc.t.MaxResponseHeaderBytes; v != 0 {
+ return v
+ }
+ return 10 << 20 // conservative default; same as http2
+}
+
+func (pc *persistConn) Read(p []byte) (n int, err error) {
+ if pc.readLimit <= 0 {
+ return 0, fmt.Errorf("read limit of %d bytes exhausted", pc.maxHeaderResponseSize())
+ }
+ if int64(len(p)) > pc.readLimit {
+ p = p[:pc.readLimit]
+ }
+ n, err = pc.conn.Read(p)
+ if err == io.EOF {
+ pc.sawEOF = true
+ }
+ pc.readLimit -= int64(n)
+ return
+}
+
+// isBroken reports whether this connection is in a known broken state.
+func (pc *persistConn) isBroken() bool {
+ pc.mu.Lock()
+ b := pc.closed != nil
+ pc.mu.Unlock()
+ return b
+}
+
+// canceled returns non-nil if the connection was closed due to
+// CancelRequest or due to context cancellation.
+func (pc *persistConn) canceled() error {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ return pc.canceledErr
+}
+
+// isReused reports whether this connection has been used before.
+func (pc *persistConn) isReused() bool {
+ pc.mu.Lock()
+ r := pc.reused
+ pc.mu.Unlock()
+ return r
+}
+
+func (pc *persistConn) gotIdleConnTrace(idleAt time.Time) (t httptrace.GotConnInfo) {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ t.Reused = pc.reused
+ t.Conn = pc.conn
+ t.WasIdle = true
+ if !idleAt.IsZero() {
+ t.IdleTime = time.Since(idleAt)
+ }
+ return
+}
+
+func (pc *persistConn) cancelRequest(err error) {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ pc.canceledErr = err
+ pc.closeLocked(errRequestCanceled)
+}
+
+// closeConnIfStillIdle closes the connection if it's still sitting idle.
+// This is what's called by the persistConn's idleTimer, and is run in its
+// own goroutine.
+func (pc *persistConn) closeConnIfStillIdle() {
+ t := pc.t
+ t.idleMu.Lock()
+ defer t.idleMu.Unlock()
+ if _, ok := t.idleLRU.m[pc]; !ok {
+ // Not idle.
+ return
+ }
+ t.removeIdleConnLocked(pc)
+ pc.close(errIdleConnTimeout)
+}
+
+// mapRoundTripError returns the appropriate error value for
+// persistConn.roundTrip.
+//
+// The provided err is the first error that (*persistConn).roundTrip
+// happened to receive from its select statement.
+//
+// The startBytesWritten value should be the value of pc.nwrite before the roundTrip
+// started writing the request.
+func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error {
+ if err == nil {
+ return nil
+ }
+
+ // Wait for the writeLoop goroutine to terminate to avoid data
+ // races on callers who mutate the request on failure.
+ //
+ // When resc in pc.roundTrip and hence rc.ch receives a responseAndError
+ // with a non-nil error it implies that the persistConn is either closed
+ // or closing. Waiting on pc.writeLoopDone is hence safe as all callers
+ // close closech which in turn ensures writeLoop returns.
+ <-pc.writeLoopDone
+
+ // If the request was canceled, that's better than network
+ // failures that were likely the result of tearing down the
+ // connection.
+ if cerr := pc.canceled(); cerr != nil {
+ return cerr
+ }
+
+ // See if an error was set explicitly.
+ req.mu.Lock()
+ reqErr := req.err
+ req.mu.Unlock()
+ if reqErr != nil {
+ return reqErr
+ }
+
+ if err == errServerClosedIdle {
+ // Don't decorate
+ return err
+ }
+
+ if _, ok := err.(transportReadFromServerError); ok {
+ if pc.nwrite == startBytesWritten {
+ return nothingWrittenError{err}
+ }
+ // Don't decorate
+ return err
+ }
+ if pc.isBroken() {
+ if pc.nwrite == startBytesWritten {
+ return nothingWrittenError{err}
+ }
+ return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %w", err)
+ }
+ return err
+}
+
+// errCallerOwnsConn is an internal sentinel error used when we hand
+// off a writable response.Body to the caller. We use this to prevent
+// closing a net.Conn that is now owned by the caller.
+var errCallerOwnsConn = errors.New("read loop ending; caller owns writable underlying conn")
+
+func (pc *persistConn) readLoop() {
+ closeErr := errReadLoopExiting // default value, if not changed below
+ defer func() {
+ pc.close(closeErr)
+ pc.t.removeIdleConn(pc)
+ }()
+
+ tryPutIdleConn := func(trace *httptrace.ClientTrace) bool {
+ if err := pc.t.tryPutIdleConn(pc); err != nil {
+ closeErr = err
+ if trace != nil && trace.PutIdleConn != nil && err != errKeepAlivesDisabled {
+ trace.PutIdleConn(err)
+ }
+ return false
+ }
+ if trace != nil && trace.PutIdleConn != nil {
+ trace.PutIdleConn(nil)
+ }
+ return true
+ }
+
+ // eofc is used to block caller goroutines reading from Response.Body
+ // at EOF until this goroutine has (potentially) added the connection
+ // back to the idle pool.
+ eofc := make(chan struct{})
+ defer close(eofc) // unblock reader on errors
+
+ // Read this once, before loop starts. (to avoid races in tests)
+ testHookMu.Lock()
+ testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead
+ testHookMu.Unlock()
+
+ alive := true
+ for alive {
+ pc.readLimit = pc.maxHeaderResponseSize()
+ _, err := pc.br.Peek(1)
+
+ pc.mu.Lock()
+ if pc.numExpectedResponses == 0 {
+ pc.readLoopPeekFailLocked(err)
+ pc.mu.Unlock()
+ return
+ }
+ pc.mu.Unlock()
+
+ rc := <-pc.reqch
+ trace := httptrace.ContextClientTrace(rc.req.Context())
+
+ var resp *Response
+ if err == nil {
+ resp, err = pc.readResponse(rc, trace)
+ } else {
+ err = transportReadFromServerError{err}
+ closeErr = err
+ }
+
+ if err != nil {
+ if pc.readLimit <= 0 {
+ err = fmt.Errorf("net/http: server response headers exceeded %d bytes; aborted", pc.maxHeaderResponseSize())
+ }
+
+ select {
+ case rc.ch <- responseAndError{err: err}:
+ case <-rc.callerGone:
+ return
+ }
+ return
+ }
+ pc.readLimit = maxInt64 // effectively no limit for response bodies
+
+ pc.mu.Lock()
+ pc.numExpectedResponses--
+ pc.mu.Unlock()
+
+ bodyWritable := resp.bodyIsWritable()
+ hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0
+
+ if resp.Close || rc.req.Close || resp.StatusCode <= 199 || bodyWritable {
+ // Don't do keep-alive on error if either party requested a close
+ // or we get an unexpected informational (1xx) response.
+ // StatusCode 100 is already handled above.
+ alive = false
+ }
+
+ if !hasBody || bodyWritable {
+ replaced := pc.t.replaceReqCanceler(rc.cancelKey, nil)
+
+ // Put the idle conn back into the pool before we send the response
+ // so if they process it quickly and make another request, they'll
+ // get this same conn. But we use the unbuffered channel 'rc'
+ // to guarantee that persistConn.roundTrip got out of its select
+ // potentially waiting for this persistConn to close.
+ alive = alive &&
+ !pc.sawEOF &&
+ pc.wroteRequest() &&
+ replaced && tryPutIdleConn(trace)
+
+ if bodyWritable {
+ closeErr = errCallerOwnsConn
+ }
+
+ select {
+ case rc.ch <- responseAndError{res: resp}:
+ case <-rc.callerGone:
+ return
+ }
+
+ // Now that they've read from the unbuffered channel, they're safely
+ // out of the select that also waits on this goroutine to die, so
+ // we're allowed to exit now if needed (if alive is false)
+ testHookReadLoopBeforeNextRead()
+ continue
+ }
+
+ waitForBodyRead := make(chan bool, 2)
+ body := &bodyEOFSignal{
+ body: resp.Body,
+ earlyCloseFn: func() error {
+ waitForBodyRead <- false
+ <-eofc // will be closed by deferred call at the end of the function
+ return nil
+
+ },
+ fn: func(err error) error {
+ isEOF := err == io.EOF
+ waitForBodyRead <- isEOF
+ if isEOF {
+ <-eofc // see comment above eofc declaration
+ } else if err != nil {
+ if cerr := pc.canceled(); cerr != nil {
+ return cerr
+ }
+ }
+ return err
+ },
+ }
+
+ resp.Body = body
+ if rc.addedGzip && ascii.EqualFold(resp.Header.Get("Content-Encoding"), "gzip") {
+ resp.Body = &gzipReader{body: body}
+ resp.Header.Del("Content-Encoding")
+ resp.Header.Del("Content-Length")
+ resp.ContentLength = -1
+ resp.Uncompressed = true
+ }
+
+ select {
+ case rc.ch <- responseAndError{res: resp}:
+ case <-rc.callerGone:
+ return
+ }
+
+ // Before looping back to the top of this function and peeking on
+ // the bufio.Reader, wait for the caller goroutine to finish
+ // reading the response body. (or for cancellation or death)
+ select {
+ case bodyEOF := <-waitForBodyRead:
+ replaced := pc.t.replaceReqCanceler(rc.cancelKey, nil) // before pc might return to idle pool
+ alive = alive &&
+ bodyEOF &&
+ !pc.sawEOF &&
+ pc.wroteRequest() &&
+ replaced && tryPutIdleConn(trace)
+ if bodyEOF {
+ eofc <- struct{}{}
+ }
+ case <-rc.req.Cancel:
+ alive = false
+ pc.t.cancelRequest(rc.cancelKey, errRequestCanceled)
+ case <-rc.req.Context().Done():
+ alive = false
+ pc.t.cancelRequest(rc.cancelKey, rc.req.Context().Err())
+ case <-pc.closech:
+ alive = false
+ }
+
+ testHookReadLoopBeforeNextRead()
+ }
+}
+
+func (pc *persistConn) readLoopPeekFailLocked(peekErr error) {
+ if pc.closed != nil {
+ return
+ }
+ if n := pc.br.Buffered(); n > 0 {
+ buf, _ := pc.br.Peek(n)
+ if is408Message(buf) {
+ pc.closeLocked(errServerClosedIdle)
+ return
+ } else {
+ log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", buf, peekErr)
+ }
+ }
+ if peekErr == io.EOF {
+ // common case.
+ pc.closeLocked(errServerClosedIdle)
+ } else {
+ pc.closeLocked(fmt.Errorf("readLoopPeekFailLocked: %w", peekErr))
+ }
+}
+
+// is408Message reports whether buf has the prefix of an
+// HTTP 408 Request Timeout response.
+// See golang.org/issue/32310.
+func is408Message(buf []byte) bool {
+ if len(buf) < len("HTTP/1.x 408") {
+ return false
+ }
+ if string(buf[:7]) != "HTTP/1." {
+ return false
+ }
+ return string(buf[8:12]) == " 408"
+}
+
+// readResponse reads an HTTP response (or two, in the case of "Expect:
+// 100-continue") from the server. It returns the final non-100 one.
+// trace is optional.
+func (pc *persistConn) readResponse(rc requestAndChan, trace *httptrace.ClientTrace) (resp *Response, err error) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ if peek, err := pc.br.Peek(1); err == nil && len(peek) == 1 {
+ trace.GotFirstResponseByte()
+ }
+ }
+ num1xx := 0 // number of informational 1xx responses received
+ const max1xxResponses = 5 // arbitrary bound on number of informational responses
+
+ continueCh := rc.continueCh
+ for {
+ resp, err = ReadResponse(pc.br, rc.req)
+ if err != nil {
+ return
+ }
+ resCode := resp.StatusCode
+ if continueCh != nil {
+ if resCode == 100 {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+ continueCh <- struct{}{}
+ continueCh = nil
+ } else if resCode >= 200 {
+ close(continueCh)
+ continueCh = nil
+ }
+ }
+ is1xx := 100 <= resCode && resCode <= 199
+ // treat 101 as a terminal status, see issue 26161
+ is1xxNonTerminal := is1xx && resCode != StatusSwitchingProtocols
+ if is1xxNonTerminal {
+ num1xx++
+ if num1xx > max1xxResponses {
+ return nil, errors.New("net/http: too many 1xx informational responses")
+ }
+ pc.readLimit = pc.maxHeaderResponseSize() // reset the limit
+ if trace != nil && trace.Got1xxResponse != nil {
+ if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(resp.Header)); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ break
+ }
+ if resp.isProtocolSwitch() {
+ resp.Body = newReadWriteCloserBody(pc.br, pc.conn)
+ }
+
+ resp.TLS = pc.tlsState
+ return
+}
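+
+// Illustrative sketch (not upstream code): the informational responses
+// consumed by the loop above can be observed with httptrace, e.g. for
+// 103 Early Hints:
+//
+//	trace := &httptrace.ClientTrace{
+//		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
+//			log.Printf("got %d, header: %v", code, header)
+//			return nil // a non-nil error aborts the request
+//		},
+//	}
+//	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))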
+
+// waitForContinue returns the function to block until
+// any response, timeout or connection close. After any of them,
+// the function returns a bool which indicates if the body should be sent.
+func (pc *persistConn) waitForContinue(continueCh <-chan struct{}) func() bool {
+ if continueCh == nil {
+ return nil
+ }
+ return func() bool {
+ timer := time.NewTimer(pc.t.ExpectContinueTimeout)
+ defer timer.Stop()
+
+ select {
+ case _, ok := <-continueCh:
+ return ok
+ case <-timer.C:
+ return true
+ case <-pc.closech:
+ return false
+ }
+ }
+}
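+
+// Illustrative sketch (not upstream code): waitForContinue only matters when
+// the caller opts in to the 100-continue handshake:
+//
+//	tr := &Transport{ExpectContinueTimeout: 1 * time.Second}
+//	req, _ := NewRequest("PUT", "https://example.com/upload", strings.NewReader("data"))
+//	req.Header.Set("Expect", "100-continue")
+//	// The body is written once the server replies 100 Continue, or after
+//	// ExpectContinueTimeout elapses with no response; a final (>= 200)
+//	// reply arriving first causes the body to be skipped entirely.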
+
+func newReadWriteCloserBody(br *bufio.Reader, rwc io.ReadWriteCloser) io.ReadWriteCloser {
+ body := &readWriteCloserBody{ReadWriteCloser: rwc}
+ if br.Buffered() != 0 {
+ body.br = br
+ }
+ return body
+}
+
+// readWriteCloserBody is the Response.Body type used when we want to
+// give users write access to the Body through the underlying
+// connection (TCP, unless using custom dialers). This is then
+// the concrete type for a Response.Body on the 101 Switching
+// Protocols response, as used by WebSockets, h2c, etc.
+type readWriteCloserBody struct {
+ _ incomparable
+ br *bufio.Reader // used until empty
+ io.ReadWriteCloser
+}
+
+func (b *readWriteCloserBody) Read(p []byte) (n int, err error) {
+ if b.br != nil {
+ if n := b.br.Buffered(); len(p) > n {
+ p = p[:n]
+ }
+ n, err = b.br.Read(p)
+ if b.br.Buffered() == 0 {
+ b.br = nil
+ }
+ return n, err
+ }
+ return b.ReadWriteCloser.Read(p)
+}
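+
+// Illustrative sketch (not upstream code): on a 101 Switching Protocols
+// response, the Body handed to the caller is this readWriteCloserBody, so a
+// type assertion yields a bidirectional stream (upgradeReq is hypothetical):
+//
+//	resp, _ := DefaultClient.Do(upgradeReq)
+//	if resp.StatusCode == StatusSwitchingProtocols {
+//		if rwc, ok := resp.Body.(io.ReadWriteCloser); ok {
+//			defer rwc.Close()
+//			rwc.Write([]byte("hello")) // speak the upgraded protocol
+//		}
+//	}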
+
+// nothingWrittenError wraps a write error that ended up writing zero bytes.
+type nothingWrittenError struct {
+ error
+}
+
+func (nwe nothingWrittenError) Unwrap() error {
+ return nwe.error
+}
+
+func (pc *persistConn) writeLoop() {
+ defer close(pc.writeLoopDone)
+ for {
+ select {
+ case wr := <-pc.writech:
+ startBytesWritten := pc.nwrite
+ err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra, pc.waitForContinue(wr.continueCh))
+ if bre, ok := err.(requestBodyReadError); ok {
+ err = bre.error
+ // Errors reading from the user's
+ // Request.Body are high priority.
+ // Set it here before sending on the
+ // channels below or calling
+ // pc.close() which tears down
+ // connections and causes other
+ // errors.
+ wr.req.setError(err)
+ }
+ if err == nil {
+ err = pc.bw.Flush()
+ }
+ if err != nil {
+ if pc.nwrite == startBytesWritten {
+ err = nothingWrittenError{err}
+ }
+ }
+ pc.writeErrCh <- err // to the body reader, which might recycle us
+ wr.ch <- err // to the roundTrip function
+ if err != nil {
+ pc.close(err)
+ return
+ }
+ case <-pc.closech:
+ return
+ }
+ }
+}
+
+// maxWriteWaitBeforeConnReuse is how long a Transport RoundTrip
+// will wait to see the Request's Body.Write result after getting a
+// response from the server. See comments in (*persistConn).wroteRequest.
+//
+// In tests, we set this to a large value to avoid flakiness from inconsistent
+// recycling of connections.
+var maxWriteWaitBeforeConnReuse = 50 * time.Millisecond
+
+// wroteRequest is a check before recycling a connection that the previous write
+// (from writeLoop above) happened and was successful.
+func (pc *persistConn) wroteRequest() bool {
+ select {
+ case err := <-pc.writeErrCh:
+ // Common case: the write happened well before the response, so
+ // avoid creating a timer.
+ return err == nil
+ default:
+ // Rare case: the request was written in writeLoop above but
+ // before it could send to pc.writeErrCh, the reader read it
+ // all, processed it, and called us here. In this case, give the
+ // write goroutine a bit of time to finish its send.
+ //
+ // Less rare case: We also get here in the legitimate case of
+ // Issue 7569, where the writer is still writing (or stalled),
+ // but the server has already replied. In this case, we don't
+ // want to wait too long, and we want to return false so this
+ // connection isn't re-used.
+ t := time.NewTimer(maxWriteWaitBeforeConnReuse)
+ defer t.Stop()
+ select {
+ case err := <-pc.writeErrCh:
+ return err == nil
+ case <-t.C:
+ return false
+ }
+ }
+}
+
+// responseAndError is how the goroutine reading from an HTTP/1 server
+// communicates with the goroutine doing the RoundTrip.
+type responseAndError struct {
+ _ incomparable
+ res *Response // set when err is nil; exactly one of res or err is non-nil
+ err error
+}
+
+type requestAndChan struct {
+ _ incomparable
+ req *Request
+ cancelKey cancelKey
+ ch chan responseAndError // unbuffered; always send in select on callerGone
+
+ // whether the Transport (as opposed to the user client code)
+ // added the Accept-Encoding gzip header. If the Transport
+ // set it, only then do we transparently decode the gzip.
+ addedGzip bool
+
+ // Optional blocking chan for Expect: 100-continue (for send).
+ // If the request has an "Expect: 100-continue" header and
+// the server responds 100 Continue, readLoop sends a value
+ // to writeLoop via this chan.
+ continueCh chan<- struct{}
+
+ callerGone <-chan struct{} // closed when roundTrip caller has returned
+}
+
+// A writeRequest is sent by the caller's goroutine to the
+// writeLoop's goroutine to write a request while the read loop
+// concurrently waits on both the write response and the server's
+// reply.
+type writeRequest struct {
+ req *transportRequest
+ ch chan<- error
+
+ // Optional blocking chan for Expect: 100-continue (for receive).
+ // If not nil, writeLoop blocks sending request body until
+ // it receives from this chan.
+ continueCh <-chan struct{}
+}
+
+type httpError struct {
+ err string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.err }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}
+
+// errRequestCanceled is set to be identical to the one from h2 to facilitate
+// testing.
+var errRequestCanceled = http2errRequestCanceled
+var errRequestCanceledConn = errors.New("net/http: request canceled while waiting for connection") // TODO: unify?
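+
+// Illustrative sketch (not upstream code): errTimeout implements net.Error,
+// so callers can detect the response-header timeout without matching on
+// error strings:
+//
+//	resp, err := client.Do(req) // client and req are hypothetical
+//	var nerr net.Error
+//	if errors.As(err, &nerr) && nerr.Timeout() {
+//		// back off and retry, fail fast, etc.
+//	}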
+
+func nop() {}
+
+// testHooks. Always non-nil.
+var (
+ testHookEnterRoundTrip = nop
+ testHookWaitResLoop = nop
+ testHookRoundTripRetried = nop
+ testHookPrePendingDial = nop
+ testHookPostPendingDial = nop
+
+ testHookMu sync.Locker = fakeLocker{} // guards following
+ testHookReadLoopBeforeNextRead = nop
+)
+
+func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
+ testHookEnterRoundTrip()
+ if !pc.t.replaceReqCanceler(req.cancelKey, pc.cancelRequest) {
+ pc.t.putOrCloseIdleConn(pc)
+ return nil, errRequestCanceled
+ }
+ pc.mu.Lock()
+ pc.numExpectedResponses++
+ headerFn := pc.mutateHeaderFunc
+ pc.mu.Unlock()
+
+ if headerFn != nil {
+ headerFn(req.extraHeaders())
+ }
+
+ // Ask for a compressed version if the caller didn't set their
+ // own value for Accept-Encoding. We only attempt to
+ // uncompress the gzip stream if we were the layer that
+ // requested it.
+ requestedGzip := false
+ if !pc.t.DisableCompression &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ req.Method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // https://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ requestedGzip = true
+ req.extraHeaders().Set("Accept-Encoding", "gzip")
+ }
+
+ var continueCh chan struct{}
+ if req.ProtoAtLeast(1, 1) && req.Body != nil && req.expectsContinue() {
+ continueCh = make(chan struct{}, 1)
+ }
+
+ if pc.t.DisableKeepAlives &&
+ !req.wantsClose() &&
+ !isProtocolSwitchHeader(req.Header) {
+ req.extraHeaders().Set("Connection", "close")
+ }
+
+ gone := make(chan struct{})
+ defer close(gone)
+
+ defer func() {
+ if err != nil {
+ pc.t.setReqCanceler(req.cancelKey, nil)
+ }
+ }()
+
+ const debugRoundTrip = false
+
+ // Write the request concurrently with waiting for a response,
+ // in case the server decides to reply before reading our full
+ // request body.
+ startBytesWritten := pc.nwrite
+ writeErrCh := make(chan error, 1)
+ pc.writech <- writeRequest{req, writeErrCh, continueCh}
+
+ resc := make(chan responseAndError)
+ pc.reqch <- requestAndChan{
+ req: req.Request,
+ cancelKey: req.cancelKey,
+ ch: resc,
+ addedGzip: requestedGzip,
+ continueCh: continueCh,
+ callerGone: gone,
+ }
+
+ var respHeaderTimer <-chan time.Time
+ cancelChan := req.Request.Cancel
+ ctxDoneChan := req.Context().Done()
+ pcClosed := pc.closech
+ canceled := false
+ for {
+ testHookWaitResLoop()
+ select {
+ case err := <-writeErrCh:
+ if debugRoundTrip {
+ req.logf("writeErrCh resv: %T/%#v", err, err)
+ }
+ if err != nil {
+ pc.close(fmt.Errorf("write error: %w", err))
+ return nil, pc.mapRoundTripError(req, startBytesWritten, err)
+ }
+ if d := pc.t.ResponseHeaderTimeout; d > 0 {
+ if debugRoundTrip {
+ req.logf("starting timer for %v", d)
+ }
+ timer := time.NewTimer(d)
+ defer timer.Stop() // prevent leaks
+ respHeaderTimer = timer.C
+ }
+ case <-pcClosed:
+ pcClosed = nil
+ if canceled || pc.t.replaceReqCanceler(req.cancelKey, nil) {
+ if debugRoundTrip {
+ req.logf("closech recv: %T %#v", pc.closed, pc.closed)
+ }
+ return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed)
+ }
+ case <-respHeaderTimer:
+ if debugRoundTrip {
+ req.logf("timeout waiting for response headers.")
+ }
+ pc.close(errTimeout)
+ return nil, errTimeout
+ case re := <-resc:
+ if (re.res == nil) == (re.err == nil) {
+ panic(fmt.Sprintf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil))
+ }
+ if debugRoundTrip {
+ req.logf("resc recv: %p, %T/%#v", re.res, re.err, re.err)
+ }
+ if re.err != nil {
+ return nil, pc.mapRoundTripError(req, startBytesWritten, re.err)
+ }
+ return re.res, nil
+ case <-cancelChan:
+ canceled = pc.t.cancelRequest(req.cancelKey, errRequestCanceled)
+ cancelChan = nil
+ case <-ctxDoneChan:
+ canceled = pc.t.cancelRequest(req.cancelKey, req.Context().Err())
+ cancelChan = nil
+ ctxDoneChan = nil
+ }
+ }
+}
+
+// tLogKey is a context WithValue key for test debugging contexts containing
+// a t.Logf func. See export_test.go's Request.WithT method.
+type tLogKey struct{}
+
+func (tr *transportRequest) logf(format string, args ...any) {
+ if logf, ok := tr.Request.Context().Value(tLogKey{}).(func(string, ...any)); ok {
+ logf(time.Now().Format(time.RFC3339Nano)+": "+format, args...)
+ }
+}
+
+// markReused marks this connection as having been successfully used for a
+// request and response.
+func (pc *persistConn) markReused() {
+ pc.mu.Lock()
+ pc.reused = true
+ pc.mu.Unlock()
+}
+
+// close closes the underlying TCP connection and closes
+// the pc.closech channel.
+//
+// The provided err is only for testing and debugging; in normal
+// circumstances it should never be seen by users.
+func (pc *persistConn) close(err error) {
+ pc.mu.Lock()
+ defer pc.mu.Unlock()
+ pc.closeLocked(err)
+}
+
+func (pc *persistConn) closeLocked(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ pc.broken = true
+ if pc.closed == nil {
+ pc.closed = err
+ pc.t.decConnsPerHost(pc.cacheKey)
+ // Close HTTP/1 (pc.alt == nil) connection.
+ // HTTP/2 closes its connection itself.
+ if pc.alt == nil {
+ if err != errCallerOwnsConn {
+ pc.conn.Close()
+ }
+ close(pc.closech)
+ }
+ }
+ pc.mutateHeaderFunc = nil
+}
+
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+ "socks5": "1080",
+}
+
+func idnaASCIIFromURL(url *url.URL) string {
+ addr := url.Hostname()
+ if v, err := idnaASCII(addr); err == nil {
+ addr = v
+ }
+ return addr
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix.
+func canonicalAddr(url *url.URL) string {
+ port := url.Port()
+ if port == "" {
+ port = portMap[url.Scheme]
+ }
+ return net.JoinHostPort(idnaASCIIFromURL(url), port)
+}
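+
+// For illustration, with the scheme-default ports from portMap filled in
+// (hosts are hypothetical):
+//
+//	http://example.com       -> "example.com:80"
+//	https://example.com:8443 -> "example.com:8443"
+//	socks5://proxy.local     -> "proxy.local:1080"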
+
+// bodyEOFSignal is used by the HTTP/1 transport when reading response
+// bodies to make sure we see the end of a response body before
+// proceeding and reading on the connection again.
+//
+// It wraps a ReadCloser but runs fn (if non-nil) at most
+// once, right before its final (error-producing) Read or Close call
+// returns. fn should return the new error to return from Read or Close.
+//
+// If earlyCloseFn is non-nil and Close is called before io.EOF is
+// seen, earlyCloseFn is called instead of fn, and its return value is
+// the return value from Close.
+type bodyEOFSignal struct {
+ body io.ReadCloser
+ mu sync.Mutex // guards following 4 fields
+ closed bool // whether Close has been called
+ rerr error // sticky Read error
+ fn func(error) error // err will be nil on Read io.EOF
+ earlyCloseFn func() error // optional alt Close func used if io.EOF not seen
+}
+
+var errReadOnClosedResBody = errors.New("http: read on closed response body")
+
+func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
+ es.mu.Lock()
+ closed, rerr := es.closed, es.rerr
+ es.mu.Unlock()
+ if closed {
+ return 0, errReadOnClosedResBody
+ }
+ if rerr != nil {
+ return 0, rerr
+ }
+
+ n, err = es.body.Read(p)
+ if err != nil {
+ es.mu.Lock()
+ defer es.mu.Unlock()
+ if es.rerr == nil {
+ es.rerr = err
+ }
+ err = es.condfn(err)
+ }
+ return
+}
+
+func (es *bodyEOFSignal) Close() error {
+ es.mu.Lock()
+ defer es.mu.Unlock()
+ if es.closed {
+ return nil
+ }
+ es.closed = true
+ if es.earlyCloseFn != nil && es.rerr != io.EOF {
+ return es.earlyCloseFn()
+ }
+ err := es.body.Close()
+ return es.condfn(err)
+}
+
+// caller must hold es.mu.
+func (es *bodyEOFSignal) condfn(err error) error {
+ if es.fn == nil {
+ return err
+ }
+ err = es.fn(err)
+ es.fn = nil
+ return err
+}
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ _ incomparable
+ body *bodyEOFSignal // underlying HTTP/1 response body framing
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // any error from gzip.NewReader; sticky
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zr == nil {
+ if gz.zerr == nil {
+ gz.zr, gz.zerr = gzip.NewReader(gz.body)
+ }
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ }
+
+ gz.body.mu.Lock()
+ if gz.body.closed {
+ err = errReadOnClosedResBody
+ }
+ gz.body.mu.Unlock()
+
+ if err != nil {
+ return 0, err
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
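+
+// Illustrative sketch (not upstream code): the transparent decompression
+// above happens only when the Transport itself added Accept-Encoding, and
+// it is visible on the response:
+//
+//	resp, _ := Get("https://example.com/") // hypothetical URL
+//	// If the Transport requested gzip and the server honored it:
+//	//   resp.Uncompressed == true, resp.ContentLength == -1, and the
+//	//   Content-Encoding/Content-Length headers have been removed.
+//	// Set Transport.DisableCompression = true to opt out entirely.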
+
+type tlsHandshakeTimeoutError struct{}
+
+func (tlsHandshakeTimeoutError) Timeout() bool { return true }
+func (tlsHandshakeTimeoutError) Temporary() bool { return true }
+func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" }
+
+// fakeLocker is a sync.Locker which does nothing. It's used to guard
+// test-only fields when not under test, to avoid runtime atomic
+// overhead.
+type fakeLocker struct{}
+
+func (fakeLocker) Lock() {}
+func (fakeLocker) Unlock() {}
+
+// cloneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if
+// cfg is nil. This is safe to call even if cfg is in active use by a TLS
+// client or server.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
+
+type connLRU struct {
+ ll *list.List // list.Element.Value is of type *persistConn
+ m map[*persistConn]*list.Element
+}
+
+// add adds pc to the head of the linked list.
+func (cl *connLRU) add(pc *persistConn) {
+ if cl.ll == nil {
+ cl.ll = list.New()
+ cl.m = make(map[*persistConn]*list.Element)
+ }
+ ele := cl.ll.PushFront(pc)
+ if _, ok := cl.m[pc]; ok {
+ panic("persistConn was already in LRU")
+ }
+ cl.m[pc] = ele
+}
+
+func (cl *connLRU) removeOldest() *persistConn {
+ ele := cl.ll.Back()
+ pc := ele.Value.(*persistConn)
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ return pc
+}
+
+// remove removes pc from cl.
+func (cl *connLRU) remove(pc *persistConn) {
+ if ele, ok := cl.m[pc]; ok {
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ }
+}
+
+// len returns the number of items in the cache.
+func (cl *connLRU) len() int {
+ return len(cl.m)
+}
diff --git a/contrib/go/_std_1.22/src/net/http/transport_default_other.go b/contrib/go/_std_1.22/src/net/http/transport_default_other.go
new file mode 100644
index 0000000000..4f6c5c1271
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/transport_default_other.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !wasm
+
+package http
+
+import (
+ "context"
+ "net"
+)
+
+func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+ return dialer.DialContext
+}
diff --git a/contrib/go/_std_1.22/src/net/http/ya.make b/contrib/go/_std_1.22/src/net/http/ya.make
new file mode 100644
index 0000000000..242f160c55
--- /dev/null
+++ b/contrib/go/_std_1.22/src/net/http/ya.make
@@ -0,0 +1,34 @@
+GO_LIBRARY()
+IF (TRUE)
+ SRCS(
+ client.go
+ clone.go
+ cookie.go
+ doc.go
+ filetransport.go
+ fs.go
+ h2_bundle.go
+ h2_error.go
+ header.go
+ http.go
+ jar.go
+ mapping.go
+ method.go
+ pattern.go
+ request.go
+ response.go
+ responsecontroller.go
+ roundtrip.go
+ routing_index.go
+ routing_tree.go
+ servemux121.go
+ server.go
+ sniff.go
+ socks_bundle.go
+ status.go
+ transfer.go
+ transport.go
+ transport_default_other.go
+ )
+ENDIF()
+END()