aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/go/_std_1.21/src
diff options
context:
space:
mode:
authorAlexSm <alex@ydb.tech>2024-01-04 15:09:05 +0100
committerGitHub <noreply@github.com>2024-01-04 15:09:05 +0100
commitdab291146f6cd7d35684e3a1150e5bb1c412982c (patch)
tree36ef35f6cacb6432845a4a33f940c95871036b32 /contrib/go/_std_1.21/src
parent63660ad5e7512029fd0218e7a636580695a24e1f (diff)
downloadydb-dab291146f6cd7d35684e3a1150e5bb1c412982c.tar.gz
Library import 5, delete go dependencies (#832)
* Library import 5, delete go dependencies * Fix yt client
Diffstat (limited to 'contrib/go/_std_1.21/src')
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/common.go736
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/format.go307
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/reader.go882
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/stat_actime1.go20
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/stat_actime2.go20
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/stat_unix.go101
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/strconv.go327
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/writer.go659
-rw-r--r--contrib/go/_std_1.21/src/archive/tar/ya.make38
-rw-r--r--contrib/go/_std_1.21/src/database/sql/convert.go591
-rw-r--r--contrib/go/_std_1.21/src/database/sql/ctxutil.go146
-rw-r--r--contrib/go/_std_1.21/src/database/sql/sql.go3503
-rw-r--r--contrib/go/_std_1.21/src/database/sql/ya.make25
-rw-r--r--contrib/go/_std_1.21/src/expvar/expvar.go373
-rw-r--r--contrib/go/_std_1.21/src/expvar/ya.make12
-rw-r--r--contrib/go/_std_1.21/src/image/color/color.go347
-rw-r--r--contrib/go/_std_1.21/src/image/color/ya.make17
-rw-r--r--contrib/go/_std_1.21/src/image/color/ycbcr.go373
-rw-r--r--contrib/go/_std_1.21/src/image/format.go109
-rw-r--r--contrib/go/_std_1.21/src/image/geom.go317
-rw-r--r--contrib/go/_std_1.21/src/image/image.go1273
-rw-r--r--contrib/go/_std_1.21/src/image/names.go58
-rw-r--r--contrib/go/_std_1.21/src/image/ya.make31
-rw-r--r--contrib/go/_std_1.21/src/image/ycbcr.go329
-rw-r--r--contrib/go/_std_1.21/src/testing/fstest/mapfs.go244
-rw-r--r--contrib/go/_std_1.21/src/testing/fstest/testfs.go624
-rw-r--r--contrib/go/_std_1.21/src/testing/fstest/ya.make16
-rw-r--r--contrib/go/_std_1.21/src/testing/internal/ya.make3
-rw-r--r--contrib/go/_std_1.21/src/testing/iotest/logger.go54
-rw-r--r--contrib/go/_std_1.21/src/testing/iotest/reader.go268
-rw-r--r--contrib/go/_std_1.21/src/testing/iotest/writer.go35
-rw-r--r--contrib/go/_std_1.21/src/testing/iotest/ya.make20
-rw-r--r--contrib/go/_std_1.21/src/testing/quick/quick.go385
-rw-r--r--contrib/go/_std_1.21/src/testing/quick/ya.make12
-rw-r--r--contrib/go/_std_1.21/src/testing/slogtest/slogtest.go322
-rw-r--r--contrib/go/_std_1.21/src/testing/slogtest/ya.make12
36 files changed, 0 insertions, 12589 deletions
diff --git a/contrib/go/_std_1.21/src/archive/tar/common.go b/contrib/go/_std_1.21/src/archive/tar/common.go
deleted file mode 100644
index dc9d350eb7..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/common.go
+++ /dev/null
@@ -1,736 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tar implements access to tar archives.
-//
-// Tape archives (tar) are a file format for storing a sequence of files that
-// can be read and written in a streaming manner.
-// This package aims to cover most variations of the format,
-// including those produced by GNU and BSD tar tools.
-package tar
-
-import (
- "errors"
- "fmt"
- "internal/godebug"
- "io/fs"
- "math"
- "path"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
-// architectures. If a large value is encountered when decoding, the result
-// stored in Header will be the truncated version.
-
-var tarinsecurepath = godebug.New("tarinsecurepath")
-
-var (
- ErrHeader = errors.New("archive/tar: invalid tar header")
- ErrWriteTooLong = errors.New("archive/tar: write too long")
- ErrFieldTooLong = errors.New("archive/tar: header field too long")
- ErrWriteAfterClose = errors.New("archive/tar: write after close")
- ErrInsecurePath = errors.New("archive/tar: insecure file path")
- errMissData = errors.New("archive/tar: sparse file references non-existent data")
- errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
- errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
-)
-
-type headerError []string
-
-func (he headerError) Error() string {
- const prefix = "archive/tar: cannot encode header"
- var ss []string
- for _, s := range he {
- if s != "" {
- ss = append(ss, s)
- }
- }
- if len(ss) == 0 {
- return prefix
- }
- return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and "))
-}
-
-// Type flags for Header.Typeflag.
-const (
- // Type '0' indicates a regular file.
- TypeReg = '0'
-
- // Deprecated: Use TypeReg instead.
- TypeRegA = '\x00'
-
- // Type '1' to '6' are header-only flags and may not have a data body.
- TypeLink = '1' // Hard link
- TypeSymlink = '2' // Symbolic link
- TypeChar = '3' // Character device node
- TypeBlock = '4' // Block device node
- TypeDir = '5' // Directory
- TypeFifo = '6' // FIFO node
-
- // Type '7' is reserved.
- TypeCont = '7'
-
- // Type 'x' is used by the PAX format to store key-value records that
- // are only relevant to the next file.
- // This package transparently handles these types.
- TypeXHeader = 'x'
-
- // Type 'g' is used by the PAX format to store key-value records that
- // are relevant to all subsequent files.
- // This package only supports parsing and composing such headers,
- // but does not currently support persisting the global state across files.
- TypeXGlobalHeader = 'g'
-
- // Type 'S' indicates a sparse file in the GNU format.
- TypeGNUSparse = 'S'
-
- // Types 'L' and 'K' are used by the GNU format for a meta file
- // used to store the path or link name for the next file.
- // This package transparently handles these types.
- TypeGNULongName = 'L'
- TypeGNULongLink = 'K'
-)
-
-// Keywords for PAX extended header records.
-const (
- paxNone = "" // Indicates that no PAX key is suitable
- paxPath = "path"
- paxLinkpath = "linkpath"
- paxSize = "size"
- paxUid = "uid"
- paxGid = "gid"
- paxUname = "uname"
- paxGname = "gname"
- paxMtime = "mtime"
- paxAtime = "atime"
- paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid
- paxCharset = "charset" // Currently unused
- paxComment = "comment" // Currently unused
-
- paxSchilyXattr = "SCHILY.xattr."
-
- // Keywords for GNU sparse files in a PAX extended header.
- paxGNUSparse = "GNU.sparse."
- paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
- paxGNUSparseOffset = "GNU.sparse.offset"
- paxGNUSparseNumBytes = "GNU.sparse.numbytes"
- paxGNUSparseMap = "GNU.sparse.map"
- paxGNUSparseName = "GNU.sparse.name"
- paxGNUSparseMajor = "GNU.sparse.major"
- paxGNUSparseMinor = "GNU.sparse.minor"
- paxGNUSparseSize = "GNU.sparse.size"
- paxGNUSparseRealSize = "GNU.sparse.realsize"
-)
-
-// basicKeys is a set of the PAX keys for which we have built-in support.
-// This does not contain "charset" or "comment", which are both PAX-specific,
-// so adding them as first-class features of Header is unlikely.
-// Users can use the PAXRecords field to set it themselves.
-var basicKeys = map[string]bool{
- paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
- paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
-}
-
-// A Header represents a single header in a tar archive.
-// Some fields may not be populated.
-//
-// For forward compatibility, users that retrieve a Header from Reader.Next,
-// mutate it in some ways, and then pass it back to Writer.WriteHeader
-// should do so by creating a new Header and copying the fields
-// that they are interested in preserving.
-type Header struct {
- // Typeflag is the type of header entry.
- // The zero value is automatically promoted to either TypeReg or TypeDir
- // depending on the presence of a trailing slash in Name.
- Typeflag byte
-
- Name string // Name of file entry
- Linkname string // Target name of link (valid for TypeLink or TypeSymlink)
-
- Size int64 // Logical file size in bytes
- Mode int64 // Permission and mode bits
- Uid int // User ID of owner
- Gid int // Group ID of owner
- Uname string // User name of owner
- Gname string // Group name of owner
-
- // If the Format is unspecified, then Writer.WriteHeader rounds ModTime
- // to the nearest second and ignores the AccessTime and ChangeTime fields.
- //
- // To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
- // To use sub-second resolution, specify the Format as PAX.
- ModTime time.Time // Modification time
- AccessTime time.Time // Access time (requires either PAX or GNU support)
- ChangeTime time.Time // Change time (requires either PAX or GNU support)
-
- Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
- Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
-
- // Xattrs stores extended attributes as PAX records under the
- // "SCHILY.xattr." namespace.
- //
- // The following are semantically equivalent:
- // h.Xattrs[key] = value
- // h.PAXRecords["SCHILY.xattr."+key] = value
- //
- // When Writer.WriteHeader is called, the contents of Xattrs will take
- // precedence over those in PAXRecords.
- //
- // Deprecated: Use PAXRecords instead.
- Xattrs map[string]string
-
- // PAXRecords is a map of PAX extended header records.
- //
- // User-defined records should have keys of the following form:
- // VENDOR.keyword
- // Where VENDOR is some namespace in all uppercase, and keyword may
- // not contain the '=' character (e.g., "GOLANG.pkg.version").
- // The key and value should be non-empty UTF-8 strings.
- //
- // When Writer.WriteHeader is called, PAX records derived from the
- // other fields in Header take precedence over PAXRecords.
- PAXRecords map[string]string
-
- // Format specifies the format of the tar header.
- //
- // This is set by Reader.Next as a best-effort guess at the format.
- // Since the Reader liberally reads some non-compliant files,
- // it is possible for this to be FormatUnknown.
- //
- // If the format is unspecified when Writer.WriteHeader is called,
- // then it uses the first format (in the order of USTAR, PAX, GNU)
- // capable of encoding this Header (see Format).
- Format Format
-}
-
-// sparseEntry represents a Length-sized fragment at Offset in the file.
-type sparseEntry struct{ Offset, Length int64 }
-
-func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
-
-// A sparse file can be represented as either a sparseDatas or a sparseHoles.
-// As long as the total size is known, they are equivalent and one can be
-// converted to the other form and back. The various tar formats with sparse
-// file support represent sparse files in the sparseDatas form. That is, they
-// specify the fragments in the file that has data, and treat everything else as
-// having zero bytes. As such, the encoding and decoding logic in this package
-// deals with sparseDatas.
-//
-// However, the external API uses sparseHoles instead of sparseDatas because the
-// zero value of sparseHoles logically represents a normal file (i.e., there are
-// no holes in it). On the other hand, the zero value of sparseDatas implies
-// that the file has no data in it, which is rather odd.
-//
-// As an example, if the underlying raw file contains the 10-byte data:
-//
-// var compactFile = "abcdefgh"
-//
-// And the sparse map has the following entries:
-//
-// var spd sparseDatas = []sparseEntry{
-// {Offset: 2, Length: 5}, // Data fragment for 2..6
-// {Offset: 18, Length: 3}, // Data fragment for 18..20
-// }
-// var sph sparseHoles = []sparseEntry{
-// {Offset: 0, Length: 2}, // Hole fragment for 0..1
-// {Offset: 7, Length: 11}, // Hole fragment for 7..17
-// {Offset: 21, Length: 4}, // Hole fragment for 21..24
-// }
-//
-// Then the content of the resulting sparse file with a Header.Size of 25 is:
-//
-// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
-type (
- sparseDatas []sparseEntry
- sparseHoles []sparseEntry
-)
-
-// validateSparseEntries reports whether sp is a valid sparse map.
-// It does not matter whether sp represents data fragments or hole fragments.
-func validateSparseEntries(sp []sparseEntry, size int64) bool {
- // Validate all sparse entries. These are the same checks as performed by
- // the BSD tar utility.
- if size < 0 {
- return false
- }
- var pre sparseEntry
- for _, cur := range sp {
- switch {
- case cur.Offset < 0 || cur.Length < 0:
- return false // Negative values are never okay
- case cur.Offset > math.MaxInt64-cur.Length:
- return false // Integer overflow with large length
- case cur.endOffset() > size:
- return false // Region extends beyond the actual size
- case pre.endOffset() > cur.Offset:
- return false // Regions cannot overlap and must be in order
- }
- pre = cur
- }
- return true
-}
-
-// alignSparseEntries mutates src and returns dst where each fragment's
-// starting offset is aligned up to the nearest block edge, and each
-// ending offset is aligned down to the nearest block edge.
-//
-// Even though the Go tar Reader and the BSD tar utility can handle entries
-// with arbitrary offsets and lengths, the GNU tar utility can only handle
-// offsets and lengths that are multiples of blockSize.
-func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
- dst := src[:0]
- for _, s := range src {
- pos, end := s.Offset, s.endOffset()
- pos += blockPadding(+pos) // Round-up to nearest blockSize
- if end != size {
- end -= blockPadding(-end) // Round-down to nearest blockSize
- }
- if pos < end {
- dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
- }
- }
- return dst
-}
-
-// invertSparseEntries converts a sparse map from one form to the other.
-// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
-// The input must have been already validated.
-//
-// This function mutates src and returns a normalized map where:
-// - adjacent fragments are coalesced together
-// - only the last fragment may be empty
-// - the endOffset of the last fragment is the total size
-func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
- dst := src[:0]
- var pre sparseEntry
- for _, cur := range src {
- if cur.Length == 0 {
- continue // Skip empty fragments
- }
- pre.Length = cur.Offset - pre.Offset
- if pre.Length > 0 {
- dst = append(dst, pre) // Only add non-empty fragments
- }
- pre.Offset = cur.endOffset()
- }
- pre.Length = size - pre.Offset // Possibly the only empty fragment
- return append(dst, pre)
-}
-
-// fileState tracks the number of logical (includes sparse holes) and physical
-// (actual in tar archive) bytes remaining for the current file.
-//
-// Invariant: logicalRemaining >= physicalRemaining
-type fileState interface {
- logicalRemaining() int64
- physicalRemaining() int64
-}
-
-// allowedFormats determines which formats can be used.
-// The value returned is the logical OR of multiple possible formats.
-// If the value is FormatUnknown, then the input Header cannot be encoded
-// and an error is returned explaining why.
-//
-// As a by-product of checking the fields, this function returns paxHdrs, which
-// contain all fields that could not be directly encoded.
-// A value receiver ensures that this method does not mutate the source Header.
-func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
- format = FormatUSTAR | FormatPAX | FormatGNU
- paxHdrs = make(map[string]string)
-
- var whyNoUSTAR, whyNoPAX, whyNoGNU string
- var preferPAX bool // Prefer PAX over USTAR
- verifyString := func(s string, size int, name, paxKey string) {
- // NUL-terminator is optional for path and linkpath.
- // Technically, it is required for uname and gname,
- // but neither GNU nor BSD tar checks for it.
- tooLong := len(s) > size
- allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
- if hasNUL(s) || (tooLong && !allowLongGNU) {
- whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
- format.mustNotBe(FormatGNU)
- }
- if !isASCII(s) || tooLong {
- canSplitUSTAR := paxKey == paxPath
- if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
- whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
- format.mustNotBe(FormatUSTAR)
- }
- if paxKey == paxNone {
- whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
- format.mustNotBe(FormatPAX)
- } else {
- paxHdrs[paxKey] = s
- }
- }
- if v, ok := h.PAXRecords[paxKey]; ok && v == s {
- paxHdrs[paxKey] = v
- }
- }
- verifyNumeric := func(n int64, size int, name, paxKey string) {
- if !fitsInBase256(size, n) {
- whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
- format.mustNotBe(FormatGNU)
- }
- if !fitsInOctal(size, n) {
- whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
- format.mustNotBe(FormatUSTAR)
- if paxKey == paxNone {
- whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
- format.mustNotBe(FormatPAX)
- } else {
- paxHdrs[paxKey] = strconv.FormatInt(n, 10)
- }
- }
- if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
- paxHdrs[paxKey] = v
- }
- }
- verifyTime := func(ts time.Time, size int, name, paxKey string) {
- if ts.IsZero() {
- return // Always okay
- }
- if !fitsInBase256(size, ts.Unix()) {
- whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
- format.mustNotBe(FormatGNU)
- }
- isMtime := paxKey == paxMtime
- fitsOctal := fitsInOctal(size, ts.Unix())
- if (isMtime && !fitsOctal) || !isMtime {
- whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
- format.mustNotBe(FormatUSTAR)
- }
- needsNano := ts.Nanosecond() != 0
- if !isMtime || !fitsOctal || needsNano {
- preferPAX = true // USTAR may truncate sub-second measurements
- if paxKey == paxNone {
- whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
- format.mustNotBe(FormatPAX)
- } else {
- paxHdrs[paxKey] = formatPAXTime(ts)
- }
- }
- if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
- paxHdrs[paxKey] = v
- }
- }
-
- // Check basic fields.
- var blk block
- v7 := blk.toV7()
- ustar := blk.toUSTAR()
- gnu := blk.toGNU()
- verifyString(h.Name, len(v7.name()), "Name", paxPath)
- verifyString(h.Linkname, len(v7.linkName()), "Linkname", paxLinkpath)
- verifyString(h.Uname, len(ustar.userName()), "Uname", paxUname)
- verifyString(h.Gname, len(ustar.groupName()), "Gname", paxGname)
- verifyNumeric(h.Mode, len(v7.mode()), "Mode", paxNone)
- verifyNumeric(int64(h.Uid), len(v7.uid()), "Uid", paxUid)
- verifyNumeric(int64(h.Gid), len(v7.gid()), "Gid", paxGid)
- verifyNumeric(h.Size, len(v7.size()), "Size", paxSize)
- verifyNumeric(h.Devmajor, len(ustar.devMajor()), "Devmajor", paxNone)
- verifyNumeric(h.Devminor, len(ustar.devMinor()), "Devminor", paxNone)
- verifyTime(h.ModTime, len(v7.modTime()), "ModTime", paxMtime)
- verifyTime(h.AccessTime, len(gnu.accessTime()), "AccessTime", paxAtime)
- verifyTime(h.ChangeTime, len(gnu.changeTime()), "ChangeTime", paxCtime)
-
- // Check for header-only types.
- var whyOnlyPAX, whyOnlyGNU string
- switch h.Typeflag {
- case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
- // Exclude TypeLink and TypeSymlink, since they may reference directories.
- if strings.HasSuffix(h.Name, "/") {
- return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
- }
- case TypeXHeader, TypeGNULongName, TypeGNULongLink:
- return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
- case TypeXGlobalHeader:
- h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}
- if !reflect.DeepEqual(h, h2) {
- return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"}
- }
- whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
- format.mayOnlyBe(FormatPAX)
- }
- if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
- return FormatUnknown, nil, headerError{"negative size on header-only type"}
- }
-
- // Check PAX records.
- if len(h.Xattrs) > 0 {
- for k, v := range h.Xattrs {
- paxHdrs[paxSchilyXattr+k] = v
- }
- whyOnlyPAX = "only PAX supports Xattrs"
- format.mayOnlyBe(FormatPAX)
- }
- if len(h.PAXRecords) > 0 {
- for k, v := range h.PAXRecords {
- switch _, exists := paxHdrs[k]; {
- case exists:
- continue // Do not overwrite existing records
- case h.Typeflag == TypeXGlobalHeader:
- paxHdrs[k] = v // Copy all records
- case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
- paxHdrs[k] = v // Ignore local records that may conflict
- }
- }
- whyOnlyPAX = "only PAX supports PAXRecords"
- format.mayOnlyBe(FormatPAX)
- }
- for k, v := range paxHdrs {
- if !validPAXRecord(k, v) {
- return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
- }
- }
-
- // TODO(dsnet): Re-enable this when adding sparse support.
- // See https://golang.org/issue/22735
- /*
- // Check sparse files.
- if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
- if isHeaderOnlyType(h.Typeflag) {
- return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
- }
- if !validateSparseEntries(h.SparseHoles, h.Size) {
- return FormatUnknown, nil, headerError{"invalid sparse holes"}
- }
- if h.Typeflag == TypeGNUSparse {
- whyOnlyGNU = "only GNU supports TypeGNUSparse"
- format.mayOnlyBe(FormatGNU)
- } else {
- whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
- format.mustNotBe(FormatGNU)
- }
- whyNoUSTAR = "USTAR does not support sparse files"
- format.mustNotBe(FormatUSTAR)
- }
- */
-
- // Check desired format.
- if wantFormat := h.Format; wantFormat != FormatUnknown {
- if wantFormat.has(FormatPAX) && !preferPAX {
- wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
- }
- format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
- }
- if format == FormatUnknown {
- switch h.Format {
- case FormatUSTAR:
- err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
- case FormatPAX:
- err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
- case FormatGNU:
- err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
- default:
- err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
- }
- }
- return format, paxHdrs, err
-}
-
-// FileInfo returns an fs.FileInfo for the Header.
-func (h *Header) FileInfo() fs.FileInfo {
- return headerFileInfo{h}
-}
-
-// headerFileInfo implements fs.FileInfo.
-type headerFileInfo struct {
- h *Header
-}
-
-func (fi headerFileInfo) Size() int64 { return fi.h.Size }
-func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
-func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
-func (fi headerFileInfo) Sys() any { return fi.h }
-
-// Name returns the base name of the file.
-func (fi headerFileInfo) Name() string {
- if fi.IsDir() {
- return path.Base(path.Clean(fi.h.Name))
- }
- return path.Base(fi.h.Name)
-}
-
-// Mode returns the permission and mode bits for the headerFileInfo.
-func (fi headerFileInfo) Mode() (mode fs.FileMode) {
- // Set file permission bits.
- mode = fs.FileMode(fi.h.Mode).Perm()
-
- // Set setuid, setgid and sticky bits.
- if fi.h.Mode&c_ISUID != 0 {
- mode |= fs.ModeSetuid
- }
- if fi.h.Mode&c_ISGID != 0 {
- mode |= fs.ModeSetgid
- }
- if fi.h.Mode&c_ISVTX != 0 {
- mode |= fs.ModeSticky
- }
-
- // Set file mode bits; clear perm, setuid, setgid, and sticky bits.
- switch m := fs.FileMode(fi.h.Mode) &^ 07777; m {
- case c_ISDIR:
- mode |= fs.ModeDir
- case c_ISFIFO:
- mode |= fs.ModeNamedPipe
- case c_ISLNK:
- mode |= fs.ModeSymlink
- case c_ISBLK:
- mode |= fs.ModeDevice
- case c_ISCHR:
- mode |= fs.ModeDevice
- mode |= fs.ModeCharDevice
- case c_ISSOCK:
- mode |= fs.ModeSocket
- }
-
- switch fi.h.Typeflag {
- case TypeSymlink:
- mode |= fs.ModeSymlink
- case TypeChar:
- mode |= fs.ModeDevice
- mode |= fs.ModeCharDevice
- case TypeBlock:
- mode |= fs.ModeDevice
- case TypeDir:
- mode |= fs.ModeDir
- case TypeFifo:
- mode |= fs.ModeNamedPipe
- }
-
- return mode
-}
-
-func (fi headerFileInfo) String() string {
- return fs.FormatFileInfo(fi)
-}
-
-// sysStat, if non-nil, populates h from system-dependent fields of fi.
-var sysStat func(fi fs.FileInfo, h *Header) error
-
-const (
- // Mode constants from the USTAR spec:
- // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
- c_ISUID = 04000 // Set uid
- c_ISGID = 02000 // Set gid
- c_ISVTX = 01000 // Save text (sticky bit)
-
- // Common Unix mode constants; these are not defined in any common tar standard.
- // Header.FileInfo understands these, but FileInfoHeader will never produce these.
- c_ISDIR = 040000 // Directory
- c_ISFIFO = 010000 // FIFO
- c_ISREG = 0100000 // Regular file
- c_ISLNK = 0120000 // Symbolic link
- c_ISBLK = 060000 // Block special file
- c_ISCHR = 020000 // Character special file
- c_ISSOCK = 0140000 // Socket
-)
-
-// FileInfoHeader creates a partially-populated Header from fi.
-// If fi describes a symlink, FileInfoHeader records link as the link target.
-// If fi describes a directory, a slash is appended to the name.
-//
-// Since fs.FileInfo's Name method only returns the base name of
-// the file it describes, it may be necessary to modify Header.Name
-// to provide the full path name of the file.
-func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
- if fi == nil {
- return nil, errors.New("archive/tar: FileInfo is nil")
- }
- fm := fi.Mode()
- h := &Header{
- Name: fi.Name(),
- ModTime: fi.ModTime(),
- Mode: int64(fm.Perm()), // or'd with c_IS* constants later
- }
- switch {
- case fm.IsRegular():
- h.Typeflag = TypeReg
- h.Size = fi.Size()
- case fi.IsDir():
- h.Typeflag = TypeDir
- h.Name += "/"
- case fm&fs.ModeSymlink != 0:
- h.Typeflag = TypeSymlink
- h.Linkname = link
- case fm&fs.ModeDevice != 0:
- if fm&fs.ModeCharDevice != 0 {
- h.Typeflag = TypeChar
- } else {
- h.Typeflag = TypeBlock
- }
- case fm&fs.ModeNamedPipe != 0:
- h.Typeflag = TypeFifo
- case fm&fs.ModeSocket != 0:
- return nil, fmt.Errorf("archive/tar: sockets not supported")
- default:
- return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
- }
- if fm&fs.ModeSetuid != 0 {
- h.Mode |= c_ISUID
- }
- if fm&fs.ModeSetgid != 0 {
- h.Mode |= c_ISGID
- }
- if fm&fs.ModeSticky != 0 {
- h.Mode |= c_ISVTX
- }
- // If possible, populate additional fields from OS-specific
- // FileInfo fields.
- if sys, ok := fi.Sys().(*Header); ok {
- // This FileInfo came from a Header (not the OS). Use the
- // original Header to populate all remaining fields.
- h.Uid = sys.Uid
- h.Gid = sys.Gid
- h.Uname = sys.Uname
- h.Gname = sys.Gname
- h.AccessTime = sys.AccessTime
- h.ChangeTime = sys.ChangeTime
- if sys.Xattrs != nil {
- h.Xattrs = make(map[string]string)
- for k, v := range sys.Xattrs {
- h.Xattrs[k] = v
- }
- }
- if sys.Typeflag == TypeLink {
- // hard link
- h.Typeflag = TypeLink
- h.Size = 0
- h.Linkname = sys.Linkname
- }
- if sys.PAXRecords != nil {
- h.PAXRecords = make(map[string]string)
- for k, v := range sys.PAXRecords {
- h.PAXRecords[k] = v
- }
- }
- }
- if sysStat != nil {
- return h, sysStat(fi, h)
- }
- return h, nil
-}
-
-// isHeaderOnlyType checks if the given type flag is of the type that has no
-// data section even if a size is specified.
-func isHeaderOnlyType(flag byte) bool {
- switch flag {
- case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
- return true
- default:
- return false
- }
-}
-
-func min(a, b int64) int64 {
- if a < b {
- return a
- }
- return b
-}
diff --git a/contrib/go/_std_1.21/src/archive/tar/format.go b/contrib/go/_std_1.21/src/archive/tar/format.go
deleted file mode 100644
index e50124d99e..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/format.go
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import "strings"
-
-// Format represents the tar archive format.
-//
-// The original tar format was introduced in Unix V7.
-// Since then, there have been multiple competing formats attempting to
-// standardize or extend the V7 format to overcome its limitations.
-// The most common formats are the USTAR, PAX, and GNU formats,
-// each with their own advantages and limitations.
-//
-// The following table captures the capabilities of each format:
-//
-// | USTAR | PAX | GNU
-// ------------------+--------+-----------+----------
-// Name | 256B | unlimited | unlimited
-// Linkname | 100B | unlimited | unlimited
-// Size | uint33 | unlimited | uint89
-// Mode | uint21 | uint21 | uint57
-// Uid/Gid | uint21 | unlimited | uint57
-// Uname/Gname | 32B | unlimited | 32B
-// ModTime | uint33 | unlimited | int89
-// AccessTime | n/a | unlimited | int89
-// ChangeTime | n/a | unlimited | int89
-// Devmajor/Devminor | uint21 | uint21 | uint57
-// ------------------+--------+-----------+----------
-// string encoding | ASCII | UTF-8 | binary
-// sub-second times | no | yes | no
-// sparse files | no | yes | yes
-//
-// The table's upper portion shows the Header fields, where each format reports
-// the maximum number of bytes allowed for each string field and
-// the integer type used to store each numeric field
-// (where timestamps are stored as the number of seconds since the Unix epoch).
-//
-// The table's lower portion shows specialized features of each format,
-// such as supported string encodings, support for sub-second timestamps,
-// or support for sparse files.
-//
-// The Writer currently provides no support for sparse files.
-type Format int
-
-// Constants to identify various tar formats.
-const (
- // Deliberately hide the meaning of constants from public API.
- _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...
-
- // FormatUnknown indicates that the format is unknown.
- FormatUnknown
-
- // The format of the original Unix V7 tar tool prior to standardization.
- formatV7
-
- // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
- //
- // While this format is compatible with most tar readers,
- // the format has several limitations making it unsuitable for some usages.
- // Most notably, it cannot support sparse files, files larger than 8GiB,
- // filenames larger than 256 characters, and non-ASCII filenames.
- //
- // Reference:
- // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
- FormatUSTAR
-
- // FormatPAX represents the PAX header format defined in POSIX.1-2001.
- //
- // PAX extends USTAR by writing a special file with Typeflag TypeXHeader
- // preceding the original header. This file contains a set of key-value
- // records, which are used to overcome USTAR's shortcomings, in addition to
- // providing the ability to have sub-second resolution for timestamps.
- //
- // Some newer formats add their own extensions to PAX by defining their
- // own keys and assigning certain semantic meaning to the associated values.
- // For example, sparse file support in PAX is implemented using keys
- // defined by the GNU manual (e.g., "GNU.sparse.map").
- //
- // Reference:
- // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
- FormatPAX
-
- // FormatGNU represents the GNU header format.
- //
- // The GNU header format is older than the USTAR and PAX standards and
- // is not compatible with them. The GNU format supports
- // arbitrary file sizes, filenames of arbitrary encoding and length,
- // sparse files, and other features.
- //
- // It is recommended that PAX be chosen over GNU unless the target
- // application can only parse GNU formatted archives.
- //
- // Reference:
- // https://www.gnu.org/software/tar/manual/html_node/Standard.html
- FormatGNU
-
- // Schily's tar format, which is incompatible with USTAR.
- // This does not cover STAR extensions to the PAX format; these fall under
- // the PAX format.
- formatSTAR
-
- formatMax
-)
-
-func (f Format) has(f2 Format) bool { return f&f2 != 0 }
-func (f *Format) mayBe(f2 Format) { *f |= f2 }
-func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }
-func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
-
-var formatNames = map[Format]string{
- formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
-}
-
-func (f Format) String() string {
- var ss []string
- for f2 := Format(1); f2 < formatMax; f2 <<= 1 {
- if f.has(f2) {
- ss = append(ss, formatNames[f2])
- }
- }
- switch len(ss) {
- case 0:
- return "<unknown>"
- case 1:
- return ss[0]
- default:
- return "(" + strings.Join(ss, " | ") + ")"
- }
-}
-
-// Magics used to identify various formats.
-const (
- magicGNU, versionGNU = "ustar ", " \x00"
- magicUSTAR, versionUSTAR = "ustar\x00", "00"
- trailerSTAR = "tar\x00"
-)
-
-// Size constants from various tar specifications.
-const (
- blockSize = 512 // Size of each block in a tar stream
- nameSize = 100 // Max length of the name field in USTAR format
- prefixSize = 155 // Max length of the prefix field in USTAR format
-
- // Max length of a special file (PAX header, GNU long name or link).
- // This matches the limit used by libarchive.
- maxSpecialFileSize = 1 << 20
-)
-
-// blockPadding computes the number of bytes needed to pad offset up to the
-// nearest block edge where 0 <= n < blockSize.
-func blockPadding(offset int64) (n int64) {
- return -offset & (blockSize - 1)
-}
-
-var zeroBlock block
-
-type block [blockSize]byte
-
-// Convert block to any number of formats.
-func (b *block) toV7() *headerV7 { return (*headerV7)(b) }
-func (b *block) toGNU() *headerGNU { return (*headerGNU)(b) }
-func (b *block) toSTAR() *headerSTAR { return (*headerSTAR)(b) }
-func (b *block) toUSTAR() *headerUSTAR { return (*headerUSTAR)(b) }
-func (b *block) toSparse() sparseArray { return sparseArray(b[:]) }
-
-// getFormat checks that the block is a valid tar header based on the checksum.
-// It then attempts to guess the specific format based on magic values.
-// If the checksum fails, then FormatUnknown is returned.
-func (b *block) getFormat() Format {
- // Verify checksum.
- var p parser
- value := p.parseOctal(b.toV7().chksum())
- chksum1, chksum2 := b.computeChecksum()
- if p.err != nil || (value != chksum1 && value != chksum2) {
- return FormatUnknown
- }
-
- // Guess the magic values.
- magic := string(b.toUSTAR().magic())
- version := string(b.toUSTAR().version())
- trailer := string(b.toSTAR().trailer())
- switch {
- case magic == magicUSTAR && trailer == trailerSTAR:
- return formatSTAR
- case magic == magicUSTAR:
- return FormatUSTAR | FormatPAX
- case magic == magicGNU && version == versionGNU:
- return FormatGNU
- default:
- return formatV7
- }
-}
-
-// setFormat writes the magic values necessary for specified format
-// and then updates the checksum accordingly.
-func (b *block) setFormat(format Format) {
- // Set the magic values.
- switch {
- case format.has(formatV7):
- // Do nothing.
- case format.has(FormatGNU):
- copy(b.toGNU().magic(), magicGNU)
- copy(b.toGNU().version(), versionGNU)
- case format.has(formatSTAR):
- copy(b.toSTAR().magic(), magicUSTAR)
- copy(b.toSTAR().version(), versionUSTAR)
- copy(b.toSTAR().trailer(), trailerSTAR)
- case format.has(FormatUSTAR | FormatPAX):
- copy(b.toUSTAR().magic(), magicUSTAR)
- copy(b.toUSTAR().version(), versionUSTAR)
- default:
- panic("invalid format")
- }
-
- // Update checksum.
- // This field is special in that it is terminated by a NULL then space.
- var f formatter
- field := b.toV7().chksum()
- chksum, _ := b.computeChecksum() // Possible values are 256..128776
- f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
- field[7] = ' '
-}
-
-// computeChecksum computes the checksum for the header block.
-// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
-// signed byte values.
-// We compute and return both.
-func (b *block) computeChecksum() (unsigned, signed int64) {
- for i, c := range b {
- if 148 <= i && i < 156 {
- c = ' ' // Treat the checksum field itself as all spaces.
- }
- unsigned += int64(c)
- signed += int64(int8(c))
- }
- return unsigned, signed
-}
-
-// reset clears the block with all zeros.
-func (b *block) reset() {
- *b = block{}
-}
-
-type headerV7 [blockSize]byte
-
-func (h *headerV7) name() []byte { return h[000:][:100] }
-func (h *headerV7) mode() []byte { return h[100:][:8] }
-func (h *headerV7) uid() []byte { return h[108:][:8] }
-func (h *headerV7) gid() []byte { return h[116:][:8] }
-func (h *headerV7) size() []byte { return h[124:][:12] }
-func (h *headerV7) modTime() []byte { return h[136:][:12] }
-func (h *headerV7) chksum() []byte { return h[148:][:8] }
-func (h *headerV7) typeFlag() []byte { return h[156:][:1] }
-func (h *headerV7) linkName() []byte { return h[157:][:100] }
-
-type headerGNU [blockSize]byte
-
-func (h *headerGNU) v7() *headerV7 { return (*headerV7)(h) }
-func (h *headerGNU) magic() []byte { return h[257:][:6] }
-func (h *headerGNU) version() []byte { return h[263:][:2] }
-func (h *headerGNU) userName() []byte { return h[265:][:32] }
-func (h *headerGNU) groupName() []byte { return h[297:][:32] }
-func (h *headerGNU) devMajor() []byte { return h[329:][:8] }
-func (h *headerGNU) devMinor() []byte { return h[337:][:8] }
-func (h *headerGNU) accessTime() []byte { return h[345:][:12] }
-func (h *headerGNU) changeTime() []byte { return h[357:][:12] }
-func (h *headerGNU) sparse() sparseArray { return sparseArray(h[386:][:24*4+1]) }
-func (h *headerGNU) realSize() []byte { return h[483:][:12] }
-
-type headerSTAR [blockSize]byte
-
-func (h *headerSTAR) v7() *headerV7 { return (*headerV7)(h) }
-func (h *headerSTAR) magic() []byte { return h[257:][:6] }
-func (h *headerSTAR) version() []byte { return h[263:][:2] }
-func (h *headerSTAR) userName() []byte { return h[265:][:32] }
-func (h *headerSTAR) groupName() []byte { return h[297:][:32] }
-func (h *headerSTAR) devMajor() []byte { return h[329:][:8] }
-func (h *headerSTAR) devMinor() []byte { return h[337:][:8] }
-func (h *headerSTAR) prefix() []byte { return h[345:][:131] }
-func (h *headerSTAR) accessTime() []byte { return h[476:][:12] }
-func (h *headerSTAR) changeTime() []byte { return h[488:][:12] }
-func (h *headerSTAR) trailer() []byte { return h[508:][:4] }
-
-type headerUSTAR [blockSize]byte
-
-func (h *headerUSTAR) v7() *headerV7 { return (*headerV7)(h) }
-func (h *headerUSTAR) magic() []byte { return h[257:][:6] }
-func (h *headerUSTAR) version() []byte { return h[263:][:2] }
-func (h *headerUSTAR) userName() []byte { return h[265:][:32] }
-func (h *headerUSTAR) groupName() []byte { return h[297:][:32] }
-func (h *headerUSTAR) devMajor() []byte { return h[329:][:8] }
-func (h *headerUSTAR) devMinor() []byte { return h[337:][:8] }
-func (h *headerUSTAR) prefix() []byte { return h[345:][:155] }
-
-type sparseArray []byte
-
-func (s sparseArray) entry(i int) sparseElem { return sparseElem(s[i*24:]) }
-func (s sparseArray) isExtended() []byte { return s[24*s.maxEntries():][:1] }
-func (s sparseArray) maxEntries() int { return len(s) / 24 }
-
-type sparseElem []byte
-
-func (s sparseElem) offset() []byte { return s[00:][:12] }
-func (s sparseElem) length() []byte { return s[12:][:12] }
diff --git a/contrib/go/_std_1.21/src/archive/tar/reader.go b/contrib/go/_std_1.21/src/archive/tar/reader.go
deleted file mode 100644
index cfa50446ed..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/reader.go
+++ /dev/null
@@ -1,882 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
- "bytes"
- "io"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-)
-
-// Reader provides sequential access to the contents of a tar archive.
-// Reader.Next advances to the next file in the archive (including the first),
-// and then Reader can be treated as an io.Reader to access the file's data.
-type Reader struct {
- r io.Reader
- pad int64 // Amount of padding (ignored) after current file entry
- curr fileReader // Reader for current file entry
- blk block // Buffer to use as temporary local storage
-
- // err is a persistent error.
- // It is only the responsibility of every exported method of Reader to
- // ensure that this error is sticky.
- err error
-}
-
-type fileReader interface {
- io.Reader
- fileState
-
- WriteTo(io.Writer) (int64, error)
-}
-
-// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader {
- return &Reader{r: r, curr: &regFileReader{r, 0}}
-}
-
-// Next advances to the next entry in the tar archive.
-// The Header.Size determines how many bytes can be read for the next file.
-// Any remaining data in the current file is automatically discarded.
-// At the end of the archive, Next returns the error io.EOF.
-//
-// If Next encounters a non-local name (as defined by [filepath.IsLocal])
-// and the GODEBUG environment variable contains `tarinsecurepath=0`,
-// Next returns the header with an ErrInsecurePath error.
-// A future version of Go may introduce this behavior by default.
-// Programs that want to accept non-local names can ignore
-// the ErrInsecurePath error and use the returned header.
-func (tr *Reader) Next() (*Header, error) {
- if tr.err != nil {
- return nil, tr.err
- }
- hdr, err := tr.next()
- tr.err = err
- if err == nil && !filepath.IsLocal(hdr.Name) {
- if tarinsecurepath.Value() == "0" {
- tarinsecurepath.IncNonDefault()
- err = ErrInsecurePath
- }
- }
- return hdr, err
-}
-
-func (tr *Reader) next() (*Header, error) {
- var paxHdrs map[string]string
- var gnuLongName, gnuLongLink string
-
- // Externally, Next iterates through the tar archive as if it is a series of
- // files. Internally, the tar format often uses fake "files" to add meta
- // data that describes the next file. These meta data "files" should not
- // normally be visible to the outside. As such, this loop iterates through
- // one or more "header files" until it finds a "normal file".
- format := FormatUSTAR | FormatPAX | FormatGNU
- for {
- // Discard the remainder of the file and any padding.
- if err := discard(tr.r, tr.curr.physicalRemaining()); err != nil {
- return nil, err
- }
- if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
- return nil, err
- }
- tr.pad = 0
-
- hdr, rawHdr, err := tr.readHeader()
- if err != nil {
- return nil, err
- }
- if err := tr.handleRegularFile(hdr); err != nil {
- return nil, err
- }
- format.mayOnlyBe(hdr.Format)
-
- // Check for PAX/GNU special headers and files.
- switch hdr.Typeflag {
- case TypeXHeader, TypeXGlobalHeader:
- format.mayOnlyBe(FormatPAX)
- paxHdrs, err = parsePAX(tr)
- if err != nil {
- return nil, err
- }
- if hdr.Typeflag == TypeXGlobalHeader {
- mergePAX(hdr, paxHdrs)
- return &Header{
- Name: hdr.Name,
- Typeflag: hdr.Typeflag,
- Xattrs: hdr.Xattrs,
- PAXRecords: hdr.PAXRecords,
- Format: format,
- }, nil
- }
- continue // This is a meta header affecting the next header
- case TypeGNULongName, TypeGNULongLink:
- format.mayOnlyBe(FormatGNU)
- realname, err := readSpecialFile(tr)
- if err != nil {
- return nil, err
- }
-
- var p parser
- switch hdr.Typeflag {
- case TypeGNULongName:
- gnuLongName = p.parseString(realname)
- case TypeGNULongLink:
- gnuLongLink = p.parseString(realname)
- }
- continue // This is a meta header affecting the next header
- default:
- // The old GNU sparse format is handled here since it is technically
- // just a regular file with additional attributes.
-
- if err := mergePAX(hdr, paxHdrs); err != nil {
- return nil, err
- }
- if gnuLongName != "" {
- hdr.Name = gnuLongName
- }
- if gnuLongLink != "" {
- hdr.Linkname = gnuLongLink
- }
- if hdr.Typeflag == TypeRegA {
- if strings.HasSuffix(hdr.Name, "/") {
- hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
- } else {
- hdr.Typeflag = TypeReg
- }
- }
-
- // The extended headers may have updated the size.
- // Thus, setup the regFileReader again after merging PAX headers.
- if err := tr.handleRegularFile(hdr); err != nil {
- return nil, err
- }
-
- // Sparse formats rely on being able to read from the logical data
- // section; there must be a preceding call to handleRegularFile.
- if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
- return nil, err
- }
-
- // Set the final guess at the format.
- if format.has(FormatUSTAR) && format.has(FormatPAX) {
- format.mayOnlyBe(FormatUSTAR)
- }
- hdr.Format = format
- return hdr, nil // This is a file, so stop
- }
- }
-}
-
-// handleRegularFile sets up the current file reader and padding such that it
-// can only read the following logical data section. It will properly handle
-// special headers that contain no data section.
-func (tr *Reader) handleRegularFile(hdr *Header) error {
- nb := hdr.Size
- if isHeaderOnlyType(hdr.Typeflag) {
- nb = 0
- }
- if nb < 0 {
- return ErrHeader
- }
-
- tr.pad = blockPadding(nb)
- tr.curr = &regFileReader{r: tr.r, nb: nb}
- return nil
-}
-
-// handleSparseFile checks if the current file is a sparse format of any type
-// and sets the curr reader appropriately.
-func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
- var spd sparseDatas
- var err error
- if hdr.Typeflag == TypeGNUSparse {
- spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
- } else {
- spd, err = tr.readGNUSparsePAXHeaders(hdr)
- }
-
- // If sp is non-nil, then this is a sparse file.
- // Note that it is possible for len(sp) == 0.
- if err == nil && spd != nil {
- if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
- return ErrHeader
- }
- sph := invertSparseEntries(spd, hdr.Size)
- tr.curr = &sparseFileReader{tr.curr, sph, 0}
- }
- return err
-}
-
-// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
-// If they are found, then this function reads the sparse map and returns it.
-// This assumes that 0.0 headers have already been converted to 0.1 headers
-// by the PAX header parsing logic.
-func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
- // Identify the version of GNU headers.
- var is1x0 bool
- major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
- switch {
- case major == "0" && (minor == "0" || minor == "1"):
- is1x0 = false
- case major == "1" && minor == "0":
- is1x0 = true
- case major != "" || minor != "":
- return nil, nil // Unknown GNU sparse PAX version
- case hdr.PAXRecords[paxGNUSparseMap] != "":
- is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
- default:
- return nil, nil // Not a PAX format GNU sparse file.
- }
- hdr.Format.mayOnlyBe(FormatPAX)
-
- // Update hdr from GNU sparse PAX headers.
- if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
- hdr.Name = name
- }
- size := hdr.PAXRecords[paxGNUSparseSize]
- if size == "" {
- size = hdr.PAXRecords[paxGNUSparseRealSize]
- }
- if size != "" {
- n, err := strconv.ParseInt(size, 10, 64)
- if err != nil {
- return nil, ErrHeader
- }
- hdr.Size = n
- }
-
- // Read the sparse map according to the appropriate format.
- if is1x0 {
- return readGNUSparseMap1x0(tr.curr)
- }
- return readGNUSparseMap0x1(hdr.PAXRecords)
-}
-
-// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
-func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
- for k, v := range paxHdrs {
- if v == "" {
- continue // Keep the original USTAR value
- }
- var id64 int64
- switch k {
- case paxPath:
- hdr.Name = v
- case paxLinkpath:
- hdr.Linkname = v
- case paxUname:
- hdr.Uname = v
- case paxGname:
- hdr.Gname = v
- case paxUid:
- id64, err = strconv.ParseInt(v, 10, 64)
- hdr.Uid = int(id64) // Integer overflow possible
- case paxGid:
- id64, err = strconv.ParseInt(v, 10, 64)
- hdr.Gid = int(id64) // Integer overflow possible
- case paxAtime:
- hdr.AccessTime, err = parsePAXTime(v)
- case paxMtime:
- hdr.ModTime, err = parsePAXTime(v)
- case paxCtime:
- hdr.ChangeTime, err = parsePAXTime(v)
- case paxSize:
- hdr.Size, err = strconv.ParseInt(v, 10, 64)
- default:
- if strings.HasPrefix(k, paxSchilyXattr) {
- if hdr.Xattrs == nil {
- hdr.Xattrs = make(map[string]string)
- }
- hdr.Xattrs[k[len(paxSchilyXattr):]] = v
- }
- }
- if err != nil {
- return ErrHeader
- }
- }
- hdr.PAXRecords = paxHdrs
- return nil
-}
-
-// parsePAX parses PAX headers.
-// If an extended header (type 'x') is invalid, ErrHeader is returned.
-func parsePAX(r io.Reader) (map[string]string, error) {
- buf, err := readSpecialFile(r)
- if err != nil {
- return nil, err
- }
- sbuf := string(buf)
-
- // For GNU PAX sparse format 0.0 support.
- // This function transforms the sparse format 0.0 headers into format 0.1
- // headers since 0.0 headers were not PAX compliant.
- var sparseMap []string
-
- paxHdrs := make(map[string]string)
- for len(sbuf) > 0 {
- key, value, residual, err := parsePAXRecord(sbuf)
- if err != nil {
- return nil, ErrHeader
- }
- sbuf = residual
-
- switch key {
- case paxGNUSparseOffset, paxGNUSparseNumBytes:
- // Validate sparse header order and value.
- if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
- (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
- strings.Contains(value, ",") {
- return nil, ErrHeader
- }
- sparseMap = append(sparseMap, value)
- default:
- paxHdrs[key] = value
- }
- }
- if len(sparseMap) > 0 {
- paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
- }
- return paxHdrs, nil
-}
-
-// readHeader reads the next block header and assumes that the underlying reader
-// is already aligned to a block boundary. It returns the raw block of the
-// header in case further processing is required.
-//
-// The err will be set to io.EOF only when one of the following occurs:
-// - Exactly 0 bytes are read and EOF is hit.
-// - Exactly 1 block of zeros is read and EOF is hit.
-// - At least 2 blocks of zeros are read.
-func (tr *Reader) readHeader() (*Header, *block, error) {
- // Two blocks of zero bytes marks the end of the archive.
- if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
- return nil, nil, err // EOF is okay here; exactly 0 bytes read
- }
- if bytes.Equal(tr.blk[:], zeroBlock[:]) {
- if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
- return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
- }
- if bytes.Equal(tr.blk[:], zeroBlock[:]) {
- return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read
- }
- return nil, nil, ErrHeader // Zero block and then non-zero block
- }
-
- // Verify the header matches a known format.
- format := tr.blk.getFormat()
- if format == FormatUnknown {
- return nil, nil, ErrHeader
- }
-
- var p parser
- hdr := new(Header)
-
- // Unpack the V7 header.
- v7 := tr.blk.toV7()
- hdr.Typeflag = v7.typeFlag()[0]
- hdr.Name = p.parseString(v7.name())
- hdr.Linkname = p.parseString(v7.linkName())
- hdr.Size = p.parseNumeric(v7.size())
- hdr.Mode = p.parseNumeric(v7.mode())
- hdr.Uid = int(p.parseNumeric(v7.uid()))
- hdr.Gid = int(p.parseNumeric(v7.gid()))
- hdr.ModTime = time.Unix(p.parseNumeric(v7.modTime()), 0)
-
- // Unpack format specific fields.
- if format > formatV7 {
- ustar := tr.blk.toUSTAR()
- hdr.Uname = p.parseString(ustar.userName())
- hdr.Gname = p.parseString(ustar.groupName())
- hdr.Devmajor = p.parseNumeric(ustar.devMajor())
- hdr.Devminor = p.parseNumeric(ustar.devMinor())
-
- var prefix string
- switch {
- case format.has(FormatUSTAR | FormatPAX):
- hdr.Format = format
- ustar := tr.blk.toUSTAR()
- prefix = p.parseString(ustar.prefix())
-
- // For Format detection, check if block is properly formatted since
- // the parser is more liberal than what USTAR actually permits.
- notASCII := func(r rune) bool { return r >= 0x80 }
- if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
- hdr.Format = FormatUnknown // Non-ASCII characters in block.
- }
- nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
- if !(nul(v7.size()) && nul(v7.mode()) && nul(v7.uid()) && nul(v7.gid()) &&
- nul(v7.modTime()) && nul(ustar.devMajor()) && nul(ustar.devMinor())) {
- hdr.Format = FormatUnknown // Numeric fields must end in NUL
- }
- case format.has(formatSTAR):
- star := tr.blk.toSTAR()
- prefix = p.parseString(star.prefix())
- hdr.AccessTime = time.Unix(p.parseNumeric(star.accessTime()), 0)
- hdr.ChangeTime = time.Unix(p.parseNumeric(star.changeTime()), 0)
- case format.has(FormatGNU):
- hdr.Format = format
- var p2 parser
- gnu := tr.blk.toGNU()
- if b := gnu.accessTime(); b[0] != 0 {
- hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
- }
- if b := gnu.changeTime(); b[0] != 0 {
- hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
- }
-
- // Prior to Go1.8, the Writer had a bug where it would output
- // an invalid tar file in certain rare situations because the logic
- // incorrectly believed that the old GNU format had a prefix field.
- // This is wrong and leads to an output file that mangles the
- // atime and ctime fields, which are often left unused.
- //
- // In order to continue reading tar files created by former, buggy
- // versions of Go, we skeptically parse the atime and ctime fields.
- // If we are unable to parse them and the prefix field looks like
- // an ASCII string, then we fallback on the pre-Go1.8 behavior
- // of treating these fields as the USTAR prefix field.
- //
- // Note that this will not use the fallback logic for all possible
- // files generated by a pre-Go1.8 toolchain. If the generated file
- // happened to have a prefix field that parses as valid
- // atime and ctime fields (e.g., when they are valid octal strings),
- // then it is impossible to distinguish between a valid GNU file
- // and an invalid pre-Go1.8 file.
- //
- // See https://golang.org/issues/12594
- // See https://golang.org/issues/21005
- if p2.err != nil {
- hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
- ustar := tr.blk.toUSTAR()
- if s := p.parseString(ustar.prefix()); isASCII(s) {
- prefix = s
- }
- hdr.Format = FormatUnknown // Buggy file is not GNU
- }
- }
- if len(prefix) > 0 {
- hdr.Name = prefix + "/" + hdr.Name
- }
- }
- return hdr, &tr.blk, p.err
-}
-
-// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
-// The sparse map is stored in the tar header if it's small enough.
-// If it's larger than four entries, then one or more extension headers are used
-// to store the rest of the sparse map.
-//
-// The Header.Size does not reflect the size of any extended headers used.
-// Thus, this function will read from the raw io.Reader to fetch extra headers.
-// This method mutates blk in the process.
-func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
- // Make sure that the input format is GNU.
- // Unfortunately, the STAR format also has a sparse header format that uses
- // the same type flag but has a completely different layout.
- if blk.getFormat() != FormatGNU {
- return nil, ErrHeader
- }
- hdr.Format.mayOnlyBe(FormatGNU)
-
- var p parser
- hdr.Size = p.parseNumeric(blk.toGNU().realSize())
- if p.err != nil {
- return nil, p.err
- }
- s := blk.toGNU().sparse()
- spd := make(sparseDatas, 0, s.maxEntries())
- for {
- for i := 0; i < s.maxEntries(); i++ {
- // This termination condition is identical to GNU and BSD tar.
- if s.entry(i).offset()[0] == 0x00 {
- break // Don't return, need to process extended headers (even if empty)
- }
- offset := p.parseNumeric(s.entry(i).offset())
- length := p.parseNumeric(s.entry(i).length())
- if p.err != nil {
- return nil, p.err
- }
- spd = append(spd, sparseEntry{Offset: offset, Length: length})
- }
-
- if s.isExtended()[0] > 0 {
- // There are more entries. Read an extension header and parse its entries.
- if _, err := mustReadFull(tr.r, blk[:]); err != nil {
- return nil, err
- }
- s = blk.toSparse()
- continue
- }
- return spd, nil // Done
- }
-}
-
-// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
-// version 1.0. The format of the sparse map consists of a series of
-// newline-terminated numeric fields. The first field is the number of entries
-// and is always present. Following this are the entries, consisting of two
-// fields (offset, length). This function must stop reading at the end
-// boundary of the block containing the last newline.
-//
-// Note that the GNU manual says that numeric values should be encoded in octal
-// format. However, the GNU tar utility itself outputs these values in decimal.
-// As such, this library treats values as being encoded in decimal.
-func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
- var (
- cntNewline int64
- buf bytes.Buffer
- blk block
- )
-
- // feedTokens copies data in blocks from r into buf until there are
- // at least cnt newlines in buf. It will not read more blocks than needed.
- feedTokens := func(n int64) error {
- for cntNewline < n {
- if _, err := mustReadFull(r, blk[:]); err != nil {
- return err
- }
- buf.Write(blk[:])
- for _, c := range blk {
- if c == '\n' {
- cntNewline++
- }
- }
- }
- return nil
- }
-
- // nextToken gets the next token delimited by a newline. This assumes that
- // at least one newline exists in the buffer.
- nextToken := func() string {
- cntNewline--
- tok, _ := buf.ReadString('\n')
- return strings.TrimRight(tok, "\n")
- }
-
- // Parse for the number of entries.
- // Use integer overflow resistant math to check this.
- if err := feedTokens(1); err != nil {
- return nil, err
- }
- numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
- if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
- return nil, ErrHeader
- }
-
- // Parse for all member entries.
- // numEntries is trusted after this since a potential attacker must have
- // committed resources proportional to what this library used.
- if err := feedTokens(2 * numEntries); err != nil {
- return nil, err
- }
- spd := make(sparseDatas, 0, numEntries)
- for i := int64(0); i < numEntries; i++ {
- offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
- length, err2 := strconv.ParseInt(nextToken(), 10, 64)
- if err1 != nil || err2 != nil {
- return nil, ErrHeader
- }
- spd = append(spd, sparseEntry{Offset: offset, Length: length})
- }
- return spd, nil
-}
-
-// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
-// version 0.1. The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
- // Get number of entries.
- // Use integer overflow resistant math to check this.
- numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
- numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
- if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
- return nil, ErrHeader
- }
-
- // There should be two numbers in sparseMap for each entry.
- sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
- if len(sparseMap) == 1 && sparseMap[0] == "" {
- sparseMap = sparseMap[:0]
- }
- if int64(len(sparseMap)) != 2*numEntries {
- return nil, ErrHeader
- }
-
- // Loop through the entries in the sparse map.
- // numEntries is trusted now.
- spd := make(sparseDatas, 0, numEntries)
- for len(sparseMap) >= 2 {
- offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
- length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
- if err1 != nil || err2 != nil {
- return nil, ErrHeader
- }
- spd = append(spd, sparseEntry{Offset: offset, Length: length})
- sparseMap = sparseMap[2:]
- }
- return spd, nil
-}
-
-// Read reads from the current file in the tar archive.
-// It returns (0, io.EOF) when it reaches the end of that file,
-// until Next is called to advance to the next file.
-//
-// If the current file is sparse, then the regions marked as a hole
-// are read back as NUL-bytes.
-//
-// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
-// the Header.Size claims.
-func (tr *Reader) Read(b []byte) (int, error) {
- if tr.err != nil {
- return 0, tr.err
- }
- n, err := tr.curr.Read(b)
- if err != nil && err != io.EOF {
- tr.err = err
- }
- return n, err
-}
-
-// writeTo writes the content of the current file to w.
-// The bytes written matches the number of remaining bytes in the current file.
-//
-// If the current file is sparse and w is an io.WriteSeeker,
-// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
-// assuming that skipped regions are filled with NULs.
-// This always writes the last byte to ensure w is the right size.
-//
-// TODO(dsnet): Re-export this when adding sparse file support.
-// See https://golang.org/issue/22735
-func (tr *Reader) writeTo(w io.Writer) (int64, error) {
- if tr.err != nil {
- return 0, tr.err
- }
- n, err := tr.curr.WriteTo(w)
- if err != nil {
- tr.err = err
- }
- return n, err
-}
-
-// regFileReader is a fileReader for reading data from a regular file entry.
-type regFileReader struct {
- r io.Reader // Underlying Reader
- nb int64 // Number of remaining bytes to read
-}
-
-func (fr *regFileReader) Read(b []byte) (n int, err error) {
- if int64(len(b)) > fr.nb {
- b = b[:fr.nb]
- }
- if len(b) > 0 {
- n, err = fr.r.Read(b)
- fr.nb -= int64(n)
- }
- switch {
- case err == io.EOF && fr.nb > 0:
- return n, io.ErrUnexpectedEOF
- case err == nil && fr.nb == 0:
- return n, io.EOF
- default:
- return n, err
- }
-}
-
-func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
- return io.Copy(w, struct{ io.Reader }{fr})
-}
-
-// logicalRemaining implements fileState.logicalRemaining.
-func (fr regFileReader) logicalRemaining() int64 {
- return fr.nb
-}
-
-// physicalRemaining implements fileState.physicalRemaining.
-func (fr regFileReader) physicalRemaining() int64 {
- return fr.nb
-}
-
-// sparseFileReader is a fileReader for reading data from a sparse file entry.
-type sparseFileReader struct {
- fr fileReader // Underlying fileReader
- sp sparseHoles // Normalized list of sparse holes
- pos int64 // Current position in sparse file
-}
-
-func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
- finished := int64(len(b)) >= sr.logicalRemaining()
- if finished {
- b = b[:sr.logicalRemaining()]
- }
-
- b0 := b
- endPos := sr.pos + int64(len(b))
- for endPos > sr.pos && err == nil {
- var nf int // Bytes read in fragment
- holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
- if sr.pos < holeStart { // In a data fragment
- bf := b[:min(int64(len(b)), holeStart-sr.pos)]
- nf, err = tryReadFull(sr.fr, bf)
- } else { // In a hole fragment
- bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
- nf, err = tryReadFull(zeroReader{}, bf)
- }
- b = b[nf:]
- sr.pos += int64(nf)
- if sr.pos >= holeEnd && len(sr.sp) > 1 {
- sr.sp = sr.sp[1:] // Ensure last fragment always remains
- }
- }
-
- n = len(b0) - len(b)
- switch {
- case err == io.EOF:
- return n, errMissData // Less data in dense file than sparse file
- case err != nil:
- return n, err
- case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
- return n, errUnrefData // More data in dense file than sparse file
- case finished:
- return n, io.EOF
- default:
- return n, nil
- }
-}
-
-func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
- ws, ok := w.(io.WriteSeeker)
- if ok {
- if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
- ok = false // Not all io.Seeker can really seek
- }
- }
- if !ok {
- return io.Copy(w, struct{ io.Reader }{sr})
- }
-
- var writeLastByte bool
- pos0 := sr.pos
- for sr.logicalRemaining() > 0 && !writeLastByte && err == nil {
- var nf int64 // Size of fragment
- holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
- if sr.pos < holeStart { // In a data fragment
- nf = holeStart - sr.pos
- nf, err = io.CopyN(ws, sr.fr, nf)
- } else { // In a hole fragment
- nf = holeEnd - sr.pos
- if sr.physicalRemaining() == 0 {
- writeLastByte = true
- nf--
- }
- _, err = ws.Seek(nf, io.SeekCurrent)
- }
- sr.pos += nf
- if sr.pos >= holeEnd && len(sr.sp) > 1 {
- sr.sp = sr.sp[1:] // Ensure last fragment always remains
- }
- }
-
- // If the last fragment is a hole, then seek to 1-byte before EOF, and
- // write a single byte to ensure the file is the right size.
- if writeLastByte && err == nil {
- _, err = ws.Write([]byte{0})
- sr.pos++
- }
-
- n = sr.pos - pos0
- switch {
- case err == io.EOF:
- return n, errMissData // Less data in dense file than sparse file
- case err != nil:
- return n, err
- case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
- return n, errUnrefData // More data in dense file than sparse file
- default:
- return n, nil
- }
-}
-
-func (sr sparseFileReader) logicalRemaining() int64 {
- return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
-}
-func (sr sparseFileReader) physicalRemaining() int64 {
- return sr.fr.physicalRemaining()
-}
-
-type zeroReader struct{}
-
-func (zeroReader) Read(b []byte) (int, error) {
- for i := range b {
- b[i] = 0
- }
- return len(b), nil
-}
-
-// mustReadFull is like io.ReadFull except it returns
-// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
-func mustReadFull(r io.Reader, b []byte) (int, error) {
- n, err := tryReadFull(r, b)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return n, err
-}
-
-// tryReadFull is like io.ReadFull except it returns
-// io.EOF when it is hit before len(b) bytes are read.
-func tryReadFull(r io.Reader, b []byte) (n int, err error) {
- for len(b) > n && err == nil {
- var nn int
- nn, err = r.Read(b[n:])
- n += nn
- }
- if len(b) == n && err == io.EOF {
- err = nil
- }
- return n, err
-}
-
-// readSpecialFile is like io.ReadAll except it returns
-// ErrFieldTooLong if more than maxSpecialFileSize is read.
-func readSpecialFile(r io.Reader) ([]byte, error) {
- buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
- if len(buf) > maxSpecialFileSize {
- return nil, ErrFieldTooLong
- }
- return buf, err
-}
-
-// discard skips n bytes in r, reporting an error if unable to do so.
-func discard(r io.Reader, n int64) error {
- // If possible, Seek to the last byte before the end of the data section.
- // Do this because Seek is often lazy about reporting errors; this will mask
- // the fact that the stream may be truncated. We can rely on the
- // io.CopyN done shortly afterwards to trigger any IO errors.
- var seekSkipped int64 // Number of bytes skipped via Seek
- if sr, ok := r.(io.Seeker); ok && n > 1 {
- // Not all io.Seeker can actually Seek. For example, os.Stdin implements
- // io.Seeker, but calling Seek always returns an error and performs
- // no action. Thus, we try an innocent seek to the current position
- // to see if Seek is really supported.
- pos1, err := sr.Seek(0, io.SeekCurrent)
- if pos1 >= 0 && err == nil {
- // Seek seems supported, so perform the real Seek.
- pos2, err := sr.Seek(n-1, io.SeekCurrent)
- if pos2 < 0 || err != nil {
- return err
- }
- seekSkipped = pos2 - pos1
- }
- }
-
- copySkipped, err := io.CopyN(io.Discard, r, n-seekSkipped)
- if err == io.EOF && seekSkipped+copySkipped < n {
- err = io.ErrUnexpectedEOF
- }
- return err
-}
diff --git a/contrib/go/_std_1.21/src/archive/tar/stat_actime1.go b/contrib/go/_std_1.21/src/archive/tar/stat_actime1.go
deleted file mode 100644
index c4c2480fee..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/stat_actime1.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || linux || dragonfly || openbsd || solaris
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atim.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctim.Unix())
-}
diff --git a/contrib/go/_std_1.21/src/archive/tar/stat_actime2.go b/contrib/go/_std_1.21/src/archive/tar/stat_actime2.go
deleted file mode 100644
index f76d6be220..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/stat_actime2.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin || freebsd || netbsd
-
-package tar
-
-import (
- "syscall"
- "time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Atimespec.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
- return time.Unix(st.Ctimespec.Unix())
-}
diff --git a/contrib/go/_std_1.21/src/archive/tar/stat_unix.go b/contrib/go/_std_1.21/src/archive/tar/stat_unix.go
deleted file mode 100644
index 0f3428bc24..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/stat_unix.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build unix
-
-package tar
-
-import (
- "io/fs"
- "os/user"
- "runtime"
- "strconv"
- "sync"
- "syscall"
-)
-
-func init() {
- sysStat = statUnix
-}
-
-// userMap and groupMap caches UID and GID lookups for performance reasons.
-// The downside is that renaming uname or gname by the OS never takes effect.
-var userMap, groupMap sync.Map // map[int]string
-
-func statUnix(fi fs.FileInfo, h *Header) error {
- sys, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- return nil
- }
- h.Uid = int(sys.Uid)
- h.Gid = int(sys.Gid)
-
- // Best effort at populating Uname and Gname.
- // The os/user functions may fail for any number of reasons
- // (not implemented on that platform, cgo not enabled, etc).
- if u, ok := userMap.Load(h.Uid); ok {
- h.Uname = u.(string)
- } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
- h.Uname = u.Username
- userMap.Store(h.Uid, h.Uname)
- }
- if g, ok := groupMap.Load(h.Gid); ok {
- h.Gname = g.(string)
- } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
- h.Gname = g.Name
- groupMap.Store(h.Gid, h.Gname)
- }
-
- h.AccessTime = statAtime(sys)
- h.ChangeTime = statCtime(sys)
-
- // Best effort at populating Devmajor and Devminor.
- if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
- dev := uint64(sys.Rdev) // May be int32 or uint32
- switch runtime.GOOS {
- case "aix":
- var major, minor uint32
- major = uint32((dev & 0x3fffffff00000000) >> 32)
- minor = uint32((dev & 0x00000000ffffffff) >> 0)
- h.Devmajor, h.Devminor = int64(major), int64(minor)
- case "linux":
- // Copied from golang.org/x/sys/unix/dev_linux.go.
- major := uint32((dev & 0x00000000000fff00) >> 8)
- major |= uint32((dev & 0xfffff00000000000) >> 32)
- minor := uint32((dev & 0x00000000000000ff) >> 0)
- minor |= uint32((dev & 0x00000ffffff00000) >> 12)
- h.Devmajor, h.Devminor = int64(major), int64(minor)
- case "darwin", "ios":
- // Copied from golang.org/x/sys/unix/dev_darwin.go.
- major := uint32((dev >> 24) & 0xff)
- minor := uint32(dev & 0xffffff)
- h.Devmajor, h.Devminor = int64(major), int64(minor)
- case "dragonfly":
- // Copied from golang.org/x/sys/unix/dev_dragonfly.go.
- major := uint32((dev >> 8) & 0xff)
- minor := uint32(dev & 0xffff00ff)
- h.Devmajor, h.Devminor = int64(major), int64(minor)
- case "freebsd":
- // Copied from golang.org/x/sys/unix/dev_freebsd.go.
- major := uint32((dev >> 8) & 0xff)
- minor := uint32(dev & 0xffff00ff)
- h.Devmajor, h.Devminor = int64(major), int64(minor)
- case "netbsd":
- // Copied from golang.org/x/sys/unix/dev_netbsd.go.
- major := uint32((dev & 0x000fff00) >> 8)
- minor := uint32((dev & 0x000000ff) >> 0)
- minor |= uint32((dev & 0xfff00000) >> 12)
- h.Devmajor, h.Devminor = int64(major), int64(minor)
- case "openbsd":
- // Copied from golang.org/x/sys/unix/dev_openbsd.go.
- major := uint32((dev & 0x0000ff00) >> 8)
- minor := uint32((dev & 0x000000ff) >> 0)
- minor |= uint32((dev & 0xffff0000) >> 8)
- h.Devmajor, h.Devminor = int64(major), int64(minor)
- default:
- // TODO: Implement solaris (see https://golang.org/issue/8106)
- }
- }
- return nil
-}
diff --git a/contrib/go/_std_1.21/src/archive/tar/strconv.go b/contrib/go/_std_1.21/src/archive/tar/strconv.go
deleted file mode 100644
index ac3196370e..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/strconv.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// hasNUL reports whether the NUL character exists within s.
-func hasNUL(s string) bool {
- return strings.Contains(s, "\x00")
-}
-
-// isASCII reports whether the input is an ASCII C-style string.
-func isASCII(s string) bool {
- for _, c := range s {
- if c >= 0x80 || c == 0x00 {
- return false
- }
- }
- return true
-}
-
-// toASCII converts the input to an ASCII C-style string.
-// This is a best effort conversion, so invalid characters are dropped.
-func toASCII(s string) string {
- if isASCII(s) {
- return s
- }
- b := make([]byte, 0, len(s))
- for _, c := range s {
- if c < 0x80 && c != 0x00 {
- b = append(b, byte(c))
- }
- }
- return string(b)
-}
-
-type parser struct {
- err error // Last error seen
-}
-
-type formatter struct {
- err error // Last error seen
-}
-
-// parseString parses bytes as a NUL-terminated C-style string.
-// If a NUL byte is not found then the whole slice is returned as a string.
-func (*parser) parseString(b []byte) string {
- if i := bytes.IndexByte(b, 0); i >= 0 {
- return string(b[:i])
- }
- return string(b)
-}
-
-// formatString copies s into b, NUL-terminating if possible.
-func (f *formatter) formatString(b []byte, s string) {
- if len(s) > len(b) {
- f.err = ErrFieldTooLong
- }
- copy(b, s)
- if len(s) < len(b) {
- b[len(s)] = 0
- }
-
- // Some buggy readers treat regular files with a trailing slash
- // in the V7 path field as a directory even though the full path
- // recorded elsewhere (e.g., via PAX record) contains no trailing slash.
- if len(s) > len(b) && b[len(b)-1] == '/' {
- n := len(strings.TrimRight(s[:len(b)], "/"))
- b[n] = 0 // Replace trailing slash with NUL terminator
- }
-}
-
-// fitsInBase256 reports whether x can be encoded into n bytes using base-256
-// encoding. Unlike octal encoding, base-256 encoding does not require that the
-// string ends with a NUL character. Thus, all n bytes are available for output.
-//
-// If operating in binary mode, this assumes strict GNU binary mode; which means
-// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
-// equivalent to the sign bit in two's complement form.
-func fitsInBase256(n int, x int64) bool {
- binBits := uint(n-1) * 8
- return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
-}
-
-// parseNumeric parses the input as being encoded in either base-256 or octal.
-// This function may return negative numbers.
-// If parsing fails or an integer overflow occurs, err will be set.
-func (p *parser) parseNumeric(b []byte) int64 {
- // Check for base-256 (binary) format first.
- // If the first bit is set, then all following bits constitute a two's
- // complement encoded number in big-endian byte order.
- if len(b) > 0 && b[0]&0x80 != 0 {
- // Handling negative numbers relies on the following identity:
- // -a-1 == ^a
- //
- // If the number is negative, we use an inversion mask to invert the
- // data bytes and treat the value as an unsigned number.
- var inv byte // 0x00 if positive or zero, 0xff if negative
- if b[0]&0x40 != 0 {
- inv = 0xff
- }
-
- var x uint64
- for i, c := range b {
- c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
- if i == 0 {
- c &= 0x7f // Ignore signal bit in first byte
- }
- if (x >> 56) > 0 {
- p.err = ErrHeader // Integer overflow
- return 0
- }
- x = x<<8 | uint64(c)
- }
- if (x >> 63) > 0 {
- p.err = ErrHeader // Integer overflow
- return 0
- }
- if inv == 0xff {
- return ^int64(x)
- }
- return int64(x)
- }
-
- // Normal case is base-8 (octal) format.
- return p.parseOctal(b)
-}
-
-// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
-// Otherwise it will attempt to use base-256 (binary) encoding.
-func (f *formatter) formatNumeric(b []byte, x int64) {
- if fitsInOctal(len(b), x) {
- f.formatOctal(b, x)
- return
- }
-
- if fitsInBase256(len(b), x) {
- for i := len(b) - 1; i >= 0; i-- {
- b[i] = byte(x)
- x >>= 8
- }
- b[0] |= 0x80 // Highest bit indicates binary format
- return
- }
-
- f.formatOctal(b, 0) // Last resort, just write zero
- f.err = ErrFieldTooLong
-}
-
-func (p *parser) parseOctal(b []byte) int64 {
- // Because unused fields are filled with NULs, we need
- // to skip leading NULs. Fields may also be padded with
- // spaces or NULs.
- // So we remove leading and trailing NULs and spaces to
- // be sure.
- b = bytes.Trim(b, " \x00")
-
- if len(b) == 0 {
- return 0
- }
- x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
- if perr != nil {
- p.err = ErrHeader
- }
- return int64(x)
-}
-
-func (f *formatter) formatOctal(b []byte, x int64) {
- if !fitsInOctal(len(b), x) {
- x = 0 // Last resort, just write zero
- f.err = ErrFieldTooLong
- }
-
- s := strconv.FormatInt(x, 8)
- // Add leading zeros, but leave room for a NUL.
- if n := len(b) - len(s) - 1; n > 0 {
- s = strings.Repeat("0", n) + s
- }
- f.formatString(b, s)
-}
-
-// fitsInOctal reports whether the integer x fits in a field n-bytes long
-// using octal encoding with the appropriate NUL terminator.
-func fitsInOctal(n int, x int64) bool {
- octBits := uint(n-1) * 3
- return x >= 0 && (n >= 22 || x < 1<<octBits)
-}
-
-// parsePAXTime takes a string of the form %d.%d as described in the PAX
-// specification. Note that this implementation allows for negative timestamps,
-// which is allowed for by the PAX specification, but not always portable.
-func parsePAXTime(s string) (time.Time, error) {
- const maxNanoSecondDigits = 9
-
- // Split string into seconds and sub-seconds parts.
- ss, sn, _ := strings.Cut(s, ".")
-
- // Parse the seconds.
- secs, err := strconv.ParseInt(ss, 10, 64)
- if err != nil {
- return time.Time{}, ErrHeader
- }
- if len(sn) == 0 {
- return time.Unix(secs, 0), nil // No sub-second values
- }
-
- // Parse the nanoseconds.
- if strings.Trim(sn, "0123456789") != "" {
- return time.Time{}, ErrHeader
- }
- if len(sn) < maxNanoSecondDigits {
- sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
- } else {
- sn = sn[:maxNanoSecondDigits] // Right truncate
- }
- nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
- if len(ss) > 0 && ss[0] == '-' {
- return time.Unix(secs, -1*nsecs), nil // Negative correction
- }
- return time.Unix(secs, nsecs), nil
-}
-
-// formatPAXTime converts ts into a time of the form %d.%d as described in the
-// PAX specification. This function is capable of negative timestamps.
-func formatPAXTime(ts time.Time) (s string) {
- secs, nsecs := ts.Unix(), ts.Nanosecond()
- if nsecs == 0 {
- return strconv.FormatInt(secs, 10)
- }
-
- // If seconds is negative, then perform correction.
- sign := ""
- if secs < 0 {
- sign = "-" // Remember sign
- secs = -(secs + 1) // Add a second to secs
- nsecs = -(nsecs - 1e9) // Take that second away from nsecs
- }
- return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
-}
-
-// parsePAXRecord parses the input PAX record string into a key-value pair.
-// If parsing is successful, it will slice off the currently read record and
-// return the remainder as r.
-func parsePAXRecord(s string) (k, v, r string, err error) {
- // The size field ends at the first space.
- nStr, rest, ok := strings.Cut(s, " ")
- if !ok {
- return "", "", s, ErrHeader
- }
-
- // Parse the first token as a decimal integer.
- n, perr := strconv.ParseInt(nStr, 10, 0) // Intentionally parse as native int
- if perr != nil || n < 5 || n > int64(len(s)) {
- return "", "", s, ErrHeader
- }
- n -= int64(len(nStr) + 1) // convert from index in s to index in rest
- if n <= 0 {
- return "", "", s, ErrHeader
- }
-
- // Extract everything between the space and the final newline.
- rec, nl, rem := rest[:n-1], rest[n-1:n], rest[n:]
- if nl != "\n" {
- return "", "", s, ErrHeader
- }
-
- // The first equals separates the key from the value.
- k, v, ok = strings.Cut(rec, "=")
- if !ok {
- return "", "", s, ErrHeader
- }
-
- if !validPAXRecord(k, v) {
- return "", "", s, ErrHeader
- }
- return k, v, rem, nil
-}
-
-// formatPAXRecord formats a single PAX record, prefixing it with the
-// appropriate length.
-func formatPAXRecord(k, v string) (string, error) {
- if !validPAXRecord(k, v) {
- return "", ErrHeader
- }
-
- const padding = 3 // Extra padding for ' ', '=', and '\n'
- size := len(k) + len(v) + padding
- size += len(strconv.Itoa(size))
- record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
-
- // Final adjustment if adding size field increased the record size.
- if len(record) != size {
- size = len(record)
- record = strconv.Itoa(size) + " " + k + "=" + v + "\n"
- }
- return record, nil
-}
-
-// validPAXRecord reports whether the key-value pair is valid where each
-// record is formatted as:
-//
-// "%d %s=%s\n" % (size, key, value)
-//
-// Keys and values should be UTF-8, but the number of bad writers out there
-// forces us to be a more liberal.
-// Thus, we only reject all keys with NUL, and only reject NULs in values
-// for the PAX version of the USTAR string fields.
-// The key must not contain an '=' character.
-func validPAXRecord(k, v string) bool {
- if k == "" || strings.Contains(k, "=") {
- return false
- }
- switch k {
- case paxPath, paxLinkpath, paxUname, paxGname:
- return !hasNUL(v)
- default:
- return !hasNUL(k)
- }
-}
diff --git a/contrib/go/_std_1.21/src/archive/tar/writer.go b/contrib/go/_std_1.21/src/archive/tar/writer.go
deleted file mode 100644
index 1c95f0738a..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/writer.go
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
- "fmt"
- "io"
- "path"
- "sort"
- "strings"
- "time"
-)
-
-// Writer provides sequential writing of a tar archive.
-// Write.WriteHeader begins a new file with the provided Header,
-// and then Writer can be treated as an io.Writer to supply that file's data.
-type Writer struct {
- w io.Writer
- pad int64 // Amount of padding to write after current file entry
- curr fileWriter // Writer for current file entry
- hdr Header // Shallow copy of Header that is safe for mutations
- blk block // Buffer to use as temporary local storage
-
- // err is a persistent error.
- // It is only the responsibility of every exported method of Writer to
- // ensure that this error is sticky.
- err error
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer {
- return &Writer{w: w, curr: &regFileWriter{w, 0}}
-}
-
-type fileWriter interface {
- io.Writer
- fileState
-
- ReadFrom(io.Reader) (int64, error)
-}
-
-// Flush finishes writing the current file's block padding.
-// The current file must be fully written before Flush can be called.
-//
-// This is unnecessary as the next call to WriteHeader or Close
-// will implicitly flush out the file's padding.
-func (tw *Writer) Flush() error {
- if tw.err != nil {
- return tw.err
- }
- if nb := tw.curr.logicalRemaining(); nb > 0 {
- return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
- }
- if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
- return tw.err
- }
- tw.pad = 0
- return nil
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// The Header.Size determines how many bytes can be written for the next file.
-// If the current file is not fully written, then this returns an error.
-// This implicitly flushes any padding necessary before writing the header.
-func (tw *Writer) WriteHeader(hdr *Header) error {
- if err := tw.Flush(); err != nil {
- return err
- }
- tw.hdr = *hdr // Shallow copy of Header
-
- // Avoid usage of the legacy TypeRegA flag, and automatically promote
- // it to use TypeReg or TypeDir.
- if tw.hdr.Typeflag == TypeRegA {
- if strings.HasSuffix(tw.hdr.Name, "/") {
- tw.hdr.Typeflag = TypeDir
- } else {
- tw.hdr.Typeflag = TypeReg
- }
- }
-
- // Round ModTime and ignore AccessTime and ChangeTime unless
- // the format is explicitly chosen.
- // This ensures nominal usage of WriteHeader (without specifying the format)
- // does not always result in the PAX format being chosen, which
- // causes a 1KiB increase to every header.
- if tw.hdr.Format == FormatUnknown {
- tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
- tw.hdr.AccessTime = time.Time{}
- tw.hdr.ChangeTime = time.Time{}
- }
-
- allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
- switch {
- case allowedFormats.has(FormatUSTAR):
- tw.err = tw.writeUSTARHeader(&tw.hdr)
- return tw.err
- case allowedFormats.has(FormatPAX):
- tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
- return tw.err
- case allowedFormats.has(FormatGNU):
- tw.err = tw.writeGNUHeader(&tw.hdr)
- return tw.err
- default:
- return err // Non-fatal error
- }
-}
-
-func (tw *Writer) writeUSTARHeader(hdr *Header) error {
- // Check if we can use USTAR prefix/suffix splitting.
- var namePrefix string
- if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
- namePrefix, hdr.Name = prefix, suffix
- }
-
- // Pack the main header.
- var f formatter
- blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
- f.formatString(blk.toUSTAR().prefix(), namePrefix)
- blk.setFormat(FormatUSTAR)
- if f.err != nil {
- return f.err // Should never happen since header is validated
- }
- return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
-}
-
-func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
- realName, realSize := hdr.Name, hdr.Size
-
- // TODO(dsnet): Re-enable this when adding sparse support.
- // See https://golang.org/issue/22735
- /*
- // Handle sparse files.
- var spd sparseDatas
- var spb []byte
- if len(hdr.SparseHoles) > 0 {
- sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
- sph = alignSparseEntries(sph, hdr.Size)
- spd = invertSparseEntries(sph, hdr.Size)
-
- // Format the sparse map.
- hdr.Size = 0 // Replace with encoded size
- spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
- for _, s := range spd {
- hdr.Size += s.Length
- spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
- spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
- }
- pad := blockPadding(int64(len(spb)))
- spb = append(spb, zeroBlock[:pad]...)
- hdr.Size += int64(len(spb)) // Accounts for encoded sparse map
-
- // Add and modify appropriate PAX records.
- dir, file := path.Split(realName)
- hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
- paxHdrs[paxGNUSparseMajor] = "1"
- paxHdrs[paxGNUSparseMinor] = "0"
- paxHdrs[paxGNUSparseName] = realName
- paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
- paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
- delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
- }
- */
- _ = realSize
-
- // Write PAX records to the output.
- isGlobal := hdr.Typeflag == TypeXGlobalHeader
- if len(paxHdrs) > 0 || isGlobal {
- // Sort keys for deterministic ordering.
- var keys []string
- for k := range paxHdrs {
- keys = append(keys, k)
- }
- sort.Strings(keys)
-
- // Write each record to a buffer.
- var buf strings.Builder
- for _, k := range keys {
- rec, err := formatPAXRecord(k, paxHdrs[k])
- if err != nil {
- return err
- }
- buf.WriteString(rec)
- }
-
- // Write the extended header file.
- var name string
- var flag byte
- if isGlobal {
- name = realName
- if name == "" {
- name = "GlobalHead.0.0"
- }
- flag = TypeXGlobalHeader
- } else {
- dir, file := path.Split(realName)
- name = path.Join(dir, "PaxHeaders.0", file)
- flag = TypeXHeader
- }
- data := buf.String()
- if len(data) > maxSpecialFileSize {
- return ErrFieldTooLong
- }
- if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
- return err // Global headers return here
- }
- }
-
- // Pack the main header.
- var f formatter // Ignore errors since they are expected
- fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
- blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
- blk.setFormat(FormatPAX)
- if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
- return err
- }
-
- // TODO(dsnet): Re-enable this when adding sparse support.
- // See https://golang.org/issue/22735
- /*
- // Write the sparse map and setup the sparse writer if necessary.
- if len(spd) > 0 {
- // Use tw.curr since the sparse map is accounted for in hdr.Size.
- if _, err := tw.curr.Write(spb); err != nil {
- return err
- }
- tw.curr = &sparseFileWriter{tw.curr, spd, 0}
- }
- */
- return nil
-}
-
-func (tw *Writer) writeGNUHeader(hdr *Header) error {
- // Use long-link files if Name or Linkname exceeds the field size.
- const longName = "././@LongLink"
- if len(hdr.Name) > nameSize {
- data := hdr.Name + "\x00"
- if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
- return err
- }
- }
- if len(hdr.Linkname) > nameSize {
- data := hdr.Linkname + "\x00"
- if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
- return err
- }
- }
-
- // Pack the main header.
- var f formatter // Ignore errors since they are expected
- var spd sparseDatas
- var spb []byte
- blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
- if !hdr.AccessTime.IsZero() {
- f.formatNumeric(blk.toGNU().accessTime(), hdr.AccessTime.Unix())
- }
- if !hdr.ChangeTime.IsZero() {
- f.formatNumeric(blk.toGNU().changeTime(), hdr.ChangeTime.Unix())
- }
- // TODO(dsnet): Re-enable this when adding sparse support.
- // See https://golang.org/issue/22735
- /*
- if hdr.Typeflag == TypeGNUSparse {
- sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
- sph = alignSparseEntries(sph, hdr.Size)
- spd = invertSparseEntries(sph, hdr.Size)
-
- // Format the sparse map.
- formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
- for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
- f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
- f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
- sp = sp[1:]
- }
- if len(sp) > 0 {
- sa.IsExtended()[0] = 1
- }
- return sp
- }
- sp2 := formatSPD(spd, blk.GNU().Sparse())
- for len(sp2) > 0 {
- var spHdr block
- sp2 = formatSPD(sp2, spHdr.Sparse())
- spb = append(spb, spHdr[:]...)
- }
-
- // Update size fields in the header block.
- realSize := hdr.Size
- hdr.Size = 0 // Encoded size; does not account for encoded sparse map
- for _, s := range spd {
- hdr.Size += s.Length
- }
- copy(blk.V7().Size(), zeroBlock[:]) // Reset field
- f.formatNumeric(blk.V7().Size(), hdr.Size)
- f.formatNumeric(blk.GNU().RealSize(), realSize)
- }
- */
- blk.setFormat(FormatGNU)
- if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
- return err
- }
-
- // Write the extended sparse map and setup the sparse writer if necessary.
- if len(spd) > 0 {
- // Use tw.w since the sparse map is not accounted for in hdr.Size.
- if _, err := tw.w.Write(spb); err != nil {
- return err
- }
- tw.curr = &sparseFileWriter{tw.curr, spd, 0}
- }
- return nil
-}
-
-type (
- stringFormatter func([]byte, string)
- numberFormatter func([]byte, int64)
-)
-
-// templateV7Plus fills out the V7 fields of a block using values from hdr.
-// It also fills out fields (uname, gname, devmajor, devminor) that are
-// shared in the USTAR, PAX, and GNU formats using the provided formatters.
-//
-// The block returned is only valid until the next call to
-// templateV7Plus or writeRawFile.
-func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
- tw.blk.reset()
-
- modTime := hdr.ModTime
- if modTime.IsZero() {
- modTime = time.Unix(0, 0)
- }
-
- v7 := tw.blk.toV7()
- v7.typeFlag()[0] = hdr.Typeflag
- fmtStr(v7.name(), hdr.Name)
- fmtStr(v7.linkName(), hdr.Linkname)
- fmtNum(v7.mode(), hdr.Mode)
- fmtNum(v7.uid(), int64(hdr.Uid))
- fmtNum(v7.gid(), int64(hdr.Gid))
- fmtNum(v7.size(), hdr.Size)
- fmtNum(v7.modTime(), modTime.Unix())
-
- ustar := tw.blk.toUSTAR()
- fmtStr(ustar.userName(), hdr.Uname)
- fmtStr(ustar.groupName(), hdr.Gname)
- fmtNum(ustar.devMajor(), hdr.Devmajor)
- fmtNum(ustar.devMinor(), hdr.Devminor)
-
- return &tw.blk
-}
-
-// writeRawFile writes a minimal file with the given name and flag type.
-// It uses format to encode the header format and will write data as the body.
-// It uses default values for all of the other fields (as BSD and GNU tar does).
-func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
- tw.blk.reset()
-
- // Best effort for the filename.
- name = toASCII(name)
- if len(name) > nameSize {
- name = name[:nameSize]
- }
- name = strings.TrimRight(name, "/")
-
- var f formatter
- v7 := tw.blk.toV7()
- v7.typeFlag()[0] = flag
- f.formatString(v7.name(), name)
- f.formatOctal(v7.mode(), 0)
- f.formatOctal(v7.uid(), 0)
- f.formatOctal(v7.gid(), 0)
- f.formatOctal(v7.size(), int64(len(data))) // Must be < 8GiB
- f.formatOctal(v7.modTime(), 0)
- tw.blk.setFormat(format)
- if f.err != nil {
- return f.err // Only occurs if size condition is violated
- }
-
- // Write the header and data.
- if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
- return err
- }
- _, err := io.WriteString(tw, data)
- return err
-}
-
-// writeRawHeader writes the value of blk, regardless of its value.
-// It sets up the Writer such that it can accept a file of the given size.
-// If the flag is a special header-only flag, then the size is treated as zero.
-func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
- if err := tw.Flush(); err != nil {
- return err
- }
- if _, err := tw.w.Write(blk[:]); err != nil {
- return err
- }
- if isHeaderOnlyType(flag) {
- size = 0
- }
- tw.curr = &regFileWriter{tw.w, size}
- tw.pad = blockPadding(size)
- return nil
-}
-
-// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
-// If the path is not splittable, then it will return ("", "", false).
-func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
- length := len(name)
- if length <= nameSize || !isASCII(name) {
- return "", "", false
- } else if length > prefixSize+1 {
- length = prefixSize + 1
- } else if name[length-1] == '/' {
- length--
- }
-
- i := strings.LastIndex(name[:length], "/")
- nlen := len(name) - i - 1 // nlen is length of suffix
- plen := i // plen is length of prefix
- if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
- return "", "", false
- }
- return name[:i], name[i+1:], true
-}
-
-// Write writes to the current file in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// Header.Size bytes are written after WriteHeader.
-//
-// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
-// of what the Header.Size claims.
-func (tw *Writer) Write(b []byte) (int, error) {
- if tw.err != nil {
- return 0, tw.err
- }
- n, err := tw.curr.Write(b)
- if err != nil && err != ErrWriteTooLong {
- tw.err = err
- }
- return n, err
-}
-
-// readFrom populates the content of the current file by reading from r.
-// The bytes read must match the number of remaining bytes in the current file.
-//
-// If the current file is sparse and r is an io.ReadSeeker,
-// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
-// assuming that skipped regions are all NULs.
-// This always reads the last byte to ensure r is the right size.
-//
-// TODO(dsnet): Re-export this when adding sparse file support.
-// See https://golang.org/issue/22735
-func (tw *Writer) readFrom(r io.Reader) (int64, error) {
- if tw.err != nil {
- return 0, tw.err
- }
- n, err := tw.curr.ReadFrom(r)
- if err != nil && err != ErrWriteTooLong {
- tw.err = err
- }
- return n, err
-}
-
-// Close closes the tar archive by flushing the padding, and writing the footer.
-// If the current file (from a prior call to WriteHeader) is not fully written,
-// then this returns an error.
-func (tw *Writer) Close() error {
- if tw.err == ErrWriteAfterClose {
- return nil
- }
- if tw.err != nil {
- return tw.err
- }
-
- // Trailer: two zero blocks.
- err := tw.Flush()
- for i := 0; i < 2 && err == nil; i++ {
- _, err = tw.w.Write(zeroBlock[:])
- }
-
- // Ensure all future actions are invalid.
- tw.err = ErrWriteAfterClose
- return err // Report IO errors
-}
-
-// regFileWriter is a fileWriter for writing data to a regular file entry.
-type regFileWriter struct {
- w io.Writer // Underlying Writer
- nb int64 // Number of remaining bytes to write
-}
-
-func (fw *regFileWriter) Write(b []byte) (n int, err error) {
- overwrite := int64(len(b)) > fw.nb
- if overwrite {
- b = b[:fw.nb]
- }
- if len(b) > 0 {
- n, err = fw.w.Write(b)
- fw.nb -= int64(n)
- }
- switch {
- case err != nil:
- return n, err
- case overwrite:
- return n, ErrWriteTooLong
- default:
- return n, nil
- }
-}
-
-func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
- return io.Copy(struct{ io.Writer }{fw}, r)
-}
-
-// logicalRemaining implements fileState.logicalRemaining.
-func (fw regFileWriter) logicalRemaining() int64 {
- return fw.nb
-}
-
-// physicalRemaining implements fileState.physicalRemaining.
-func (fw regFileWriter) physicalRemaining() int64 {
- return fw.nb
-}
-
-// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
-type sparseFileWriter struct {
- fw fileWriter // Underlying fileWriter
- sp sparseDatas // Normalized list of data fragments
- pos int64 // Current position in sparse file
-}
-
-func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
- overwrite := int64(len(b)) > sw.logicalRemaining()
- if overwrite {
- b = b[:sw.logicalRemaining()]
- }
-
- b0 := b
- endPos := sw.pos + int64(len(b))
- for endPos > sw.pos && err == nil {
- var nf int // Bytes written in fragment
- dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
- if sw.pos < dataStart { // In a hole fragment
- bf := b[:min(int64(len(b)), dataStart-sw.pos)]
- nf, err = zeroWriter{}.Write(bf)
- } else { // In a data fragment
- bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
- nf, err = sw.fw.Write(bf)
- }
- b = b[nf:]
- sw.pos += int64(nf)
- if sw.pos >= dataEnd && len(sw.sp) > 1 {
- sw.sp = sw.sp[1:] // Ensure last fragment always remains
- }
- }
-
- n = len(b0) - len(b)
- switch {
- case err == ErrWriteTooLong:
- return n, errMissData // Not possible; implies bug in validation logic
- case err != nil:
- return n, err
- case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
- return n, errUnrefData // Not possible; implies bug in validation logic
- case overwrite:
- return n, ErrWriteTooLong
- default:
- return n, nil
- }
-}
-
-func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
- rs, ok := r.(io.ReadSeeker)
- if ok {
- if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
- ok = false // Not all io.Seeker can really seek
- }
- }
- if !ok {
- return io.Copy(struct{ io.Writer }{sw}, r)
- }
-
- var readLastByte bool
- pos0 := sw.pos
- for sw.logicalRemaining() > 0 && !readLastByte && err == nil {
- var nf int64 // Size of fragment
- dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
- if sw.pos < dataStart { // In a hole fragment
- nf = dataStart - sw.pos
- if sw.physicalRemaining() == 0 {
- readLastByte = true
- nf--
- }
- _, err = rs.Seek(nf, io.SeekCurrent)
- } else { // In a data fragment
- nf = dataEnd - sw.pos
- nf, err = io.CopyN(sw.fw, rs, nf)
- }
- sw.pos += nf
- if sw.pos >= dataEnd && len(sw.sp) > 1 {
- sw.sp = sw.sp[1:] // Ensure last fragment always remains
- }
- }
-
- // If the last fragment is a hole, then seek to 1-byte before EOF, and
- // read a single byte to ensure the file is the right size.
- if readLastByte && err == nil {
- _, err = mustReadFull(rs, []byte{0})
- sw.pos++
- }
-
- n = sw.pos - pos0
- switch {
- case err == io.EOF:
- return n, io.ErrUnexpectedEOF
- case err == ErrWriteTooLong:
- return n, errMissData // Not possible; implies bug in validation logic
- case err != nil:
- return n, err
- case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
- return n, errUnrefData // Not possible; implies bug in validation logic
- default:
- return n, ensureEOF(rs)
- }
-}
-
-func (sw sparseFileWriter) logicalRemaining() int64 {
- return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
-}
-func (sw sparseFileWriter) physicalRemaining() int64 {
- return sw.fw.physicalRemaining()
-}
-
-// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
-type zeroWriter struct{}
-
-func (zeroWriter) Write(b []byte) (int, error) {
- for i, c := range b {
- if c != 0 {
- return i, errWriteHole
- }
- }
- return len(b), nil
-}
-
-// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
-func ensureEOF(r io.Reader) error {
- n, err := tryReadFull(r, []byte{0})
- switch {
- case n > 0:
- return ErrWriteTooLong
- case err == io.EOF:
- return nil
- default:
- return err
- }
-}
diff --git a/contrib/go/_std_1.21/src/archive/tar/ya.make b/contrib/go/_std_1.21/src/archive/tar/ya.make
deleted file mode 100644
index 4b06fb1c02..0000000000
--- a/contrib/go/_std_1.21/src/archive/tar/ya.make
+++ /dev/null
@@ -1,38 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- common.go
- format.go
- reader.go
- strconv.go
- writer.go
-)
-
-GO_TEST_SRCS(
- fuzz_test.go
- reader_test.go
- strconv_test.go
- tar_test.go
- writer_test.go
-)
-
-GO_XTEST_SRCS(example_test.go)
-
-IF (OS_LINUX)
- SRCS(
- stat_actime1.go
- stat_unix.go
- )
-ENDIF()
-
-IF (OS_DARWIN)
- SRCS(
- stat_actime2.go
- stat_unix.go
- )
-ENDIF()
-
-END()
-
-RECURSE(
-)
diff --git a/contrib/go/_std_1.21/src/database/sql/convert.go b/contrib/go/_std_1.21/src/database/sql/convert.go
deleted file mode 100644
index ffc4e497b4..0000000000
--- a/contrib/go/_std_1.21/src/database/sql/convert.go
+++ /dev/null
@@ -1,591 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Type conversions for Scan.
-
-package sql
-
-import (
- "bytes"
- "database/sql/driver"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "time"
- "unicode"
- "unicode/utf8"
-)
-
-var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error
-
-func describeNamedValue(nv *driver.NamedValue) string {
- if len(nv.Name) == 0 {
- return fmt.Sprintf("$%d", nv.Ordinal)
- }
- return fmt.Sprintf("with name %q", nv.Name)
-}
-
-func validateNamedValueName(name string) error {
- if len(name) == 0 {
- return nil
- }
- r, _ := utf8.DecodeRuneInString(name)
- if unicode.IsLetter(r) {
- return nil
- }
- return fmt.Errorf("name %q does not begin with a letter", name)
-}
-
-// ccChecker wraps the driver.ColumnConverter and allows it to be used
-// as if it were a NamedValueChecker. If the driver ColumnConverter
-// is not present then the NamedValueChecker will return driver.ErrSkip.
-type ccChecker struct {
- cci driver.ColumnConverter
- want int
-}
-
-func (c ccChecker) CheckNamedValue(nv *driver.NamedValue) error {
- if c.cci == nil {
- return driver.ErrSkip
- }
- // The column converter shouldn't be called on any index
- // it isn't expecting. The final error will be thrown
- // in the argument converter loop.
- index := nv.Ordinal - 1
- if c.want <= index {
- return nil
- }
-
- // First, see if the value itself knows how to convert
- // itself to a driver type. For example, a NullString
- // struct changing into a string or nil.
- if vr, ok := nv.Value.(driver.Valuer); ok {
- sv, err := callValuerValue(vr)
- if err != nil {
- return err
- }
- if !driver.IsValue(sv) {
- return fmt.Errorf("non-subset type %T returned from Value", sv)
- }
- nv.Value = sv
- }
-
- // Second, ask the column to sanity check itself. For
- // example, drivers might use this to make sure that
- // an int64 values being inserted into a 16-bit
- // integer field is in range (before getting
- // truncated), or that a nil can't go into a NOT NULL
- // column before going across the network to get the
- // same error.
- var err error
- arg := nv.Value
- nv.Value, err = c.cci.ColumnConverter(index).ConvertValue(arg)
- if err != nil {
- return err
- }
- if !driver.IsValue(nv.Value) {
- return fmt.Errorf("driver ColumnConverter error converted %T to unsupported type %T", arg, nv.Value)
- }
- return nil
-}
-
-// defaultCheckNamedValue wraps the default ColumnConverter to have the same
-// function signature as the CheckNamedValue in the driver.NamedValueChecker
-// interface.
-func defaultCheckNamedValue(nv *driver.NamedValue) (err error) {
- nv.Value, err = driver.DefaultParameterConverter.ConvertValue(nv.Value)
- return err
-}
-
-// driverArgsConnLocked converts arguments from callers of Stmt.Exec and
-// Stmt.Query into driver Values.
-//
-// The statement ds may be nil, if no statement is available.
-//
-// ci must be locked.
-func driverArgsConnLocked(ci driver.Conn, ds *driverStmt, args []any) ([]driver.NamedValue, error) {
- nvargs := make([]driver.NamedValue, len(args))
-
- // -1 means the driver doesn't know how to count the number of
- // placeholders, so we won't sanity check input here and instead let the
- // driver deal with errors.
- want := -1
-
- var si driver.Stmt
- var cc ccChecker
- if ds != nil {
- si = ds.si
- want = ds.si.NumInput()
- cc.want = want
- }
-
- // Check all types of interfaces from the start.
- // Drivers may opt to use the NamedValueChecker for special
- // argument types, then return driver.ErrSkip to pass it along
- // to the column converter.
- nvc, ok := si.(driver.NamedValueChecker)
- if !ok {
- nvc, ok = ci.(driver.NamedValueChecker)
- }
- cci, ok := si.(driver.ColumnConverter)
- if ok {
- cc.cci = cci
- }
-
- // Loop through all the arguments, checking each one.
- // If no error is returned simply increment the index
- // and continue. However if driver.ErrRemoveArgument
- // is returned the argument is not included in the query
- // argument list.
- var err error
- var n int
- for _, arg := range args {
- nv := &nvargs[n]
- if np, ok := arg.(NamedArg); ok {
- if err = validateNamedValueName(np.Name); err != nil {
- return nil, err
- }
- arg = np.Value
- nv.Name = np.Name
- }
- nv.Ordinal = n + 1
- nv.Value = arg
-
- // Checking sequence has four routes:
- // A: 1. Default
- // B: 1. NamedValueChecker 2. Column Converter 3. Default
- // C: 1. NamedValueChecker 3. Default
- // D: 1. Column Converter 2. Default
- //
- // The only time a Column Converter is called is first
- // or after NamedValueConverter. If first it is handled before
- // the nextCheck label. Thus for repeats tries only when the
- // NamedValueConverter is selected should the Column Converter
- // be used in the retry.
- checker := defaultCheckNamedValue
- nextCC := false
- switch {
- case nvc != nil:
- nextCC = cci != nil
- checker = nvc.CheckNamedValue
- case cci != nil:
- checker = cc.CheckNamedValue
- }
-
- nextCheck:
- err = checker(nv)
- switch err {
- case nil:
- n++
- continue
- case driver.ErrRemoveArgument:
- nvargs = nvargs[:len(nvargs)-1]
- continue
- case driver.ErrSkip:
- if nextCC {
- nextCC = false
- checker = cc.CheckNamedValue
- } else {
- checker = defaultCheckNamedValue
- }
- goto nextCheck
- default:
- return nil, fmt.Errorf("sql: converting argument %s type: %v", describeNamedValue(nv), err)
- }
- }
-
- // Check the length of arguments after conversion to allow for omitted
- // arguments.
- if want != -1 && len(nvargs) != want {
- return nil, fmt.Errorf("sql: expected %d arguments, got %d", want, len(nvargs))
- }
-
- return nvargs, nil
-
-}
-
-// convertAssign is the same as convertAssignRows, but without the optional
-// rows argument.
-func convertAssign(dest, src any) error {
- return convertAssignRows(dest, src, nil)
-}
-
-// convertAssignRows copies to dest the value in src, converting it if possible.
-// An error is returned if the copy would result in loss of information.
-// dest should be a pointer type. If rows is passed in, the rows will
-// be used as the parent for any cursor values converted from a
-// driver.Rows to a *Rows.
-func convertAssignRows(dest, src any, rows *Rows) error {
- // Common cases, without reflect.
- switch s := src.(type) {
- case string:
- switch d := dest.(type) {
- case *string:
- if d == nil {
- return errNilPtr
- }
- *d = s
- return nil
- case *[]byte:
- if d == nil {
- return errNilPtr
- }
- *d = []byte(s)
- return nil
- case *RawBytes:
- if d == nil {
- return errNilPtr
- }
- *d = append((*d)[:0], s...)
- return nil
- }
- case []byte:
- switch d := dest.(type) {
- case *string:
- if d == nil {
- return errNilPtr
- }
- *d = string(s)
- return nil
- case *any:
- if d == nil {
- return errNilPtr
- }
- *d = bytes.Clone(s)
- return nil
- case *[]byte:
- if d == nil {
- return errNilPtr
- }
- *d = bytes.Clone(s)
- return nil
- case *RawBytes:
- if d == nil {
- return errNilPtr
- }
- *d = s
- return nil
- }
- case time.Time:
- switch d := dest.(type) {
- case *time.Time:
- *d = s
- return nil
- case *string:
- *d = s.Format(time.RFC3339Nano)
- return nil
- case *[]byte:
- if d == nil {
- return errNilPtr
- }
- *d = []byte(s.Format(time.RFC3339Nano))
- return nil
- case *RawBytes:
- if d == nil {
- return errNilPtr
- }
- *d = s.AppendFormat((*d)[:0], time.RFC3339Nano)
- return nil
- }
- case decimalDecompose:
- switch d := dest.(type) {
- case decimalCompose:
- return d.Compose(s.Decompose(nil))
- }
- case nil:
- switch d := dest.(type) {
- case *any:
- if d == nil {
- return errNilPtr
- }
- *d = nil
- return nil
- case *[]byte:
- if d == nil {
- return errNilPtr
- }
- *d = nil
- return nil
- case *RawBytes:
- if d == nil {
- return errNilPtr
- }
- *d = nil
- return nil
- }
- // The driver is returning a cursor the client may iterate over.
- case driver.Rows:
- switch d := dest.(type) {
- case *Rows:
- if d == nil {
- return errNilPtr
- }
- if rows == nil {
- return errors.New("invalid context to convert cursor rows, missing parent *Rows")
- }
- rows.closemu.Lock()
- *d = Rows{
- dc: rows.dc,
- releaseConn: func(error) {},
- rowsi: s,
- }
- // Chain the cancel function.
- parentCancel := rows.cancel
- rows.cancel = func() {
- // When Rows.cancel is called, the closemu will be locked as well.
- // So we can access rs.lasterr.
- d.close(rows.lasterr)
- if parentCancel != nil {
- parentCancel()
- }
- }
- rows.closemu.Unlock()
- return nil
- }
- }
-
- var sv reflect.Value
-
- switch d := dest.(type) {
- case *string:
- sv = reflect.ValueOf(src)
- switch sv.Kind() {
- case reflect.Bool,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Float32, reflect.Float64:
- *d = asString(src)
- return nil
- }
- case *[]byte:
- sv = reflect.ValueOf(src)
- if b, ok := asBytes(nil, sv); ok {
- *d = b
- return nil
- }
- case *RawBytes:
- sv = reflect.ValueOf(src)
- if b, ok := asBytes([]byte(*d)[:0], sv); ok {
- *d = RawBytes(b)
- return nil
- }
- case *bool:
- bv, err := driver.Bool.ConvertValue(src)
- if err == nil {
- *d = bv.(bool)
- }
- return err
- case *any:
- *d = src
- return nil
- }
-
- if scanner, ok := dest.(Scanner); ok {
- return scanner.Scan(src)
- }
-
- dpv := reflect.ValueOf(dest)
- if dpv.Kind() != reflect.Pointer {
- return errors.New("destination not a pointer")
- }
- if dpv.IsNil() {
- return errNilPtr
- }
-
- if !sv.IsValid() {
- sv = reflect.ValueOf(src)
- }
-
- dv := reflect.Indirect(dpv)
- if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
- switch b := src.(type) {
- case []byte:
- dv.Set(reflect.ValueOf(bytes.Clone(b)))
- default:
- dv.Set(sv)
- }
- return nil
- }
-
- if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) {
- dv.Set(sv.Convert(dv.Type()))
- return nil
- }
-
- // The following conversions use a string value as an intermediate representation
- // to convert between various numeric types.
- //
- // This also allows scanning into user defined types such as "type Int int64".
- // For symmetry, also check for string destination types.
- switch dv.Kind() {
- case reflect.Pointer:
- if src == nil {
- dv.SetZero()
- return nil
- }
- dv.Set(reflect.New(dv.Type().Elem()))
- return convertAssignRows(dv.Interface(), src, rows)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- if src == nil {
- return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
- }
- s := asString(src)
- i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
- if err != nil {
- err = strconvErr(err)
- return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
- }
- dv.SetInt(i64)
- return nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if src == nil {
- return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
- }
- s := asString(src)
- u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
- if err != nil {
- err = strconvErr(err)
- return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
- }
- dv.SetUint(u64)
- return nil
- case reflect.Float32, reflect.Float64:
- if src == nil {
- return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
- }
- s := asString(src)
- f64, err := strconv.ParseFloat(s, dv.Type().Bits())
- if err != nil {
- err = strconvErr(err)
- return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
- }
- dv.SetFloat(f64)
- return nil
- case reflect.String:
- if src == nil {
- return fmt.Errorf("converting NULL to %s is unsupported", dv.Kind())
- }
- switch v := src.(type) {
- case string:
- dv.SetString(v)
- return nil
- case []byte:
- dv.SetString(string(v))
- return nil
- }
- }
-
- return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
-}
-
-func strconvErr(err error) error {
- if ne, ok := err.(*strconv.NumError); ok {
- return ne.Err
- }
- return err
-}
-
-func asString(src any) string {
- switch v := src.(type) {
- case string:
- return v
- case []byte:
- return string(v)
- }
- rv := reflect.ValueOf(src)
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.FormatInt(rv.Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return strconv.FormatUint(rv.Uint(), 10)
- case reflect.Float64:
- return strconv.FormatFloat(rv.Float(), 'g', -1, 64)
- case reflect.Float32:
- return strconv.FormatFloat(rv.Float(), 'g', -1, 32)
- case reflect.Bool:
- return strconv.FormatBool(rv.Bool())
- }
- return fmt.Sprintf("%v", src)
-}
-
-func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) {
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.AppendInt(buf, rv.Int(), 10), true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return strconv.AppendUint(buf, rv.Uint(), 10), true
- case reflect.Float32:
- return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true
- case reflect.Float64:
- return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true
- case reflect.Bool:
- return strconv.AppendBool(buf, rv.Bool()), true
- case reflect.String:
- s := rv.String()
- return append(buf, s...), true
- }
- return
-}
-
-var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-
-// callValuerValue returns vr.Value(), with one exception:
-// If vr.Value is an auto-generated method on a pointer type and the
-// pointer is nil, it would panic at runtime in the panicwrap
-// method. Treat it like nil instead.
-// Issue 8415.
-//
-// This is so people can implement driver.Value on value types and
-// still use nil pointers to those types to mean nil/NULL, just like
-// string/*string.
-//
-// This function is mirrored in the database/sql/driver package.
-func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
- if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Pointer &&
- rv.IsNil() &&
- rv.Type().Elem().Implements(valuerReflectType) {
- return nil, nil
- }
- return vr.Value()
-}
-
-// decimal composes or decomposes a decimal value to and from individual parts.
-// There are four parts: a boolean negative flag, a form byte with three possible states
-// (finite=0, infinite=1, NaN=2), a base-2 big-endian integer
-// coefficient (also known as a significand) as a []byte, and an int32 exponent.
-// These are composed into a final value as "decimal = (neg) (form=finite) coefficient * 10 ^ exponent".
-// A zero length coefficient is a zero value.
-// The big-endian integer coefficient stores the most significant byte first (at coefficient[0]).
-// If the form is not finite the coefficient and exponent should be ignored.
-// The negative parameter may be set to true for any form, although implementations are not required
-// to respect the negative parameter in the non-finite form.
-//
-// Implementations may choose to set the negative parameter to true on a zero or NaN value,
-// but implementations that do not differentiate between negative and positive
-// zero or NaN values should ignore the negative parameter without error.
-// If an implementation does not support Infinity it may be converted into a NaN without error.
-// If a value is set that is larger than what is supported by an implementation,
-// an error must be returned.
-// Implementations must return an error if a NaN or Infinity is attempted to be set while neither
-// are supported.
-//
-// NOTE(kardianos): This is an experimental interface. See https://golang.org/issue/30870
-type decimal interface {
- decimalDecompose
- decimalCompose
-}
-
-type decimalDecompose interface {
- // Decompose returns the internal decimal state in parts.
- // If the provided buf has sufficient capacity, buf may be returned as the coefficient with
- // the value set and length set as appropriate.
- Decompose(buf []byte) (form byte, negative bool, coefficient []byte, exponent int32)
-}
-
-type decimalCompose interface {
- // Compose sets the internal decimal value from parts. If the value cannot be
- // represented then an error should be returned.
- Compose(form byte, negative bool, coefficient []byte, exponent int32) error
-}
diff --git a/contrib/go/_std_1.21/src/database/sql/ctxutil.go b/contrib/go/_std_1.21/src/database/sql/ctxutil.go
deleted file mode 100644
index 4dbe6af6d2..0000000000
--- a/contrib/go/_std_1.21/src/database/sql/ctxutil.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sql
-
-import (
- "context"
- "database/sql/driver"
- "errors"
-)
-
-func ctxDriverPrepare(ctx context.Context, ci driver.Conn, query string) (driver.Stmt, error) {
- if ciCtx, is := ci.(driver.ConnPrepareContext); is {
- return ciCtx.PrepareContext(ctx, query)
- }
- si, err := ci.Prepare(query)
- if err == nil {
- select {
- default:
- case <-ctx.Done():
- si.Close()
- return nil, ctx.Err()
- }
- }
- return si, err
-}
-
-func ctxDriverExec(ctx context.Context, execerCtx driver.ExecerContext, execer driver.Execer, query string, nvdargs []driver.NamedValue) (driver.Result, error) {
- if execerCtx != nil {
- return execerCtx.ExecContext(ctx, query, nvdargs)
- }
- dargs, err := namedValueToValue(nvdargs)
- if err != nil {
- return nil, err
- }
-
- select {
- default:
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- return execer.Exec(query, dargs)
-}
-
-func ctxDriverQuery(ctx context.Context, queryerCtx driver.QueryerContext, queryer driver.Queryer, query string, nvdargs []driver.NamedValue) (driver.Rows, error) {
- if queryerCtx != nil {
- return queryerCtx.QueryContext(ctx, query, nvdargs)
- }
- dargs, err := namedValueToValue(nvdargs)
- if err != nil {
- return nil, err
- }
-
- select {
- default:
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- return queryer.Query(query, dargs)
-}
-
-func ctxDriverStmtExec(ctx context.Context, si driver.Stmt, nvdargs []driver.NamedValue) (driver.Result, error) {
- if siCtx, is := si.(driver.StmtExecContext); is {
- return siCtx.ExecContext(ctx, nvdargs)
- }
- dargs, err := namedValueToValue(nvdargs)
- if err != nil {
- return nil, err
- }
-
- select {
- default:
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- return si.Exec(dargs)
-}
-
-func ctxDriverStmtQuery(ctx context.Context, si driver.Stmt, nvdargs []driver.NamedValue) (driver.Rows, error) {
- if siCtx, is := si.(driver.StmtQueryContext); is {
- return siCtx.QueryContext(ctx, nvdargs)
- }
- dargs, err := namedValueToValue(nvdargs)
- if err != nil {
- return nil, err
- }
-
- select {
- default:
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- return si.Query(dargs)
-}
-
-func ctxDriverBegin(ctx context.Context, opts *TxOptions, ci driver.Conn) (driver.Tx, error) {
- if ciCtx, is := ci.(driver.ConnBeginTx); is {
- dopts := driver.TxOptions{}
- if opts != nil {
- dopts.Isolation = driver.IsolationLevel(opts.Isolation)
- dopts.ReadOnly = opts.ReadOnly
- }
- return ciCtx.BeginTx(ctx, dopts)
- }
-
- if opts != nil {
- // Check the transaction level. If the transaction level is non-default
- // then return an error here as the BeginTx driver value is not supported.
- if opts.Isolation != LevelDefault {
- return nil, errors.New("sql: driver does not support non-default isolation level")
- }
-
- // If a read-only transaction is requested return an error as the
- // BeginTx driver value is not supported.
- if opts.ReadOnly {
- return nil, errors.New("sql: driver does not support read-only transactions")
- }
- }
-
- if ctx.Done() == nil {
- return ci.Begin()
- }
-
- txi, err := ci.Begin()
- if err == nil {
- select {
- default:
- case <-ctx.Done():
- txi.Rollback()
- return nil, ctx.Err()
- }
- }
- return txi, err
-}
-
-func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
- dargs := make([]driver.Value, len(named))
- for n, param := range named {
- if len(param.Name) > 0 {
- return nil, errors.New("sql: driver does not support the use of Named Parameters")
- }
- dargs[n] = param.Value
- }
- return dargs, nil
-}
diff --git a/contrib/go/_std_1.21/src/database/sql/sql.go b/contrib/go/_std_1.21/src/database/sql/sql.go
deleted file mode 100644
index 836fe83e2e..0000000000
--- a/contrib/go/_std_1.21/src/database/sql/sql.go
+++ /dev/null
@@ -1,3503 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sql provides a generic interface around SQL (or SQL-like)
-// databases.
-//
-// The sql package must be used in conjunction with a database driver.
-// See https://golang.org/s/sqldrivers for a list of drivers.
-//
-// Drivers that do not support context cancellation will not return until
-// after the query is completed.
-//
-// For usage examples, see the wiki page at
-// https://golang.org/s/sqlwiki.
-package sql
-
-import (
- "context"
- "database/sql/driver"
- "errors"
- "fmt"
- "io"
- "reflect"
- "runtime"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-)
-
-var (
- driversMu sync.RWMutex
- drivers = make(map[string]driver.Driver)
-)
-
-// nowFunc returns the current time; it's overridden in tests.
-var nowFunc = time.Now
-
-// Register makes a database driver available by the provided name.
-// If Register is called twice with the same name or if driver is nil,
-// it panics.
-func Register(name string, driver driver.Driver) {
- driversMu.Lock()
- defer driversMu.Unlock()
- if driver == nil {
- panic("sql: Register driver is nil")
- }
- if _, dup := drivers[name]; dup {
- panic("sql: Register called twice for driver " + name)
- }
- drivers[name] = driver
-}
-
-func unregisterAllDrivers() {
- driversMu.Lock()
- defer driversMu.Unlock()
- // For tests.
- drivers = make(map[string]driver.Driver)
-}
-
-// Drivers returns a sorted list of the names of the registered drivers.
-func Drivers() []string {
- driversMu.RLock()
- defer driversMu.RUnlock()
- list := make([]string, 0, len(drivers))
- for name := range drivers {
- list = append(list, name)
- }
- sort.Strings(list)
- return list
-}
-
-// A NamedArg is a named argument. NamedArg values may be used as
-// arguments to Query or Exec and bind to the corresponding named
-// parameter in the SQL statement.
-//
-// For a more concise way to create NamedArg values, see
-// the Named function.
-type NamedArg struct {
- _NamedFieldsRequired struct{}
-
- // Name is the name of the parameter placeholder.
- //
- // If empty, the ordinal position in the argument list will be
- // used.
- //
- // Name must omit any symbol prefix.
- Name string
-
- // Value is the value of the parameter.
- // It may be assigned the same value types as the query
- // arguments.
- Value any
-}
-
-// Named provides a more concise way to create NamedArg values.
-//
-// Example usage:
-//
-// db.ExecContext(ctx, `
-// delete from Invoice
-// where
-// TimeCreated < @end
-// and TimeCreated >= @start;`,
-// sql.Named("start", startTime),
-// sql.Named("end", endTime),
-// )
-func Named(name string, value any) NamedArg {
- // This method exists because the go1compat promise
- // doesn't guarantee that structs don't grow more fields,
- // so unkeyed struct literals are a vet error. Thus, we don't
- // want to allow sql.NamedArg{name, value}.
- return NamedArg{Name: name, Value: value}
-}
-
-// IsolationLevel is the transaction isolation level used in TxOptions.
-type IsolationLevel int
-
-// Various isolation levels that drivers may support in BeginTx.
-// If a driver does not support a given isolation level an error may be returned.
-//
-// See https://en.wikipedia.org/wiki/Isolation_(database_systems)#Isolation_levels.
-const (
- LevelDefault IsolationLevel = iota
- LevelReadUncommitted
- LevelReadCommitted
- LevelWriteCommitted
- LevelRepeatableRead
- LevelSnapshot
- LevelSerializable
- LevelLinearizable
-)
-
-// String returns the name of the transaction isolation level.
-func (i IsolationLevel) String() string {
- switch i {
- case LevelDefault:
- return "Default"
- case LevelReadUncommitted:
- return "Read Uncommitted"
- case LevelReadCommitted:
- return "Read Committed"
- case LevelWriteCommitted:
- return "Write Committed"
- case LevelRepeatableRead:
- return "Repeatable Read"
- case LevelSnapshot:
- return "Snapshot"
- case LevelSerializable:
- return "Serializable"
- case LevelLinearizable:
- return "Linearizable"
- default:
- return "IsolationLevel(" + strconv.Itoa(int(i)) + ")"
- }
-}
-
-var _ fmt.Stringer = LevelDefault
-
-// TxOptions holds the transaction options to be used in DB.BeginTx.
-type TxOptions struct {
- // Isolation is the transaction isolation level.
- // If zero, the driver or database's default level is used.
- Isolation IsolationLevel
- ReadOnly bool
-}
-
-// RawBytes is a byte slice that holds a reference to memory owned by
-// the database itself. After a Scan into a RawBytes, the slice is only
-// valid until the next call to Next, Scan, or Close.
-type RawBytes []byte
-
-// NullString represents a string that may be null.
-// NullString implements the Scanner interface so
-// it can be used as a scan destination:
-//
-// var s NullString
-// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&s)
-// ...
-// if s.Valid {
-// // use s.String
-// } else {
-// // NULL value
-// }
-type NullString struct {
- String string
- Valid bool // Valid is true if String is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (ns *NullString) Scan(value any) error {
- if value == nil {
- ns.String, ns.Valid = "", false
- return nil
- }
- ns.Valid = true
- return convertAssign(&ns.String, value)
-}
-
-// Value implements the driver Valuer interface.
-func (ns NullString) Value() (driver.Value, error) {
- if !ns.Valid {
- return nil, nil
- }
- return ns.String, nil
-}
-
-// NullInt64 represents an int64 that may be null.
-// NullInt64 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
-type NullInt64 struct {
- Int64 int64
- Valid bool // Valid is true if Int64 is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (n *NullInt64) Scan(value any) error {
- if value == nil {
- n.Int64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- return convertAssign(&n.Int64, value)
-}
-
-// Value implements the driver Valuer interface.
-func (n NullInt64) Value() (driver.Value, error) {
- if !n.Valid {
- return nil, nil
- }
- return n.Int64, nil
-}
-
-// NullInt32 represents an int32 that may be null.
-// NullInt32 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
-type NullInt32 struct {
- Int32 int32
- Valid bool // Valid is true if Int32 is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (n *NullInt32) Scan(value any) error {
- if value == nil {
- n.Int32, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- return convertAssign(&n.Int32, value)
-}
-
-// Value implements the driver Valuer interface.
-func (n NullInt32) Value() (driver.Value, error) {
- if !n.Valid {
- return nil, nil
- }
- return int64(n.Int32), nil
-}
-
-// NullInt16 represents an int16 that may be null.
-// NullInt16 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
-type NullInt16 struct {
- Int16 int16
- Valid bool // Valid is true if Int16 is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (n *NullInt16) Scan(value any) error {
- if value == nil {
- n.Int16, n.Valid = 0, false
- return nil
- }
- err := convertAssign(&n.Int16, value)
- n.Valid = err == nil
- return err
-}
-
-// Value implements the driver Valuer interface.
-func (n NullInt16) Value() (driver.Value, error) {
- if !n.Valid {
- return nil, nil
- }
- return int64(n.Int16), nil
-}
-
-// NullByte represents a byte that may be null.
-// NullByte implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
-type NullByte struct {
- Byte byte
- Valid bool // Valid is true if Byte is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (n *NullByte) Scan(value any) error {
- if value == nil {
- n.Byte, n.Valid = 0, false
- return nil
- }
- err := convertAssign(&n.Byte, value)
- n.Valid = err == nil
- return err
-}
-
-// Value implements the driver Valuer interface.
-func (n NullByte) Value() (driver.Value, error) {
- if !n.Valid {
- return nil, nil
- }
- return int64(n.Byte), nil
-}
-
-// NullFloat64 represents a float64 that may be null.
-// NullFloat64 implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
-type NullFloat64 struct {
- Float64 float64
- Valid bool // Valid is true if Float64 is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (n *NullFloat64) Scan(value any) error {
- if value == nil {
- n.Float64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- return convertAssign(&n.Float64, value)
-}
-
-// Value implements the driver Valuer interface.
-func (n NullFloat64) Value() (driver.Value, error) {
- if !n.Valid {
- return nil, nil
- }
- return n.Float64, nil
-}
-
-// NullBool represents a bool that may be null.
-// NullBool implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
-type NullBool struct {
- Bool bool
- Valid bool // Valid is true if Bool is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (n *NullBool) Scan(value any) error {
- if value == nil {
- n.Bool, n.Valid = false, false
- return nil
- }
- n.Valid = true
- return convertAssign(&n.Bool, value)
-}
-
-// Value implements the driver Valuer interface.
-func (n NullBool) Value() (driver.Value, error) {
- if !n.Valid {
- return nil, nil
- }
- return n.Bool, nil
-}
-
-// NullTime represents a time.Time that may be null.
-// NullTime implements the Scanner interface so
-// it can be used as a scan destination, similar to NullString.
-type NullTime struct {
- Time time.Time
- Valid bool // Valid is true if Time is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (n *NullTime) Scan(value any) error {
- if value == nil {
- n.Time, n.Valid = time.Time{}, false
- return nil
- }
- n.Valid = true
- return convertAssign(&n.Time, value)
-}
-
-// Value implements the driver Valuer interface.
-func (n NullTime) Value() (driver.Value, error) {
- if !n.Valid {
- return nil, nil
- }
- return n.Time, nil
-}
-
-// Scanner is an interface used by Scan.
-type Scanner interface {
- // Scan assigns a value from a database driver.
- //
- // The src value will be of one of the following types:
- //
- // int64
- // float64
- // bool
- // []byte
- // string
- // time.Time
- // nil - for NULL values
- //
- // An error should be returned if the value cannot be stored
- // without loss of information.
- //
- // Reference types such as []byte are only valid until the next call to Scan
- // and should not be retained. Their underlying memory is owned by the driver.
- // If retention is necessary, copy their values before the next call to Scan.
- Scan(src any) error
-}
-
-// Out may be used to retrieve OUTPUT value parameters from stored procedures.
-//
-// Not all drivers and databases support OUTPUT value parameters.
-//
-// Example usage:
-//
-// var outArg string
-// _, err := db.ExecContext(ctx, "ProcName", sql.Named("Arg1", sql.Out{Dest: &outArg}))
-type Out struct {
- _NamedFieldsRequired struct{}
-
- // Dest is a pointer to the value that will be set to the result of the
- // stored procedure's OUTPUT parameter.
- Dest any
-
- // In is whether the parameter is an INOUT parameter. If so, the input value to the stored
- // procedure is the dereferenced value of Dest's pointer, which is then replaced with
- // the output value.
- In bool
-}
-
-// ErrNoRows is returned by Scan when QueryRow doesn't return a
-// row. In such a case, QueryRow returns a placeholder *Row value that
-// defers this error until a Scan.
-var ErrNoRows = errors.New("sql: no rows in result set")
-
-// DB is a database handle representing a pool of zero or more
-// underlying connections. It's safe for concurrent use by multiple
-// goroutines.
-//
-// The sql package creates and frees connections automatically; it
-// also maintains a free pool of idle connections. If the database has
-// a concept of per-connection state, such state can be reliably observed
-// within a transaction (Tx) or connection (Conn). Once DB.Begin is called, the
-// returned Tx is bound to a single connection. Once Commit or
-// Rollback is called on the transaction, that transaction's
-// connection is returned to DB's idle connection pool. The pool size
-// can be controlled with SetMaxIdleConns.
-type DB struct {
- // Total time waited for new connections.
- waitDuration atomic.Int64
-
- connector driver.Connector
- // numClosed is an atomic counter which represents a total number of
- // closed connections. Stmt.openStmt checks it before cleaning closed
- // connections in Stmt.css.
- numClosed atomic.Uint64
-
- mu sync.Mutex // protects following fields
- freeConn []*driverConn // free connections ordered by returnedAt oldest to newest
- connRequests map[uint64]chan connRequest
- nextRequest uint64 // Next key to use in connRequests.
- numOpen int // number of opened and pending open connections
- // Used to signal the need for new connections
- // a goroutine running connectionOpener() reads on this chan and
- // maybeOpenNewConnections sends on the chan (one send per needed connection)
- // It is closed during db.Close(). The close tells the connectionOpener
- // goroutine to exit.
- openerCh chan struct{}
- closed bool
- dep map[finalCloser]depSet
- lastPut map[*driverConn]string // stacktrace of last conn's put; debug only
- maxIdleCount int // zero means defaultMaxIdleConns; negative means 0
- maxOpen int // <= 0 means unlimited
- maxLifetime time.Duration // maximum amount of time a connection may be reused
- maxIdleTime time.Duration // maximum amount of time a connection may be idle before being closed
- cleanerCh chan struct{}
- waitCount int64 // Total number of connections waited for.
- maxIdleClosed int64 // Total number of connections closed due to idle count.
- maxIdleTimeClosed int64 // Total number of connections closed due to idle time.
- maxLifetimeClosed int64 // Total number of connections closed due to max connection lifetime limit.
-
- stop func() // stop cancels the connection opener.
-}
-
-// connReuseStrategy determines how (*DB).conn returns database connections.
-type connReuseStrategy uint8
-
-const (
- // alwaysNewConn forces a new connection to the database.
- alwaysNewConn connReuseStrategy = iota
- // cachedOrNewConn returns a cached connection, if available, else waits
- // for one to become available (if MaxOpenConns has been reached) or
- // creates a new database connection.
- cachedOrNewConn
-)
-
-// driverConn wraps a driver.Conn with a mutex, to
-// be held during all calls into the Conn. (including any calls onto
-// interfaces returned via that Conn, such as calls on Tx, Stmt,
-// Result, Rows)
-type driverConn struct {
- db *DB
- createdAt time.Time
-
- sync.Mutex // guards following
- ci driver.Conn
- needReset bool // The connection session should be reset before use if true.
- closed bool
- finalClosed bool // ci.Close has been called
- openStmt map[*driverStmt]bool
-
- // guarded by db.mu
- inUse bool
- returnedAt time.Time // Time the connection was created or returned.
- onPut []func() // code (with db.mu held) run when conn is next returned
- dbmuClosed bool // same as closed, but guarded by db.mu, for removeClosedStmtLocked
-}
-
-func (dc *driverConn) releaseConn(err error) {
- dc.db.putConn(dc, err, true)
-}
-
-func (dc *driverConn) removeOpenStmt(ds *driverStmt) {
- dc.Lock()
- defer dc.Unlock()
- delete(dc.openStmt, ds)
-}
-
-func (dc *driverConn) expired(timeout time.Duration) bool {
- if timeout <= 0 {
- return false
- }
- return dc.createdAt.Add(timeout).Before(nowFunc())
-}
-
-// resetSession checks if the driver connection needs the
-// session to be reset and if required, resets it.
-func (dc *driverConn) resetSession(ctx context.Context) error {
- dc.Lock()
- defer dc.Unlock()
-
- if !dc.needReset {
- return nil
- }
- if cr, ok := dc.ci.(driver.SessionResetter); ok {
- return cr.ResetSession(ctx)
- }
- return nil
-}
-
-// validateConnection checks if the connection is valid and can
-// still be used. It also marks the session for reset if required.
-func (dc *driverConn) validateConnection(needsReset bool) bool {
- dc.Lock()
- defer dc.Unlock()
-
- if needsReset {
- dc.needReset = true
- }
- if cv, ok := dc.ci.(driver.Validator); ok {
- return cv.IsValid()
- }
- return true
-}
-
-// prepareLocked prepares the query on dc. When cg == nil the dc must keep track of
-// the prepared statements in a pool.
-func (dc *driverConn) prepareLocked(ctx context.Context, cg stmtConnGrabber, query string) (*driverStmt, error) {
- si, err := ctxDriverPrepare(ctx, dc.ci, query)
- if err != nil {
- return nil, err
- }
- ds := &driverStmt{Locker: dc, si: si}
-
- // No need to manage open statements if there is a single connection grabber.
- if cg != nil {
- return ds, nil
- }
-
- // Track each driverConn's open statements, so we can close them
- // before closing the conn.
- //
- // Wrap all driver.Stmt is *driverStmt to ensure they are only closed once.
- if dc.openStmt == nil {
- dc.openStmt = make(map[*driverStmt]bool)
- }
- dc.openStmt[ds] = true
- return ds, nil
-}
-
-// the dc.db's Mutex is held.
-func (dc *driverConn) closeDBLocked() func() error {
- dc.Lock()
- defer dc.Unlock()
- if dc.closed {
- return func() error { return errors.New("sql: duplicate driverConn close") }
- }
- dc.closed = true
- return dc.db.removeDepLocked(dc, dc)
-}
-
-func (dc *driverConn) Close() error {
- dc.Lock()
- if dc.closed {
- dc.Unlock()
- return errors.New("sql: duplicate driverConn close")
- }
- dc.closed = true
- dc.Unlock() // not defer; removeDep finalClose calls may need to lock
-
- // And now updates that require holding dc.mu.Lock.
- dc.db.mu.Lock()
- dc.dbmuClosed = true
- fn := dc.db.removeDepLocked(dc, dc)
- dc.db.mu.Unlock()
- return fn()
-}
-
-func (dc *driverConn) finalClose() error {
- var err error
-
- // Each *driverStmt has a lock to the dc. Copy the list out of the dc
- // before calling close on each stmt.
- var openStmt []*driverStmt
- withLock(dc, func() {
- openStmt = make([]*driverStmt, 0, len(dc.openStmt))
- for ds := range dc.openStmt {
- openStmt = append(openStmt, ds)
- }
- dc.openStmt = nil
- })
- for _, ds := range openStmt {
- ds.Close()
- }
- withLock(dc, func() {
- dc.finalClosed = true
- err = dc.ci.Close()
- dc.ci = nil
- })
-
- dc.db.mu.Lock()
- dc.db.numOpen--
- dc.db.maybeOpenNewConnections()
- dc.db.mu.Unlock()
-
- dc.db.numClosed.Add(1)
- return err
-}
-
-// driverStmt associates a driver.Stmt with the
-// *driverConn from which it came, so the driverConn's lock can be
-// held during calls.
-type driverStmt struct {
- sync.Locker // the *driverConn
- si driver.Stmt
- closed bool
- closeErr error // return value of previous Close call
-}
-
-// Close ensures driver.Stmt is only closed once and always returns the same
-// result.
-func (ds *driverStmt) Close() error {
- ds.Lock()
- defer ds.Unlock()
- if ds.closed {
- return ds.closeErr
- }
- ds.closed = true
- ds.closeErr = ds.si.Close()
- return ds.closeErr
-}
-
-// depSet is a finalCloser's outstanding dependencies
-type depSet map[any]bool // set of true bools
-
-// The finalCloser interface is used by (*DB).addDep and related
-// dependency reference counting.
-type finalCloser interface {
- // finalClose is called when the reference count of an object
- // goes to zero. (*DB).mu is not held while calling it.
- finalClose() error
-}
-
-// addDep notes that x now depends on dep, and x's finalClose won't be
-// called until all of x's dependencies are removed with removeDep.
-func (db *DB) addDep(x finalCloser, dep any) {
- db.mu.Lock()
- defer db.mu.Unlock()
- db.addDepLocked(x, dep)
-}
-
-func (db *DB) addDepLocked(x finalCloser, dep any) {
- if db.dep == nil {
- db.dep = make(map[finalCloser]depSet)
- }
- xdep := db.dep[x]
- if xdep == nil {
- xdep = make(depSet)
- db.dep[x] = xdep
- }
- xdep[dep] = true
-}
-
-// removeDep notes that x no longer depends on dep.
-// If x still has dependencies, nil is returned.
-// If x no longer has any dependencies, its finalClose method will be
-// called and its error value will be returned.
-func (db *DB) removeDep(x finalCloser, dep any) error {
- db.mu.Lock()
- fn := db.removeDepLocked(x, dep)
- db.mu.Unlock()
- return fn()
-}
-
-func (db *DB) removeDepLocked(x finalCloser, dep any) func() error {
- xdep, ok := db.dep[x]
- if !ok {
- panic(fmt.Sprintf("unpaired removeDep: no deps for %T", x))
- }
-
- l0 := len(xdep)
- delete(xdep, dep)
-
- switch len(xdep) {
- case l0:
- // Nothing removed. Shouldn't happen.
- panic(fmt.Sprintf("unpaired removeDep: no %T dep on %T", dep, x))
- case 0:
- // No more dependencies.
- delete(db.dep, x)
- return x.finalClose
- default:
- // Dependencies remain.
- return func() error { return nil }
- }
-}
-
-// This is the size of the connectionOpener request chan (DB.openerCh).
-// This value should be larger than the maximum typical value
-// used for db.maxOpen. If maxOpen is significantly larger than
-// connectionRequestQueueSize then it is possible for ALL calls into the *DB
-// to block until the connectionOpener can satisfy the backlog of requests.
-var connectionRequestQueueSize = 1000000
-
-type dsnConnector struct {
- dsn string
- driver driver.Driver
-}
-
-func (t dsnConnector) Connect(_ context.Context) (driver.Conn, error) {
- return t.driver.Open(t.dsn)
-}
-
-func (t dsnConnector) Driver() driver.Driver {
- return t.driver
-}
-
-// OpenDB opens a database using a Connector, allowing drivers to
-// bypass a string based data source name.
-//
-// Most users will open a database via a driver-specific connection
-// helper function that returns a *DB. No database drivers are included
-// in the Go standard library. See https://golang.org/s/sqldrivers for
-// a list of third-party drivers.
-//
-// OpenDB may just validate its arguments without creating a connection
-// to the database. To verify that the data source name is valid, call
-// Ping.
-//
-// The returned DB is safe for concurrent use by multiple goroutines
-// and maintains its own pool of idle connections. Thus, the OpenDB
-// function should be called just once. It is rarely necessary to
-// close a DB.
-func OpenDB(c driver.Connector) *DB {
- ctx, cancel := context.WithCancel(context.Background())
- db := &DB{
- connector: c,
- openerCh: make(chan struct{}, connectionRequestQueueSize),
- lastPut: make(map[*driverConn]string),
- connRequests: make(map[uint64]chan connRequest),
- stop: cancel,
- }
-
- go db.connectionOpener(ctx)
-
- return db
-}
-
-// Open opens a database specified by its database driver name and a
-// driver-specific data source name, usually consisting of at least a
-// database name and connection information.
-//
-// Most users will open a database via a driver-specific connection
-// helper function that returns a *DB. No database drivers are included
-// in the Go standard library. See https://golang.org/s/sqldrivers for
-// a list of third-party drivers.
-//
-// Open may just validate its arguments without creating a connection
-// to the database. To verify that the data source name is valid, call
-// Ping.
-//
-// The returned DB is safe for concurrent use by multiple goroutines
-// and maintains its own pool of idle connections. Thus, the Open
-// function should be called just once. It is rarely necessary to
-// close a DB.
-func Open(driverName, dataSourceName string) (*DB, error) {
- driversMu.RLock()
- driveri, ok := drivers[driverName]
- driversMu.RUnlock()
- if !ok {
- return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
- }
-
- if driverCtx, ok := driveri.(driver.DriverContext); ok {
- connector, err := driverCtx.OpenConnector(dataSourceName)
- if err != nil {
- return nil, err
- }
- return OpenDB(connector), nil
- }
-
- return OpenDB(dsnConnector{dsn: dataSourceName, driver: driveri}), nil
-}
-
-func (db *DB) pingDC(ctx context.Context, dc *driverConn, release func(error)) error {
- var err error
- if pinger, ok := dc.ci.(driver.Pinger); ok {
- withLock(dc, func() {
- err = pinger.Ping(ctx)
- })
- }
- release(err)
- return err
-}
-
-// PingContext verifies a connection to the database is still alive,
-// establishing a connection if necessary.
-func (db *DB) PingContext(ctx context.Context) error {
- var dc *driverConn
- var err error
-
- err = db.retry(func(strategy connReuseStrategy) error {
- dc, err = db.conn(ctx, strategy)
- return err
- })
-
- if err != nil {
- return err
- }
-
- return db.pingDC(ctx, dc, dc.releaseConn)
-}
-
-// Ping verifies a connection to the database is still alive,
-// establishing a connection if necessary.
-//
-// Ping uses context.Background internally; to specify the context, use
-// PingContext.
-func (db *DB) Ping() error {
- return db.PingContext(context.Background())
-}
-
-// Close closes the database and prevents new queries from starting.
-// Close then waits for all queries that have started processing on the server
-// to finish.
-//
-// It is rare to Close a DB, as the DB handle is meant to be
-// long-lived and shared between many goroutines.
-func (db *DB) Close() error {
- db.mu.Lock()
- if db.closed { // Make DB.Close idempotent
- db.mu.Unlock()
- return nil
- }
- if db.cleanerCh != nil {
- close(db.cleanerCh)
- }
- var err error
- fns := make([]func() error, 0, len(db.freeConn))
- for _, dc := range db.freeConn {
- fns = append(fns, dc.closeDBLocked())
- }
- db.freeConn = nil
- db.closed = true
- for _, req := range db.connRequests {
- close(req)
- }
- db.mu.Unlock()
- for _, fn := range fns {
- err1 := fn()
- if err1 != nil {
- err = err1
- }
- }
- db.stop()
- if c, ok := db.connector.(io.Closer); ok {
- err1 := c.Close()
- if err1 != nil {
- err = err1
- }
- }
- return err
-}
-
-const defaultMaxIdleConns = 2
-
-func (db *DB) maxIdleConnsLocked() int {
- n := db.maxIdleCount
- switch {
- case n == 0:
- // TODO(bradfitz): ask driver, if supported, for its default preference
- return defaultMaxIdleConns
- case n < 0:
- return 0
- default:
- return n
- }
-}
-
-func (db *DB) shortestIdleTimeLocked() time.Duration {
- if db.maxIdleTime <= 0 {
- return db.maxLifetime
- }
- if db.maxLifetime <= 0 {
- return db.maxIdleTime
- }
-
- min := db.maxIdleTime
- if min > db.maxLifetime {
- min = db.maxLifetime
- }
- return min
-}
-
-// SetMaxIdleConns sets the maximum number of connections in the idle
-// connection pool.
-//
-// If MaxOpenConns is greater than 0 but less than the new MaxIdleConns,
-// then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.
-//
-// If n <= 0, no idle connections are retained.
-//
-// The default max idle connections is currently 2. This may change in
-// a future release.
-func (db *DB) SetMaxIdleConns(n int) {
- db.mu.Lock()
- if n > 0 {
- db.maxIdleCount = n
- } else {
- // No idle connections.
- db.maxIdleCount = -1
- }
- // Make sure maxIdle doesn't exceed maxOpen
- if db.maxOpen > 0 && db.maxIdleConnsLocked() > db.maxOpen {
- db.maxIdleCount = db.maxOpen
- }
- var closing []*driverConn
- idleCount := len(db.freeConn)
- maxIdle := db.maxIdleConnsLocked()
- if idleCount > maxIdle {
- closing = db.freeConn[maxIdle:]
- db.freeConn = db.freeConn[:maxIdle]
- }
- db.maxIdleClosed += int64(len(closing))
- db.mu.Unlock()
- for _, c := range closing {
- c.Close()
- }
-}
-
-// SetMaxOpenConns sets the maximum number of open connections to the database.
-//
-// If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than
-// MaxIdleConns, then MaxIdleConns will be reduced to match the new
-// MaxOpenConns limit.
-//
-// If n <= 0, then there is no limit on the number of open connections.
-// The default is 0 (unlimited).
-func (db *DB) SetMaxOpenConns(n int) {
- db.mu.Lock()
- db.maxOpen = n
- if n < 0 {
- db.maxOpen = 0
- }
- syncMaxIdle := db.maxOpen > 0 && db.maxIdleConnsLocked() > db.maxOpen
- db.mu.Unlock()
- if syncMaxIdle {
- db.SetMaxIdleConns(n)
- }
-}
-
-// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
-//
-// Expired connections may be closed lazily before reuse.
-//
-// If d <= 0, connections are not closed due to a connection's age.
-func (db *DB) SetConnMaxLifetime(d time.Duration) {
- if d < 0 {
- d = 0
- }
- db.mu.Lock()
- // Wake cleaner up when lifetime is shortened.
- if d > 0 && d < db.maxLifetime && db.cleanerCh != nil {
- select {
- case db.cleanerCh <- struct{}{}:
- default:
- }
- }
- db.maxLifetime = d
- db.startCleanerLocked()
- db.mu.Unlock()
-}
-
-// SetConnMaxIdleTime sets the maximum amount of time a connection may be idle.
-//
-// Expired connections may be closed lazily before reuse.
-//
-// If d <= 0, connections are not closed due to a connection's idle time.
-func (db *DB) SetConnMaxIdleTime(d time.Duration) {
- if d < 0 {
- d = 0
- }
- db.mu.Lock()
- defer db.mu.Unlock()
-
- // Wake cleaner up when idle time is shortened.
- if d > 0 && d < db.maxIdleTime && db.cleanerCh != nil {
- select {
- case db.cleanerCh <- struct{}{}:
- default:
- }
- }
- db.maxIdleTime = d
- db.startCleanerLocked()
-}
-
-// startCleanerLocked starts connectionCleaner if needed.
-func (db *DB) startCleanerLocked() {
- if (db.maxLifetime > 0 || db.maxIdleTime > 0) && db.numOpen > 0 && db.cleanerCh == nil {
- db.cleanerCh = make(chan struct{}, 1)
- go db.connectionCleaner(db.shortestIdleTimeLocked())
- }
-}
-
-func (db *DB) connectionCleaner(d time.Duration) {
- const minInterval = time.Second
-
- if d < minInterval {
- d = minInterval
- }
- t := time.NewTimer(d)
-
- for {
- select {
- case <-t.C:
- case <-db.cleanerCh: // maxLifetime was changed or db was closed.
- }
-
- db.mu.Lock()
-
- d = db.shortestIdleTimeLocked()
- if db.closed || db.numOpen == 0 || d <= 0 {
- db.cleanerCh = nil
- db.mu.Unlock()
- return
- }
-
- d, closing := db.connectionCleanerRunLocked(d)
- db.mu.Unlock()
- for _, c := range closing {
- c.Close()
- }
-
- if d < minInterval {
- d = minInterval
- }
-
- if !t.Stop() {
- select {
- case <-t.C:
- default:
- }
- }
- t.Reset(d)
- }
-}
-
-// connectionCleanerRunLocked removes connections that should be closed from
-// freeConn and returns them along side an updated duration to the next check
-// if a quicker check is required to ensure connections are checked appropriately.
-func (db *DB) connectionCleanerRunLocked(d time.Duration) (time.Duration, []*driverConn) {
- var idleClosing int64
- var closing []*driverConn
- if db.maxIdleTime > 0 {
- // As freeConn is ordered by returnedAt process
- // in reverse order to minimise the work needed.
- idleSince := nowFunc().Add(-db.maxIdleTime)
- last := len(db.freeConn) - 1
- for i := last; i >= 0; i-- {
- c := db.freeConn[i]
- if c.returnedAt.Before(idleSince) {
- i++
- closing = db.freeConn[:i:i]
- db.freeConn = db.freeConn[i:]
- idleClosing = int64(len(closing))
- db.maxIdleTimeClosed += idleClosing
- break
- }
- }
-
- if len(db.freeConn) > 0 {
- c := db.freeConn[0]
- if d2 := c.returnedAt.Sub(idleSince); d2 < d {
- // Ensure idle connections are cleaned up as soon as
- // possible.
- d = d2
- }
- }
- }
-
- if db.maxLifetime > 0 {
- expiredSince := nowFunc().Add(-db.maxLifetime)
- for i := 0; i < len(db.freeConn); i++ {
- c := db.freeConn[i]
- if c.createdAt.Before(expiredSince) {
- closing = append(closing, c)
-
- last := len(db.freeConn) - 1
- // Use slow delete as order is required to ensure
- // connections are reused least idle time first.
- copy(db.freeConn[i:], db.freeConn[i+1:])
- db.freeConn[last] = nil
- db.freeConn = db.freeConn[:last]
- i--
- } else if d2 := c.createdAt.Sub(expiredSince); d2 < d {
- // Prevent connections sitting the freeConn when they
- // have expired by updating our next deadline d.
- d = d2
- }
- }
- db.maxLifetimeClosed += int64(len(closing)) - idleClosing
- }
-
- return d, closing
-}
-
-// DBStats contains database statistics.
-type DBStats struct {
- MaxOpenConnections int // Maximum number of open connections to the database.
-
- // Pool Status
- OpenConnections int // The number of established connections both in use and idle.
- InUse int // The number of connections currently in use.
- Idle int // The number of idle connections.
-
- // Counters
- WaitCount int64 // The total number of connections waited for.
- WaitDuration time.Duration // The total time blocked waiting for a new connection.
- MaxIdleClosed int64 // The total number of connections closed due to SetMaxIdleConns.
- MaxIdleTimeClosed int64 // The total number of connections closed due to SetConnMaxIdleTime.
- MaxLifetimeClosed int64 // The total number of connections closed due to SetConnMaxLifetime.
-}
-
-// Stats returns database statistics.
-func (db *DB) Stats() DBStats {
- wait := db.waitDuration.Load()
-
- db.mu.Lock()
- defer db.mu.Unlock()
-
- stats := DBStats{
- MaxOpenConnections: db.maxOpen,
-
- Idle: len(db.freeConn),
- OpenConnections: db.numOpen,
- InUse: db.numOpen - len(db.freeConn),
-
- WaitCount: db.waitCount,
- WaitDuration: time.Duration(wait),
- MaxIdleClosed: db.maxIdleClosed,
- MaxIdleTimeClosed: db.maxIdleTimeClosed,
- MaxLifetimeClosed: db.maxLifetimeClosed,
- }
- return stats
-}
-
-// Assumes db.mu is locked.
-// If there are connRequests and the connection limit hasn't been reached,
-// then tell the connectionOpener to open new connections.
-func (db *DB) maybeOpenNewConnections() {
- numRequests := len(db.connRequests)
- if db.maxOpen > 0 {
- numCanOpen := db.maxOpen - db.numOpen
- if numRequests > numCanOpen {
- numRequests = numCanOpen
- }
- }
- for numRequests > 0 {
- db.numOpen++ // optimistically
- numRequests--
- if db.closed {
- return
- }
- db.openerCh <- struct{}{}
- }
-}
-
-// Runs in a separate goroutine, opens new connections when requested.
-func (db *DB) connectionOpener(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case <-db.openerCh:
- db.openNewConnection(ctx)
- }
- }
-}
-
-// Open one new connection
-func (db *DB) openNewConnection(ctx context.Context) {
- // maybeOpenNewConnections has already executed db.numOpen++ before it sent
- // on db.openerCh. This function must execute db.numOpen-- if the
- // connection fails or is closed before returning.
- ci, err := db.connector.Connect(ctx)
- db.mu.Lock()
- defer db.mu.Unlock()
- if db.closed {
- if err == nil {
- ci.Close()
- }
- db.numOpen--
- return
- }
- if err != nil {
- db.numOpen--
- db.putConnDBLocked(nil, err)
- db.maybeOpenNewConnections()
- return
- }
- dc := &driverConn{
- db: db,
- createdAt: nowFunc(),
- returnedAt: nowFunc(),
- ci: ci,
- }
- if db.putConnDBLocked(dc, err) {
- db.addDepLocked(dc, dc)
- } else {
- db.numOpen--
- ci.Close()
- }
-}
-
-// connRequest represents one request for a new connection
-// When there are no idle connections available, DB.conn will create
-// a new connRequest and put it on the db.connRequests list.
-type connRequest struct {
- conn *driverConn
- err error
-}
-
-var errDBClosed = errors.New("sql: database is closed")
-
-// nextRequestKeyLocked returns the next connection request key.
-// It is assumed that nextRequest will not overflow.
-func (db *DB) nextRequestKeyLocked() uint64 {
- next := db.nextRequest
- db.nextRequest++
- return next
-}
-
-// conn returns a newly-opened or cached *driverConn.
-func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn, error) {
- db.mu.Lock()
- if db.closed {
- db.mu.Unlock()
- return nil, errDBClosed
- }
- // Check if the context is expired.
- select {
- default:
- case <-ctx.Done():
- db.mu.Unlock()
- return nil, ctx.Err()
- }
- lifetime := db.maxLifetime
-
- // Prefer a free connection, if possible.
- last := len(db.freeConn) - 1
- if strategy == cachedOrNewConn && last >= 0 {
- // Reuse the lowest idle time connection so we can close
- // connections which remain idle as soon as possible.
- conn := db.freeConn[last]
- db.freeConn = db.freeConn[:last]
- conn.inUse = true
- if conn.expired(lifetime) {
- db.maxLifetimeClosed++
- db.mu.Unlock()
- conn.Close()
- return nil, driver.ErrBadConn
- }
- db.mu.Unlock()
-
- // Reset the session if required.
- if err := conn.resetSession(ctx); errors.Is(err, driver.ErrBadConn) {
- conn.Close()
- return nil, err
- }
-
- return conn, nil
- }
-
- // Out of free connections or we were asked not to use one. If we're not
- // allowed to open any more connections, make a request and wait.
- if db.maxOpen > 0 && db.numOpen >= db.maxOpen {
- // Make the connRequest channel. It's buffered so that the
- // connectionOpener doesn't block while waiting for the req to be read.
- req := make(chan connRequest, 1)
- reqKey := db.nextRequestKeyLocked()
- db.connRequests[reqKey] = req
- db.waitCount++
- db.mu.Unlock()
-
- waitStart := nowFunc()
-
- // Timeout the connection request with the context.
- select {
- case <-ctx.Done():
- // Remove the connection request and ensure no value has been sent
- // on it after removing.
- db.mu.Lock()
- delete(db.connRequests, reqKey)
- db.mu.Unlock()
-
- db.waitDuration.Add(int64(time.Since(waitStart)))
-
- select {
- default:
- case ret, ok := <-req:
- if ok && ret.conn != nil {
- db.putConn(ret.conn, ret.err, false)
- }
- }
- return nil, ctx.Err()
- case ret, ok := <-req:
- db.waitDuration.Add(int64(time.Since(waitStart)))
-
- if !ok {
- return nil, errDBClosed
- }
- // Only check if the connection is expired if the strategy is cachedOrNewConns.
- // If we require a new connection, just re-use the connection without looking
- // at the expiry time. If it is expired, it will be checked when it is placed
- // back into the connection pool.
- // This prioritizes giving a valid connection to a client over the exact connection
- // lifetime, which could expire exactly after this point anyway.
- if strategy == cachedOrNewConn && ret.err == nil && ret.conn.expired(lifetime) {
- db.mu.Lock()
- db.maxLifetimeClosed++
- db.mu.Unlock()
- ret.conn.Close()
- return nil, driver.ErrBadConn
- }
- if ret.conn == nil {
- return nil, ret.err
- }
-
- // Reset the session if required.
- if err := ret.conn.resetSession(ctx); errors.Is(err, driver.ErrBadConn) {
- ret.conn.Close()
- return nil, err
- }
- return ret.conn, ret.err
- }
- }
-
- db.numOpen++ // optimistically
- db.mu.Unlock()
- ci, err := db.connector.Connect(ctx)
- if err != nil {
- db.mu.Lock()
- db.numOpen-- // correct for earlier optimism
- db.maybeOpenNewConnections()
- db.mu.Unlock()
- return nil, err
- }
- db.mu.Lock()
- dc := &driverConn{
- db: db,
- createdAt: nowFunc(),
- returnedAt: nowFunc(),
- ci: ci,
- inUse: true,
- }
- db.addDepLocked(dc, dc)
- db.mu.Unlock()
- return dc, nil
-}
-
-// putConnHook is a hook for testing.
-var putConnHook func(*DB, *driverConn)
-
-// noteUnusedDriverStatement notes that ds is no longer used and should
-// be closed whenever possible (when c is next not in use), unless c is
-// already closed.
-func (db *DB) noteUnusedDriverStatement(c *driverConn, ds *driverStmt) {
- db.mu.Lock()
- defer db.mu.Unlock()
- if c.inUse {
- c.onPut = append(c.onPut, func() {
- ds.Close()
- })
- } else {
- c.Lock()
- fc := c.finalClosed
- c.Unlock()
- if !fc {
- ds.Close()
- }
- }
-}
-
-// debugGetPut determines whether getConn & putConn calls' stack traces
-// are returned for more verbose crashes.
-const debugGetPut = false
-
-// putConn adds a connection to the db's free pool.
-// err is optionally the last error that occurred on this connection.
-func (db *DB) putConn(dc *driverConn, err error, resetSession bool) {
- if !errors.Is(err, driver.ErrBadConn) {
- if !dc.validateConnection(resetSession) {
- err = driver.ErrBadConn
- }
- }
- db.mu.Lock()
- if !dc.inUse {
- db.mu.Unlock()
- if debugGetPut {
- fmt.Printf("putConn(%v) DUPLICATE was: %s\n\nPREVIOUS was: %s", dc, stack(), db.lastPut[dc])
- }
- panic("sql: connection returned that was never out")
- }
-
- if !errors.Is(err, driver.ErrBadConn) && dc.expired(db.maxLifetime) {
- db.maxLifetimeClosed++
- err = driver.ErrBadConn
- }
- if debugGetPut {
- db.lastPut[dc] = stack()
- }
- dc.inUse = false
- dc.returnedAt = nowFunc()
-
- for _, fn := range dc.onPut {
- fn()
- }
- dc.onPut = nil
-
- if errors.Is(err, driver.ErrBadConn) {
- // Don't reuse bad connections.
- // Since the conn is considered bad and is being discarded, treat it
- // as closed. Don't decrement the open count here, finalClose will
- // take care of that.
- db.maybeOpenNewConnections()
- db.mu.Unlock()
- dc.Close()
- return
- }
- if putConnHook != nil {
- putConnHook(db, dc)
- }
- added := db.putConnDBLocked(dc, nil)
- db.mu.Unlock()
-
- if !added {
- dc.Close()
- return
- }
-}
-
-// Satisfy a connRequest or put the driverConn in the idle pool and return true
-// or return false.
-// putConnDBLocked will satisfy a connRequest if there is one, or it will
-// return the *driverConn to the freeConn list if err == nil and the idle
-// connection limit will not be exceeded.
-// If err != nil, the value of dc is ignored.
-// If err == nil, then dc must not equal nil.
-// If a connRequest was fulfilled or the *driverConn was placed in the
-// freeConn list, then true is returned, otherwise false is returned.
-func (db *DB) putConnDBLocked(dc *driverConn, err error) bool {
- if db.closed {
- return false
- }
- if db.maxOpen > 0 && db.numOpen > db.maxOpen {
- return false
- }
- if c := len(db.connRequests); c > 0 {
- var req chan connRequest
- var reqKey uint64
- for reqKey, req = range db.connRequests {
- break
- }
- delete(db.connRequests, reqKey) // Remove from pending requests.
- if err == nil {
- dc.inUse = true
- }
- req <- connRequest{
- conn: dc,
- err: err,
- }
- return true
- } else if err == nil && !db.closed {
- if db.maxIdleConnsLocked() > len(db.freeConn) {
- db.freeConn = append(db.freeConn, dc)
- db.startCleanerLocked()
- return true
- }
- db.maxIdleClosed++
- }
- return false
-}
-
-// maxBadConnRetries is the number of maximum retries if the driver returns
-// driver.ErrBadConn to signal a broken connection before forcing a new
-// connection to be opened.
-const maxBadConnRetries = 2
-
-func (db *DB) retry(fn func(strategy connReuseStrategy) error) error {
- for i := int64(0); i < maxBadConnRetries; i++ {
- err := fn(cachedOrNewConn)
- // retry if err is driver.ErrBadConn
- if err == nil || !errors.Is(err, driver.ErrBadConn) {
- return err
- }
- }
-
- return fn(alwaysNewConn)
-}
-
-// PrepareContext creates a prepared statement for later queries or executions.
-// Multiple queries or executions may be run concurrently from the
-// returned statement.
-// The caller must call the statement's Close method
-// when the statement is no longer needed.
-//
-// The provided context is used for the preparation of the statement, not for the
-// execution of the statement.
-func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
- var stmt *Stmt
- var err error
-
- err = db.retry(func(strategy connReuseStrategy) error {
- stmt, err = db.prepare(ctx, query, strategy)
- return err
- })
-
- return stmt, err
-}
-
-// Prepare creates a prepared statement for later queries or executions.
-// Multiple queries or executions may be run concurrently from the
-// returned statement.
-// The caller must call the statement's Close method
-// when the statement is no longer needed.
-//
-// Prepare uses context.Background internally; to specify the context, use
-// PrepareContext.
-func (db *DB) Prepare(query string) (*Stmt, error) {
- return db.PrepareContext(context.Background(), query)
-}
-
-func (db *DB) prepare(ctx context.Context, query string, strategy connReuseStrategy) (*Stmt, error) {
- // TODO: check if db.driver supports an optional
- // driver.Preparer interface and call that instead, if so,
- // otherwise we make a prepared statement that's bound
- // to a connection, and to execute this prepared statement
- // we either need to use this connection (if it's free), else
- // get a new connection + re-prepare + execute on that one.
- dc, err := db.conn(ctx, strategy)
- if err != nil {
- return nil, err
- }
- return db.prepareDC(ctx, dc, dc.releaseConn, nil, query)
-}
-
-// prepareDC prepares a query on the driverConn and calls release before
-// returning. When cg == nil it implies that a connection pool is used, and
-// when cg != nil only a single driver connection is used.
-func (db *DB) prepareDC(ctx context.Context, dc *driverConn, release func(error), cg stmtConnGrabber, query string) (*Stmt, error) {
- var ds *driverStmt
- var err error
- defer func() {
- release(err)
- }()
- withLock(dc, func() {
- ds, err = dc.prepareLocked(ctx, cg, query)
- })
- if err != nil {
- return nil, err
- }
- stmt := &Stmt{
- db: db,
- query: query,
- cg: cg,
- cgds: ds,
- }
-
- // When cg == nil this statement will need to keep track of various
- // connections they are prepared on and record the stmt dependency on
- // the DB.
- if cg == nil {
- stmt.css = []connStmt{{dc, ds}}
- stmt.lastNumClosed = db.numClosed.Load()
- db.addDep(stmt, stmt)
- }
- return stmt, nil
-}
-
-// ExecContext executes a query without returning any rows.
-// The args are for any placeholder parameters in the query.
-func (db *DB) ExecContext(ctx context.Context, query string, args ...any) (Result, error) {
- var res Result
- var err error
-
- err = db.retry(func(strategy connReuseStrategy) error {
- res, err = db.exec(ctx, query, args, strategy)
- return err
- })
-
- return res, err
-}
-
-// Exec executes a query without returning any rows.
-// The args are for any placeholder parameters in the query.
-//
-// Exec uses context.Background internally; to specify the context, use
-// ExecContext.
-func (db *DB) Exec(query string, args ...any) (Result, error) {
- return db.ExecContext(context.Background(), query, args...)
-}
-
-func (db *DB) exec(ctx context.Context, query string, args []any, strategy connReuseStrategy) (Result, error) {
- dc, err := db.conn(ctx, strategy)
- if err != nil {
- return nil, err
- }
- return db.execDC(ctx, dc, dc.releaseConn, query, args)
-}
-
-func (db *DB) execDC(ctx context.Context, dc *driverConn, release func(error), query string, args []any) (res Result, err error) {
- defer func() {
- release(err)
- }()
- execerCtx, ok := dc.ci.(driver.ExecerContext)
- var execer driver.Execer
- if !ok {
- execer, ok = dc.ci.(driver.Execer)
- }
- if ok {
- var nvdargs []driver.NamedValue
- var resi driver.Result
- withLock(dc, func() {
- nvdargs, err = driverArgsConnLocked(dc.ci, nil, args)
- if err != nil {
- return
- }
- resi, err = ctxDriverExec(ctx, execerCtx, execer, query, nvdargs)
- })
- if err != driver.ErrSkip {
- if err != nil {
- return nil, err
- }
- return driverResult{dc, resi}, nil
- }
- }
-
- var si driver.Stmt
- withLock(dc, func() {
- si, err = ctxDriverPrepare(ctx, dc.ci, query)
- })
- if err != nil {
- return nil, err
- }
- ds := &driverStmt{Locker: dc, si: si}
- defer ds.Close()
- return resultFromStatement(ctx, dc.ci, ds, args...)
-}
-
-// QueryContext executes a query that returns rows, typically a SELECT.
-// The args are for any placeholder parameters in the query.
-func (db *DB) QueryContext(ctx context.Context, query string, args ...any) (*Rows, error) {
- var rows *Rows
- var err error
-
- err = db.retry(func(strategy connReuseStrategy) error {
- rows, err = db.query(ctx, query, args, strategy)
- return err
- })
-
- return rows, err
-}
-
-// Query executes a query that returns rows, typically a SELECT.
-// The args are for any placeholder parameters in the query.
-//
-// Query uses context.Background internally; to specify the context, use
-// QueryContext.
-func (db *DB) Query(query string, args ...any) (*Rows, error) {
- return db.QueryContext(context.Background(), query, args...)
-}
-
-func (db *DB) query(ctx context.Context, query string, args []any, strategy connReuseStrategy) (*Rows, error) {
- dc, err := db.conn(ctx, strategy)
- if err != nil {
- return nil, err
- }
-
- return db.queryDC(ctx, nil, dc, dc.releaseConn, query, args)
-}
-
-// queryDC executes a query on the given connection.
-// The connection gets released by the releaseConn function.
-// The ctx context is from a query method and the txctx context is from an
-// optional transaction context.
-func (db *DB) queryDC(ctx, txctx context.Context, dc *driverConn, releaseConn func(error), query string, args []any) (*Rows, error) {
- queryerCtx, ok := dc.ci.(driver.QueryerContext)
- var queryer driver.Queryer
- if !ok {
- queryer, ok = dc.ci.(driver.Queryer)
- }
- if ok {
- var nvdargs []driver.NamedValue
- var rowsi driver.Rows
- var err error
- withLock(dc, func() {
- nvdargs, err = driverArgsConnLocked(dc.ci, nil, args)
- if err != nil {
- return
- }
- rowsi, err = ctxDriverQuery(ctx, queryerCtx, queryer, query, nvdargs)
- })
- if err != driver.ErrSkip {
- if err != nil {
- releaseConn(err)
- return nil, err
- }
- // Note: ownership of dc passes to the *Rows, to be freed
- // with releaseConn.
- rows := &Rows{
- dc: dc,
- releaseConn: releaseConn,
- rowsi: rowsi,
- }
- rows.initContextClose(ctx, txctx)
- return rows, nil
- }
- }
-
- var si driver.Stmt
- var err error
- withLock(dc, func() {
- si, err = ctxDriverPrepare(ctx, dc.ci, query)
- })
- if err != nil {
- releaseConn(err)
- return nil, err
- }
-
- ds := &driverStmt{Locker: dc, si: si}
- rowsi, err := rowsiFromStatement(ctx, dc.ci, ds, args...)
- if err != nil {
- ds.Close()
- releaseConn(err)
- return nil, err
- }
-
- // Note: ownership of ci passes to the *Rows, to be freed
- // with releaseConn.
- rows := &Rows{
- dc: dc,
- releaseConn: releaseConn,
- rowsi: rowsi,
- closeStmt: ds,
- }
- rows.initContextClose(ctx, txctx)
- return rows, nil
-}
-
-// QueryRowContext executes a query that is expected to return at most one row.
-// QueryRowContext always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
-// the rest.
-func (db *DB) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
- rows, err := db.QueryContext(ctx, query, args...)
- return &Row{rows: rows, err: err}
-}
-
-// QueryRow executes a query that is expected to return at most one row.
-// QueryRow always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
-// the rest.
-//
-// QueryRow uses context.Background internally; to specify the context, use
-// QueryRowContext.
-func (db *DB) QueryRow(query string, args ...any) *Row {
- return db.QueryRowContext(context.Background(), query, args...)
-}
-
-// BeginTx starts a transaction.
-//
-// The provided context is used until the transaction is committed or rolled back.
-// If the context is canceled, the sql package will roll back
-// the transaction. Tx.Commit will return an error if the context provided to
-// BeginTx is canceled.
-//
-// The provided TxOptions is optional and may be nil if defaults should be used.
-// If a non-default isolation level is used that the driver doesn't support,
-// an error will be returned.
-func (db *DB) BeginTx(ctx context.Context, opts *TxOptions) (*Tx, error) {
- var tx *Tx
- var err error
-
- err = db.retry(func(strategy connReuseStrategy) error {
- tx, err = db.begin(ctx, opts, strategy)
- return err
- })
-
- return tx, err
-}
-
-// Begin starts a transaction. The default isolation level is dependent on
-// the driver.
-//
-// Begin uses context.Background internally; to specify the context, use
-// BeginTx.
-func (db *DB) Begin() (*Tx, error) {
- return db.BeginTx(context.Background(), nil)
-}
-
-func (db *DB) begin(ctx context.Context, opts *TxOptions, strategy connReuseStrategy) (tx *Tx, err error) {
- dc, err := db.conn(ctx, strategy)
- if err != nil {
- return nil, err
- }
- return db.beginDC(ctx, dc, dc.releaseConn, opts)
-}
-
-// beginDC starts a transaction. The provided dc must be valid and ready to use.
-func (db *DB) beginDC(ctx context.Context, dc *driverConn, release func(error), opts *TxOptions) (tx *Tx, err error) {
- var txi driver.Tx
- keepConnOnRollback := false
- withLock(dc, func() {
- _, hasSessionResetter := dc.ci.(driver.SessionResetter)
- _, hasConnectionValidator := dc.ci.(driver.Validator)
- keepConnOnRollback = hasSessionResetter && hasConnectionValidator
- txi, err = ctxDriverBegin(ctx, opts, dc.ci)
- })
- if err != nil {
- release(err)
- return nil, err
- }
-
- // Schedule the transaction to rollback when the context is canceled.
- // The cancel function in Tx will be called after done is set to true.
- ctx, cancel := context.WithCancel(ctx)
- tx = &Tx{
- db: db,
- dc: dc,
- releaseConn: release,
- txi: txi,
- cancel: cancel,
- keepConnOnRollback: keepConnOnRollback,
- ctx: ctx,
- }
- go tx.awaitDone()
- return tx, nil
-}
-
-// Driver returns the database's underlying driver.
-func (db *DB) Driver() driver.Driver {
- return db.connector.Driver()
-}
-
-// ErrConnDone is returned by any operation that is performed on a connection
-// that has already been returned to the connection pool.
-var ErrConnDone = errors.New("sql: connection is already closed")
-
-// Conn returns a single connection by either opening a new connection
-// or returning an existing connection from the connection pool. Conn will
-// block until either a connection is returned or ctx is canceled.
-// Queries run on the same Conn will be run in the same database session.
-//
-// Every Conn must be returned to the database pool after use by
-// calling Conn.Close.
-func (db *DB) Conn(ctx context.Context) (*Conn, error) {
- var dc *driverConn
- var err error
-
- err = db.retry(func(strategy connReuseStrategy) error {
- dc, err = db.conn(ctx, strategy)
- return err
- })
-
- if err != nil {
- return nil, err
- }
-
- conn := &Conn{
- db: db,
- dc: dc,
- }
- return conn, nil
-}
-
-type releaseConn func(error)
-
-// Conn represents a single database connection rather than a pool of database
-// connections. Prefer running queries from DB unless there is a specific
-// need for a continuous single database connection.
-//
-// A Conn must call Close to return the connection to the database pool
-// and may do so concurrently with a running query.
-//
-// After a call to Close, all operations on the
-// connection fail with ErrConnDone.
-type Conn struct {
- db *DB
-
- // closemu prevents the connection from closing while there
- // is an active query. It is held for read during queries
- // and exclusively during close.
- closemu sync.RWMutex
-
- // dc is owned until close, at which point
- // it's returned to the connection pool.
- dc *driverConn
-
- // done transitions from false to true exactly once, on close.
- // Once done, all operations fail with ErrConnDone.
- done atomic.Bool
-
- // releaseConn is a cache of c.closemuRUnlockCondReleaseConn
- // to save allocations in a call to grabConn.
- releaseConnOnce sync.Once
- releaseConnCache releaseConn
-}
-
-// grabConn takes a context to implement stmtConnGrabber
-// but the context is not used.
-func (c *Conn) grabConn(context.Context) (*driverConn, releaseConn, error) {
- if c.done.Load() {
- return nil, nil, ErrConnDone
- }
- c.releaseConnOnce.Do(func() {
- c.releaseConnCache = c.closemuRUnlockCondReleaseConn
- })
- c.closemu.RLock()
- return c.dc, c.releaseConnCache, nil
-}
-
-// PingContext verifies the connection to the database is still alive.
-func (c *Conn) PingContext(ctx context.Context) error {
- dc, release, err := c.grabConn(ctx)
- if err != nil {
- return err
- }
- return c.db.pingDC(ctx, dc, release)
-}
-
-// ExecContext executes a query without returning any rows.
-// The args are for any placeholder parameters in the query.
-func (c *Conn) ExecContext(ctx context.Context, query string, args ...any) (Result, error) {
- dc, release, err := c.grabConn(ctx)
- if err != nil {
- return nil, err
- }
- return c.db.execDC(ctx, dc, release, query, args)
-}
-
-// QueryContext executes a query that returns rows, typically a SELECT.
-// The args are for any placeholder parameters in the query.
-func (c *Conn) QueryContext(ctx context.Context, query string, args ...any) (*Rows, error) {
- dc, release, err := c.grabConn(ctx)
- if err != nil {
- return nil, err
- }
- return c.db.queryDC(ctx, nil, dc, release, query, args)
-}
-
-// QueryRowContext executes a query that is expected to return at most one row.
-// QueryRowContext always returns a non-nil value. Errors are deferred until
-// Row's Scan method is called.
-// If the query selects no rows, the *Row's Scan will return ErrNoRows.
-// Otherwise, the *Row's Scan scans the first selected row and discards
-// the rest.
-func (c *Conn) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
- rows, err := c.QueryContext(ctx, query, args...)
- return &Row{rows: rows, err: err}
-}
-
-// PrepareContext creates a prepared statement for later queries or executions.
-// Multiple queries or executions may be run concurrently from the
-// returned statement.
-// The caller must call the statement's Close method
-// when the statement is no longer needed.
-//
-// The provided context is used for the preparation of the statement, not for the
-// execution of the statement.
-func (c *Conn) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
- dc, release, err := c.grabConn(ctx)
- if err != nil {
- return nil, err
- }
- return c.db.prepareDC(ctx, dc, release, c, query)
-}
-
-// Raw executes f exposing the underlying driver connection for the
-// duration of f. The driverConn must not be used outside of f.
-//
-// Once f returns and err is not driver.ErrBadConn, the Conn will continue to be usable
-// until Conn.Close is called.
-func (c *Conn) Raw(f func(driverConn any) error) (err error) {
- var dc *driverConn
- var release releaseConn
-
- // grabConn takes a context to implement stmtConnGrabber, but the context is not used.
- dc, release, err = c.grabConn(nil)
- if err != nil {
- return
- }
- fPanic := true
- dc.Mutex.Lock()
- defer func() {
- dc.Mutex.Unlock()
-
- // If f panics fPanic will remain true.
- // Ensure an error is passed to release so the connection
- // may be discarded.
- if fPanic {
- err = driver.ErrBadConn
- }
- release(err)
- }()
- err = f(dc.ci)
- fPanic = false
-
- return
-}
-
-// BeginTx starts a transaction.
-//
-// The provided context is used until the transaction is committed or rolled back.
-// If the context is canceled, the sql package will roll back
-// the transaction. Tx.Commit will return an error if the context provided to
-// BeginTx is canceled.
-//
-// The provided TxOptions is optional and may be nil if defaults should be used.
-// If a non-default isolation level is used that the driver doesn't support,
-// an error will be returned.
-func (c *Conn) BeginTx(ctx context.Context, opts *TxOptions) (*Tx, error) {
- dc, release, err := c.grabConn(ctx)
- if err != nil {
- return nil, err
- }
- return c.db.beginDC(ctx, dc, release, opts)
-}
-
-// closemuRUnlockCondReleaseConn read unlocks closemu
-// as the sql operation is done with the dc.
-func (c *Conn) closemuRUnlockCondReleaseConn(err error) {
- c.closemu.RUnlock()
- if errors.Is(err, driver.ErrBadConn) {
- c.close(err)
- }
-}
-
-func (c *Conn) txCtx() context.Context {
- return nil
-}
-
-func (c *Conn) close(err error) error {
- if !c.done.CompareAndSwap(false, true) {
- return ErrConnDone
- }
-
- // Lock around releasing the driver connection
- // to ensure all queries have been stopped before doing so.
- c.closemu.Lock()
- defer c.closemu.Unlock()
-
- c.dc.releaseConn(err)
- c.dc = nil
- c.db = nil
- return err
-}
-
-// Close returns the connection to the connection pool.
-// All operations after a Close will return with ErrConnDone.
-// Close is safe to call concurrently with other operations and will
-// block until all other operations finish. It may be useful to first
-// cancel any used context and then call close directly after.
-func (c *Conn) Close() error {
- return c.close(nil)
-}
-
-// Tx is an in-progress database transaction.
-//
-// A transaction must end with a call to Commit or Rollback.
-//
-// After a call to Commit or Rollback, all operations on the
-// transaction fail with ErrTxDone.
-//
-// The statements prepared for a transaction by calling
-// the transaction's Prepare or Stmt methods are closed
-// by the call to Commit or Rollback.
-type Tx struct {
- db *DB
-
- // closemu prevents the transaction from closing while there
- // is an active query. It is held for read during queries
- // and exclusively during close.
- closemu sync.RWMutex
-
- // dc is owned exclusively until Commit or Rollback, at which point
- // it's returned with putConn.
- dc *driverConn
- txi driver.Tx
-
- // releaseConn is called once the Tx is closed to release
- // any held driverConn back to the pool.
- releaseConn func(error)
-
- // done transitions from false to true exactly once, on Commit
- // or Rollback. once done, all operations fail with
- // ErrTxDone.
- done atomic.Bool
-
- // keepConnOnRollback is true if the driver knows
- // how to reset the connection's session and if need be discard
- // the connection.
- keepConnOnRollback bool
-
- // All Stmts prepared for this transaction. These will be closed after the
- // transaction has been committed or rolled back.
- stmts struct {
- sync.Mutex
- v []*Stmt
- }
-
- // cancel is called after done transitions from 0 to 1.
- cancel func()
-
- // ctx lives for the life of the transaction.
- ctx context.Context
-}
-
-// awaitDone blocks until the context in Tx is canceled and rolls back
-// the transaction if it's not already done.
-func (tx *Tx) awaitDone() {
- // Wait for either the transaction to be committed or rolled
- // back, or for the associated context to be closed.
- <-tx.ctx.Done()
-
- // Discard and close the connection used to ensure the
- // transaction is closed and the resources are released. This
- // rollback does nothing if the transaction has already been
- // committed or rolled back.
- // Do not discard the connection if the connection knows
- // how to reset the session.
- discardConnection := !tx.keepConnOnRollback
- tx.rollback(discardConnection)
-}
-
-func (tx *Tx) isDone() bool {
- return tx.done.Load()
-}
-
-// ErrTxDone is returned by any operation that is performed on a transaction
-// that has already been committed or rolled back.
-var ErrTxDone = errors.New("sql: transaction has already been committed or rolled back")
-
-// close returns the connection to the pool and
-// must only be called by Tx.rollback or Tx.Commit while
-// tx is already canceled and won't be executed concurrently.
-func (tx *Tx) close(err error) {
- tx.releaseConn(err)
- tx.dc = nil
- tx.txi = nil
-}
-
-// hookTxGrabConn specifies an optional hook to be called on
-// a successful call to (*Tx).grabConn. For tests.
-var hookTxGrabConn func()
-
-func (tx *Tx) grabConn(ctx context.Context) (*driverConn, releaseConn, error) {
- select {
- default:
- case <-ctx.Done():
- return nil, nil, ctx.Err()
- }
-
- // closemu.RLock must come before the check for isDone to prevent the Tx from
- // closing while a query is executing.
- tx.closemu.RLock()
- if tx.isDone() {
- tx.closemu.RUnlock()
- return nil, nil, ErrTxDone
- }
- if hookTxGrabConn != nil { // test hook
- hookTxGrabConn()
- }
- return tx.dc, tx.closemuRUnlockRelease, nil
-}
-
-func (tx *Tx) txCtx() context.Context {
- return tx.ctx
-}
-
-// closemuRUnlockRelease is used as a func(error) method value in
-// ExecContext and QueryContext. Unlocking in the releaseConn keeps
-// the driver conn from being returned to the connection pool until
-// the Rows has been closed.
-func (tx *Tx) closemuRUnlockRelease(error) {
- tx.closemu.RUnlock()
-}
-
-// Closes all Stmts prepared for this transaction.
-func (tx *Tx) closePrepared() {
- tx.stmts.Lock()
- defer tx.stmts.Unlock()
- for _, stmt := range tx.stmts.v {
- stmt.Close()
- }
-}
-
// Commit commits the transaction.
// It returns ErrTxDone if the transaction has already been committed or
// rolled back, or the context's error if tx.ctx was canceled first.
func (tx *Tx) Commit() error {
	// Check context first to avoid transaction leak.
	// If put it behind tx.done CompareAndSwap statement, we can't ensure
	// the consistency between tx.done and the real COMMIT operation.
	select {
	default:
	case <-tx.ctx.Done():
		if tx.done.Load() {
			return ErrTxDone
		}
		return tx.ctx.Err()
	}
	// Exactly one of Commit/rollback wins this CAS; losers report ErrTxDone.
	if !tx.done.CompareAndSwap(false, true) {
		return ErrTxDone
	}

	// Cancel the Tx to release any active R-closemu locks.
	// This is safe to do because tx.done has already transitioned
	// from 0 to 1. Hold the W-closemu lock prior to rollback
	// to ensure no other connection has an active query.
	tx.cancel()
	// Lock-then-unlock acts as a barrier: it waits for all in-flight
	// queries holding the read lock to finish before committing.
	tx.closemu.Lock()
	tx.closemu.Unlock()

	var err error
	withLock(tx.dc, func() {
		err = tx.txi.Commit()
	})
	// Skip closing prepared statements on a bad connection: the driver
	// connection is being discarded anyway.
	if !errors.Is(err, driver.ErrBadConn) {
		tx.closePrepared()
	}
	tx.close(err)
	return err
}
-
-var rollbackHook func()
-
// rollback aborts the transaction and optionally forces the pool to discard
// the connection.
// When discardConn is true the connection is reported to the pool as
// driver.ErrBadConn so it will not be reused.
func (tx *Tx) rollback(discardConn bool) error {
	// Exactly one of Commit/rollback wins this CAS; losers report ErrTxDone.
	if !tx.done.CompareAndSwap(false, true) {
		return ErrTxDone
	}

	if rollbackHook != nil {
		rollbackHook()
	}

	// Cancel the Tx to release any active R-closemu locks.
	// This is safe to do because tx.done has already transitioned
	// from 0 to 1. Hold the W-closemu lock prior to rollback
	// to ensure no other connection has an active query.
	tx.cancel()
	// Lock-then-unlock acts as a barrier: it waits for all in-flight
	// queries holding the read lock to finish before rolling back.
	tx.closemu.Lock()
	tx.closemu.Unlock()

	var err error
	withLock(tx.dc, func() {
		err = tx.txi.Rollback()
	})
	if !errors.Is(err, driver.ErrBadConn) {
		tx.closePrepared()
	}
	if discardConn {
		// Overwrite err so the pool drops this connection.
		err = driver.ErrBadConn
	}
	tx.close(err)
	return err
}
-
// Rollback aborts the transaction.
// It returns ErrTxDone if the transaction has already finished.
func (tx *Tx) Rollback() error {
	return tx.rollback(false)
}
-
// PrepareContext creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
//
// The provided context will be used for the preparation of the context, not
// for the execution of the returned statement. The returned statement
// will run in the transaction context.
func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
	dc, release, err := tx.grabConn(ctx)
	if err != nil {
		return nil, err
	}

	stmt, err := tx.db.prepareDC(ctx, dc, release, tx, query)
	if err != nil {
		return nil, err
	}
	// Track the statement so closePrepared can close it with the Tx.
	tx.stmts.Lock()
	tx.stmts.v = append(tx.stmts.v, stmt)
	tx.stmts.Unlock()
	return stmt, nil
}
-
// Prepare creates a prepared statement for use within a transaction.
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
//
// To use an existing prepared statement on this transaction, see Tx.Stmt.
//
// Prepare uses context.Background internally; to specify the context, use
// PrepareContext.
func (tx *Tx) Prepare(query string) (*Stmt, error) {
	return tx.PrepareContext(context.Background(), query)
}
-
// StmtContext returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//
//	updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//	...
//	tx, err := db.Begin()
//	...
//	res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203)
//
// The provided context is used for the preparation of the statement, not for the
// execution of the statement.
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
//
// Errors are reported via the returned Stmt's stickyErr rather than an
// explicit error return value.
func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt {
	dc, release, err := tx.grabConn(ctx)
	if err != nil {
		return &Stmt{stickyErr: err}
	}
	defer release(nil)

	if tx.db != stmt.db {
		return &Stmt{stickyErr: errors.New("sql: Tx.Stmt: statement from different database used")}
	}
	var si driver.Stmt
	var parentStmt *Stmt
	stmt.mu.Lock()
	if stmt.closed || stmt.cg != nil {
		// If the statement has been closed or already belongs to a
		// transaction, we can't reuse it in this connection.
		// Since tx.StmtContext should never need to be called with a
		// Stmt already belonging to tx, we ignore this edge case and
		// re-prepare the statement in this case. No need to add
		// code-complexity for this.
		stmt.mu.Unlock()
		withLock(dc, func() {
			si, err = ctxDriverPrepare(ctx, dc.ci, stmt.query)
		})
		if err != nil {
			return &Stmt{stickyErr: err}
		}
	} else {
		stmt.removeClosedStmtLocked()
		// See if the statement has already been prepared on this connection,
		// and reuse it if possible.
		for _, v := range stmt.css {
			if v.dc == dc {
				si = v.ds.si
				break
			}
		}

		stmt.mu.Unlock()

		if si == nil {
			// Not prepared on this connection yet; prepare it now
			// while holding the connection's lock.
			var ds *driverStmt
			withLock(dc, func() {
				ds, err = stmt.prepareOnConnLocked(ctx, dc)
			})
			if err != nil {
				return &Stmt{stickyErr: err}
			}
			si = ds.si
		}
		parentStmt = stmt
	}

	txs := &Stmt{
		db: tx.db,
		cg: tx,
		cgds: &driverStmt{
			Locker: dc,
			si:     si,
		},
		parentStmt: parentStmt,
		query:      stmt.query,
	}
	if parentStmt != nil {
		// Record the dependency so the parent Stmt is not fully closed
		// while this transaction-scoped child is still in use.
		tx.db.addDep(parentStmt, txs)
	}
	tx.stmts.Lock()
	tx.stmts.v = append(tx.stmts.v, txs)
	tx.stmts.Unlock()
	return txs
}
-
// Stmt returns a transaction-specific prepared statement from
// an existing statement.
//
// Example:
//
//	updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
//	...
//	tx, err := db.Begin()
//	...
//	res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)
//
// The returned statement operates within the transaction and will be closed
// when the transaction has been committed or rolled back.
//
// Stmt uses context.Background internally; to specify the context, use
// StmtContext.
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
	return tx.StmtContext(context.Background(), stmt)
}
-
// ExecContext executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
// The statement runs on the transaction's single connection.
func (tx *Tx) ExecContext(ctx context.Context, query string, args ...any) (Result, error) {
	dc, release, err := tx.grabConn(ctx)
	if err != nil {
		return nil, err
	}
	return tx.db.execDC(ctx, dc, release, query, args)
}
-
// Exec executes a query that doesn't return rows.
// For example: an INSERT and UPDATE.
//
// Exec uses context.Background internally; to specify the context, use
// ExecContext.
func (tx *Tx) Exec(query string, args ...any) (Result, error) {
	return tx.ExecContext(context.Background(), query, args...)
}
-
// QueryContext executes a query that returns rows, typically a SELECT.
// The transaction context (tx.ctx) is passed alongside ctx so the Rows
// are closed if the Tx finishes first.
func (tx *Tx) QueryContext(ctx context.Context, query string, args ...any) (*Rows, error) {
	dc, release, err := tx.grabConn(ctx)
	if err != nil {
		return nil, err
	}

	return tx.db.queryDC(ctx, tx.ctx, dc, release, query, args)
}
-
// Query executes a query that returns rows, typically a SELECT.
//
// Query uses context.Background internally; to specify the context, use
// QueryContext.
func (tx *Tx) Query(query string, args ...any) (*Rows, error) {
	return tx.QueryContext(context.Background(), query, args...)
}
-
// QueryRowContext executes a query that is expected to return at most one row.
// QueryRowContext always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...any) *Row {
	// Any query error is carried in Row.err and surfaced by Scan.
	rows, err := tx.QueryContext(ctx, query, args...)
	return &Row{rows: rows, err: err}
}
-
// QueryRow executes a query that is expected to return at most one row.
// QueryRow always returns a non-nil value. Errors are deferred until
// Row's Scan method is called.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// QueryRow uses context.Background internally; to specify the context, use
// QueryRowContext.
func (tx *Tx) QueryRow(query string, args ...any) *Row {
	return tx.QueryRowContext(context.Background(), query, args...)
}
-
// connStmt is a prepared statement on a particular connection.
type connStmt struct {
	dc *driverConn // the connection the statement was prepared on
	ds *driverStmt // the prepared statement itself
}
-
// stmtConnGrabber represents a Tx or Conn that will return the underlying
// driverConn and release function.
type stmtConnGrabber interface {
	// grabConn returns the driverConn and the associated release function
	// that must be called when the operation completes.
	grabConn(context.Context) (*driverConn, releaseConn, error)

	// txCtx returns the transaction context if available.
	// The returned context should be selected on along with
	// any query context when awaiting a cancel.
	txCtx() context.Context
}

// Compile-time assertions that both Tx and Conn satisfy stmtConnGrabber.
var (
	_ stmtConnGrabber = &Tx{}
	_ stmtConnGrabber = &Conn{}
)
-
// Stmt is a prepared statement.
// A Stmt is safe for concurrent use by multiple goroutines.
//
// If a Stmt is prepared on a Tx or Conn, it will be bound to a single
// underlying connection forever. If the Tx or Conn closes, the Stmt will
// become unusable and all operations will return an error.
// If a Stmt is prepared on a DB, it will remain usable for the lifetime of the
// DB. When the Stmt needs to execute on a new underlying connection, it will
// prepare itself on the new connection automatically.
type Stmt struct {
	// Immutable:
	db        *DB    // where we came from
	query     string // that created the Stmt
	stickyErr error  // if non-nil, this error is returned for all operations

	closemu sync.RWMutex // held exclusively during close, for read otherwise.

	// If Stmt is prepared on a Tx or Conn then cg is present and will
	// only ever grab a connection from cg.
	// If cg is nil then the Stmt must grab an arbitrary connection
	// from db and determine if it must prepare the stmt again by
	// inspecting css.
	cg   stmtConnGrabber
	cgds *driverStmt

	// parentStmt is set when a transaction-specific statement
	// is requested from an identical statement prepared on the same
	// conn. parentStmt is used to track the dependency of this statement
	// on its originating ("parent") statement so that parentStmt may
	// be closed by the user without them having to know whether or not
	// any transactions are still using it.
	parentStmt *Stmt

	mu     sync.Mutex // protects the rest of the fields
	closed bool

	// css is a list of underlying driver statement interfaces
	// that are valid on particular connections. This is only
	// used if cg == nil and one is found that has idle
	// connections. If cg != nil, cgds is always used.
	css []connStmt

	// lastNumClosed is copied from db.numClosed when Stmt is created
	// without tx and closed connections in css are removed.
	lastNumClosed uint64
}
-
// ExecContext executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
// The read lock on closemu prevents Close from running concurrently.
func (s *Stmt) ExecContext(ctx context.Context, args ...any) (Result, error) {
	s.closemu.RLock()
	defer s.closemu.RUnlock()

	var res Result
	// retry re-runs the operation with a fresh connection strategy on
	// bad-connection errors.
	err := s.db.retry(func(strategy connReuseStrategy) error {
		dc, releaseConn, ds, err := s.connStmt(ctx, strategy)
		if err != nil {
			return err
		}

		res, err = resultFromStatement(ctx, dc.ci, ds, args...)
		releaseConn(err)
		return err
	})

	return res, err
}
-
// Exec executes a prepared statement with the given arguments and
// returns a Result summarizing the effect of the statement.
//
// Exec uses context.Background internally; to specify the context, use
// ExecContext.
func (s *Stmt) Exec(args ...any) (Result, error) {
	return s.ExecContext(context.Background(), args...)
}
-
// resultFromStatement converts args to driver values and executes the
// prepared statement ds on connection ci, returning its Result.
// The driverStmt's lock is held for the duration of the driver call.
func resultFromStatement(ctx context.Context, ci driver.Conn, ds *driverStmt, args ...any) (Result, error) {
	ds.Lock()
	defer ds.Unlock()

	dargs, err := driverArgsConnLocked(ci, ds, args)
	if err != nil {
		return nil, err
	}

	resi, err := ctxDriverStmtExec(ctx, ds.si, dargs)
	if err != nil {
		return nil, err
	}
	return driverResult{ds.Locker, resi}, nil
}
-
// removeClosedStmtLocked removes closed conns in s.css.
//
// To avoid lock contention on DB.mu, we do it only when
// s.db.numClosed - s.lastNum is large enough.
//
// Caller must hold s.mu.
func (s *Stmt) removeClosedStmtLocked() {
	// Threshold scales with css size but is capped at 10.
	t := len(s.css)/2 + 1
	if t > 10 {
		t = 10
	}
	dbClosed := s.db.numClosed.Load()
	if dbClosed-s.lastNumClosed < uint64(t) {
		return
	}

	s.db.mu.Lock()
	for i := 0; i < len(s.css); i++ {
		if s.css[i].dc.dbmuClosed {
			// Swap-remove: replace with the last element and shrink.
			s.css[i] = s.css[len(s.css)-1]
			s.css = s.css[:len(s.css)-1]
			i--
		}
	}
	s.db.mu.Unlock()
	s.lastNumClosed = dbClosed
}
-
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
func (s *Stmt) connStmt(ctx context.Context, strategy connReuseStrategy) (dc *driverConn, releaseConn func(error), ds *driverStmt, err error) {
	if err = s.stickyErr; err != nil {
		return
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		err = errors.New("sql: statement is closed")
		return
	}

	// In a transaction or connection, we always use the connection that the
	// stmt was created on.
	if s.cg != nil {
		s.mu.Unlock()
		dc, releaseConn, err = s.cg.grabConn(ctx) // blocks, waiting for the connection.
		if err != nil {
			return
		}
		return dc, releaseConn, s.cgds, nil
	}

	s.removeClosedStmtLocked()
	s.mu.Unlock()

	// Grab an arbitrary connection from the pool.
	dc, err = s.db.conn(ctx, strategy)
	if err != nil {
		return nil, nil, nil, err
	}

	// Fast path: the statement may already be prepared on this connection.
	s.mu.Lock()
	for _, v := range s.css {
		if v.dc == dc {
			s.mu.Unlock()
			return dc, dc.releaseConn, v.ds, nil
		}
	}
	s.mu.Unlock()

	// No luck; we need to prepare the statement on this connection
	withLock(dc, func() {
		ds, err = s.prepareOnConnLocked(ctx, dc)
	})
	if err != nil {
		// Return the connection to the pool before reporting the error.
		dc.releaseConn(err)
		return nil, nil, nil, err
	}

	return dc, dc.releaseConn, ds, nil
}
-
// prepareOnConnLocked prepares the query in Stmt s on dc and adds it to the list of
// open connStmt on the statement. It assumes the caller is holding the lock on dc.
func (s *Stmt) prepareOnConnLocked(ctx context.Context, dc *driverConn) (*driverStmt, error) {
	si, err := dc.prepareLocked(ctx, s.cg, s.query)
	if err != nil {
		return nil, err
	}
	cs := connStmt{dc, si}
	// Record the (conn, stmt) pair so future calls on dc can reuse it.
	s.mu.Lock()
	s.css = append(s.css, cs)
	s.mu.Unlock()
	return cs.ds, nil
}
-
// QueryContext executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
func (s *Stmt) QueryContext(ctx context.Context, args ...any) (*Rows, error) {
	s.closemu.RLock()
	defer s.closemu.RUnlock()

	var rowsi driver.Rows
	var rows *Rows

	err := s.db.retry(func(strategy connReuseStrategy) error {
		dc, releaseConn, ds, err := s.connStmt(ctx, strategy)
		if err != nil {
			return err
		}

		rowsi, err = rowsiFromStatement(ctx, dc.ci, ds, args...)
		if err == nil {
			// Note: ownership of ci passes to the *Rows, to be freed
			// with releaseConn.
			rows = &Rows{
				dc:    dc,
				rowsi: rowsi,
				// releaseConn set below
			}
			// addDep must be added before initContextClose or it could attempt
			// to removeDep before it has been added.
			s.db.addDep(s, rows)

			// releaseConn must be set before initContextClose or it could
			// release the connection before it is set.
			rows.releaseConn = func(err error) {
				releaseConn(err)
				s.db.removeDep(s, rows)
			}
			var txctx context.Context
			if s.cg != nil {
				// Also watch the Tx/Conn context so Rows closes when it ends.
				txctx = s.cg.txCtx()
			}
			rows.initContextClose(ctx, txctx)
			return nil
		}

		releaseConn(err)
		return err
	})

	return rows, err
}
-
// Query executes a prepared query statement with the given arguments
// and returns the query results as a *Rows.
//
// Query uses context.Background internally; to specify the context, use
// QueryContext.
func (s *Stmt) Query(args ...any) (*Rows, error) {
	return s.QueryContext(context.Background(), args...)
}
-
// rowsiFromStatement converts args to driver values and runs the prepared
// query statement ds on connection ci, returning the driver's row iterator.
// The driverStmt's lock is held for the duration of the driver call.
func rowsiFromStatement(ctx context.Context, ci driver.Conn, ds *driverStmt, args ...any) (driver.Rows, error) {
	ds.Lock()
	defer ds.Unlock()
	dargs, err := driverArgsConnLocked(ci, ds, args)
	if err != nil {
		return nil, err
	}
	return ctxDriverStmtQuery(ctx, ds.si, dargs)
}
-
// QueryRowContext executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
func (s *Stmt) QueryRowContext(ctx context.Context, args ...any) *Row {
	rows, err := s.QueryContext(ctx, args...)
	if err != nil {
		// Defer the error to Row.Scan.
		return &Row{err: err}
	}
	return &Row{rows: rows}
}
-
// QueryRow executes a prepared query statement with the given arguments.
// If an error occurs during the execution of the statement, that error will
// be returned by a call to Scan on the returned *Row, which is always non-nil.
// If the query selects no rows, the *Row's Scan will return ErrNoRows.
// Otherwise, the *Row's Scan scans the first selected row and discards
// the rest.
//
// Example usage:
//
//	var name string
//	err := nameByUseridStmt.QueryRow(id).Scan(&name)
//
// QueryRow uses context.Background internally; to specify the context, use
// QueryRowContext.
func (s *Stmt) QueryRow(args ...any) *Row {
	return s.QueryRowContext(context.Background(), args...)
}
-
// Close closes the statement.
// It is idempotent: a second call returns nil (or stickyErr, if set).
func (s *Stmt) Close() error {
	// Exclusive lock: waits for in-flight Exec/Query calls to finish.
	s.closemu.Lock()
	defer s.closemu.Unlock()

	if s.stickyErr != nil {
		return s.stickyErr
	}
	s.mu.Lock()
	if s.closed {
		s.mu.Unlock()
		return nil
	}
	s.closed = true
	txds := s.cgds
	s.cgds = nil

	s.mu.Unlock()

	if s.cg == nil {
		// DB-scoped statement: dependency removal triggers finalClose.
		return s.db.removeDep(s, s)
	}

	if s.parentStmt != nil {
		// If parentStmt is set, we must not close s.txds since it's stored
		// in the css array of the parentStmt.
		return s.db.removeDep(s.parentStmt, s)
	}
	return txds.Close()
}
-
// finalClose releases all per-connection driver statements held in s.css.
// It runs once the dependency tracker determines the Stmt is unreferenced.
func (s *Stmt) finalClose() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.css != nil {
		for _, v := range s.css {
			s.db.noteUnusedDriverStatement(v.dc, v.ds)
			v.dc.removeOpenStmt(v.ds)
		}
		s.css = nil
	}
	return nil
}
-
// Rows is the result of a query. Its cursor starts before the first row
// of the result set. Use Next to advance from row to row.
//
// Rows is not safe for concurrent use except where noted on its fields.
type Rows struct {
	dc          *driverConn // owned; must call releaseConn when closed to release
	releaseConn func(error)
	rowsi       driver.Rows
	cancel      func()      // called when Rows is closed, may be nil.
	closeStmt   *driverStmt // if non-nil, statement to Close on close

	contextDone atomic.Pointer[error] // error that awaitDone saw; set before close attempt

	// closemu prevents Rows from closing while there
	// is an active streaming result. It is held for read during non-close operations
	// and exclusively during close.
	//
	// closemu guards lasterr and closed.
	closemu sync.RWMutex
	closed  bool
	lasterr error // non-nil only if closed is true

	// lastcols is only used in Scan, Next, and NextResultSet which are expected
	// not to be called concurrently.
	lastcols []driver.Value

	// closemuScanHold is whether the previous call to Scan kept closemu RLock'ed
	// without unlocking it. It does that when the user passes a *RawBytes scan
	// target. In that case, we need to prevent awaitDone from closing the Rows
	// while the user's still using the memory. See go.dev/issue/60304.
	//
	// It is only used by Scan, Next, and NextResultSet which are expected
	// not to be called concurrently.
	closemuScanHold bool

	// hitEOF is whether Next hit the end of the rows without
	// encountering an error. It's set in Next before
	// returning. It's only used by Next and Err which are
	// expected not to be called concurrently.
	hitEOF bool
}
-
// lasterrOrErrLocked returns either lasterr or the provided err.
// A stored io.EOF (normal end of rows) is not treated as an error.
// rs.closemu must be read-locked.
func (rs *Rows) lasterrOrErrLocked(err error) error {
	if rs.lasterr != nil && rs.lasterr != io.EOF {
		return rs.lasterr
	}
	return err
}
-
// bypassRowsAwaitDone is only used for testing.
// If true, it will not close the Rows automatically from the context.
var bypassRowsAwaitDone = false
-
// initContextClose starts the background goroutine that closes the Rows
// when the query context or the transaction context is canceled.
// It does nothing if neither context can ever be canceled.
func (rs *Rows) initContextClose(ctx, txctx context.Context) {
	if ctx.Done() == nil && (txctx == nil || txctx.Done() == nil) {
		return
	}
	if bypassRowsAwaitDone {
		return
	}
	// closectx lets rs.Close stop the awaitDone goroutine explicitly.
	closectx, cancel := context.WithCancel(ctx)
	rs.cancel = cancel
	go rs.awaitDone(ctx, txctx, closectx)
}
-
// awaitDone blocks until ctx, txctx, or closectx is canceled.
// The ctx is provided from the query context.
// If the query was issued in a transaction, the transaction's context
// is also provided in txctx, to ensure Rows is closed if the Tx is closed.
// The closectx is closed by an explicit call to rs.Close.
func (rs *Rows) awaitDone(ctx, txctx, closectx context.Context) {
	// A nil channel blocks forever, so txctxDone stays nil when there is
	// no transaction context to watch.
	var txctxDone <-chan struct{}
	if txctx != nil {
		txctxDone = txctx.Done()
	}
	select {
	case <-ctx.Done():
		err := ctx.Err()
		rs.contextDone.Store(&err)
	case <-txctxDone:
		err := txctx.Err()
		rs.contextDone.Store(&err)
	case <-closectx.Done():
		// rs.cancel was called via Close(); don't store this into contextDone
		// to ensure Err() is unaffected.
	}
	rs.close(ctx.Err())
}
-
// Next prepares the next result row for reading with the Scan method. It
// returns true on success, or false if there is no next result row or an error
// happened while preparing it. Err should be consulted to distinguish between
// the two cases.
//
// Every call to Scan, even the first one, must be preceded by a call to Next.
func (rs *Rows) Next() bool {
	// If the user's calling Next, they're done with their previous row's Scan
	// results (any RawBytes memory), so we can release the read lock that would
	// be preventing awaitDone from calling close.
	rs.closemuRUnlockIfHeldByScan()

	// A canceled context means iteration is over; Err will report it.
	if rs.contextDone.Load() != nil {
		return false
	}

	var doClose, ok bool
	withLock(rs.closemu.RLocker(), func() {
		doClose, ok = rs.nextLocked()
	})
	if doClose {
		rs.Close()
	}
	// doClose && !ok means the driver reached the end of all result sets.
	if doClose && !ok {
		rs.hitEOF = true
	}
	return ok
}
-
// nextLocked advances the driver cursor one row.
// It reports whether the Rows should be closed (doClose) and whether a
// row is available (ok). rs.closemu must be read-locked by the caller.
func (rs *Rows) nextLocked() (doClose, ok bool) {
	if rs.closed {
		return false, false
	}

	// Lock the driver connection before calling the driver interface
	// rowsi to prevent a Tx from rolling back the connection at the same time.
	rs.dc.Lock()
	defer rs.dc.Unlock()

	// Lazily allocate the scan buffer on first use.
	if rs.lastcols == nil {
		rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
	}

	rs.lasterr = rs.rowsi.Next(rs.lastcols)
	if rs.lasterr != nil {
		// Close the connection if there is a driver error.
		if rs.lasterr != io.EOF {
			return true, false
		}
		nextResultSet, ok := rs.rowsi.(driver.RowsNextResultSet)
		if !ok {
			return true, false
		}
		// The driver is at the end of the current result set.
		// Test to see if there is another result set after the current one.
		// Only close Rows if there is no further result sets to read.
		if !nextResultSet.HasNextResultSet() {
			doClose = true
		}
		return doClose, false
	}
	return false, true
}
-
// NextResultSet prepares the next result set for reading. It reports whether
// there is further result sets, or false if there is no further result set
// or if there is an error advancing to it. The Err method should be consulted
// to distinguish between the two cases.
//
// After calling NextResultSet, the Next method should always be called before
// scanning. If there are further result sets they may not have rows in the result
// set.
func (rs *Rows) NextResultSet() bool {
	// If the user's calling NextResultSet, they're done with their previous
	// row's Scan results (any RawBytes memory), so we can release the read lock
	// that would be preventing awaitDone from calling close.
	rs.closemuRUnlockIfHeldByScan()

	var doClose bool
	// Registered before RLock below so that, by LIFO defer order, Close
	// (which takes the write lock) runs only after RUnlock.
	defer func() {
		if doClose {
			rs.Close()
		}
	}()
	rs.closemu.RLock()
	defer rs.closemu.RUnlock()

	if rs.closed {
		return false
	}

	// Invalidate the previous result set's scan buffer.
	rs.lastcols = nil
	nextResultSet, ok := rs.rowsi.(driver.RowsNextResultSet)
	if !ok {
		doClose = true
		return false
	}

	// Lock the driver connection before calling the driver interface
	// rowsi to prevent a Tx from rolling back the connection at the same time.
	rs.dc.Lock()
	defer rs.dc.Unlock()

	rs.lasterr = nextResultSet.NextResultSet()
	if rs.lasterr != nil {
		doClose = true
		return false
	}
	return true
}
-
// Err returns the error, if any, that was encountered during iteration.
// Err may be called after an explicit or implicit Close.
func (rs *Rows) Err() error {
	// Return any context error that might've happened during row iteration,
	// but only if we haven't reported the final Next() = false after rows
	// are done, in which case the user might've canceled their own context
	// before calling Rows.Err.
	if !rs.hitEOF {
		if errp := rs.contextDone.Load(); errp != nil {
			return *errp
		}
	}

	rs.closemu.RLock()
	defer rs.closemu.RUnlock()
	return rs.lasterrOrErrLocked(nil)
}
-
// Sentinel errors for operations on closed or absent Rows.
var errRowsClosed = errors.New("sql: Rows are closed")
var errNoRows = errors.New("sql: no Rows available")
-
// Columns returns the column names.
// Columns returns an error if the rows are closed.
func (rs *Rows) Columns() ([]string, error) {
	rs.closemu.RLock()
	defer rs.closemu.RUnlock()
	if rs.closed {
		return nil, rs.lasterrOrErrLocked(errRowsClosed)
	}
	if rs.rowsi == nil {
		return nil, rs.lasterrOrErrLocked(errNoRows)
	}
	// Hold the connection lock while calling into the driver.
	rs.dc.Lock()
	defer rs.dc.Unlock()

	return rs.rowsi.Columns(), nil
}
-
// ColumnTypes returns column information such as column type, length,
// and nullable. Some information may not be available from some drivers.
func (rs *Rows) ColumnTypes() ([]*ColumnType, error) {
	rs.closemu.RLock()
	defer rs.closemu.RUnlock()
	if rs.closed {
		return nil, rs.lasterrOrErrLocked(errRowsClosed)
	}
	if rs.rowsi == nil {
		return nil, rs.lasterrOrErrLocked(errNoRows)
	}
	// Hold the connection lock while calling into the driver.
	rs.dc.Lock()
	defer rs.dc.Unlock()

	return rowsColumnInfoSetupConnLocked(rs.rowsi), nil
}
-
// ColumnType contains the name and type of a column.
type ColumnType struct {
	name string

	// hasX flags record whether the corresponding value below was
	// reported by the driver (via the optional driver.RowsColumnType*
	// interfaces).
	hasNullable       bool
	hasLength         bool
	hasPrecisionScale bool

	nullable     bool
	length       int64
	databaseType string
	precision    int64
	scale        int64
	scanType     reflect.Type
}

// Name returns the name or alias of the column.
func (ci *ColumnType) Name() string {
	return ci.name
}

// Length returns the column type length for variable length column types such
// as text and binary field types. If the type length is unbounded the value will
// be math.MaxInt64 (any database limits will still apply).
// If the column type is not variable length, such as an int, or if not supported
// by the driver ok is false.
func (ci *ColumnType) Length() (length int64, ok bool) {
	return ci.length, ci.hasLength
}

// DecimalSize returns the scale and precision of a decimal type.
// If not applicable or if not supported ok is false.
func (ci *ColumnType) DecimalSize() (precision, scale int64, ok bool) {
	return ci.precision, ci.scale, ci.hasPrecisionScale
}

// ScanType returns a Go type suitable for scanning into using Rows.Scan.
// If a driver does not support this property ScanType will return
// the type of an empty interface.
func (ci *ColumnType) ScanType() reflect.Type {
	return ci.scanType
}

// Nullable reports whether the column may be null.
// If a driver does not support this property ok will be false.
func (ci *ColumnType) Nullable() (nullable, ok bool) {
	return ci.nullable, ci.hasNullable
}

// DatabaseTypeName returns the database system name of the column type. If an empty
// string is returned, then the driver type name is not supported.
// Consult your driver documentation for a list of driver data types. Length specifiers
// are not included.
// Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL",
// "INT", and "BIGINT".
func (ci *ColumnType) DatabaseTypeName() string {
	return ci.databaseType
}
-
// rowsColumnInfoSetupConnLocked builds ColumnType metadata for every column,
// probing the driver for each optional RowsColumnType* interface and filling
// in only what the driver supports. The caller must hold the conn lock.
func rowsColumnInfoSetupConnLocked(rowsi driver.Rows) []*ColumnType {
	names := rowsi.Columns()

	list := make([]*ColumnType, len(names))
	for i := range list {
		ci := &ColumnType{
			name: names[i],
		}
		list[i] = ci

		if prop, ok := rowsi.(driver.RowsColumnTypeScanType); ok {
			ci.scanType = prop.ColumnTypeScanType(i)
		} else {
			// Fallback: scan into an empty interface.
			ci.scanType = reflect.TypeOf(new(any)).Elem()
		}
		if prop, ok := rowsi.(driver.RowsColumnTypeDatabaseTypeName); ok {
			ci.databaseType = prop.ColumnTypeDatabaseTypeName(i)
		}
		if prop, ok := rowsi.(driver.RowsColumnTypeLength); ok {
			ci.length, ci.hasLength = prop.ColumnTypeLength(i)
		}
		if prop, ok := rowsi.(driver.RowsColumnTypeNullable); ok {
			ci.nullable, ci.hasNullable = prop.ColumnTypeNullable(i)
		}
		if prop, ok := rowsi.(driver.RowsColumnTypePrecisionScale); ok {
			ci.precision, ci.scale, ci.hasPrecisionScale = prop.ColumnTypePrecisionScale(i)
		}
	}
	return list
}
-
// Scan copies the columns in the current row into the values pointed
// at by dest. The number of values in dest must be the same as the
// number of columns in Rows.
//
// Scan converts columns read from the database into the following
// common Go types and special types provided by the sql package:
//
//	*string
//	*[]byte
//	*int, *int8, *int16, *int32, *int64
//	*uint, *uint8, *uint16, *uint32, *uint64
//	*bool
//	*float32, *float64
//	*interface{}
//	*RawBytes
//	*Rows (cursor value)
//	any type implementing Scanner (see Scanner docs)
//
// In the most simple case, if the type of the value from the source
// column is an integer, bool or string type T and dest is of type *T,
// Scan simply assigns the value through the pointer.
//
// Scan also converts between string and numeric types, as long as no
// information would be lost. While Scan stringifies all numbers
// scanned from numeric database columns into *string, scans into
// numeric types are checked for overflow. For example, a float64 with
// value 300 or a string with value "300" can scan into a uint16, but
// not into a uint8, though float64(255) or "255" can scan into a
// uint8. One exception is that scans of some float64 numbers to
// strings may lose information when stringifying. In general, scan
// floating point columns into *float64.
//
// If a dest argument has type *[]byte, Scan saves in that argument a
// copy of the corresponding data. The copy is owned by the caller and
// can be modified and held indefinitely. The copy can be avoided by
// using an argument of type *RawBytes instead; see the documentation
// for RawBytes for restrictions on its use.
//
// If an argument has type *interface{}, Scan copies the value
// provided by the underlying driver without conversion. When scanning
// from a source value of type []byte to *interface{}, a copy of the
// slice is made and the caller owns the result.
//
// Source values of type time.Time may be scanned into values of type
// *time.Time, *interface{}, *string, or *[]byte. When converting to
// the latter two, time.RFC3339Nano is used.
//
// Source values of type bool may be scanned into types *bool,
// *interface{}, *string, *[]byte, or *RawBytes.
//
// For scanning into *bool, the source may be true, false, 1, 0, or
// string inputs parseable by strconv.ParseBool.
//
// Scan can also convert a cursor returned from a query, such as
// "select cursor(select * from my_table) from dual", into a
// *Rows value that can itself be scanned from. The parent
// select query will close any cursor *Rows if the parent *Rows is closed.
//
// If any of the first arguments implementing Scanner returns an error,
// that error will be wrapped in the returned error.
func (rs *Rows) Scan(dest ...any) error {
	if rs.closemuScanHold {
		// This should only be possible if the user calls Scan twice in a row
		// without calling Next.
		return fmt.Errorf("sql: Scan called without calling Next (closemuScanHold)")
	}
	rs.closemu.RLock()

	if rs.lasterr != nil && rs.lasterr != io.EOF {
		rs.closemu.RUnlock()
		return rs.lasterr
	}
	if rs.closed {
		err := rs.lasterrOrErrLocked(errRowsClosed)
		rs.closemu.RUnlock()
		return err
	}

	// With a *RawBytes destination, keep the read lock held after Scan
	// returns so awaitDone cannot close the Rows (and free the memory
	// RawBytes aliases) until the next Next/NextResultSet/Close call.
	// See go.dev/issue/60304.
	if scanArgsContainRawBytes(dest) {
		rs.closemuScanHold = true
	} else {
		rs.closemu.RUnlock()
	}

	if rs.lastcols == nil {
		rs.closemuRUnlockIfHeldByScan()
		return errors.New("sql: Scan called without calling Next")
	}
	if len(dest) != len(rs.lastcols) {
		rs.closemuRUnlockIfHeldByScan()
		return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
	}

	for i, sv := range rs.lastcols {
		err := convertAssignRows(dest[i], sv, rs)
		if err != nil {
			rs.closemuRUnlockIfHeldByScan()
			return fmt.Errorf(`sql: Scan error on column index %d, name %q: %w`, i, rs.rowsi.Columns()[i], err)
		}
	}
	return nil
}
-
-// closemuRUnlockIfHeldByScan releases any closemu.RLock held open by a previous
-// call to Scan with *RawBytes.
-func (rs *Rows) closemuRUnlockIfHeldByScan() {
- if rs.closemuScanHold {
- rs.closemuScanHold = false
- rs.closemu.RUnlock()
- }
-}
-
-func scanArgsContainRawBytes(args []any) bool {
- for _, a := range args {
- if _, ok := a.(*RawBytes); ok {
- return true
- }
- }
- return false
-}
-
-// rowsCloseHook returns a function so tests may install the
-// hook through a test only mutex.
-var rowsCloseHook = func() func(*Rows, *error) { return nil }
-
-// Close closes the Rows, preventing further enumeration. If Next is called
-// and returns false and there are no further result sets,
-// the Rows are closed automatically and it will suffice to check the
-// result of Err. Close is idempotent and does not affect the result of Err.
-func (rs *Rows) Close() error {
- // If the user's calling Close, they're done with their previous row's Scan
- // results (any RawBytes memory), so we can release the read lock that would
- // be preventing awaitDone from calling the unexported close before we do so.
- rs.closemuRUnlockIfHeldByScan()
-
- return rs.close(nil)
-}
-
-func (rs *Rows) close(err error) error {
- rs.closemu.Lock()
- defer rs.closemu.Unlock()
-
- if rs.closed {
- return nil
- }
- rs.closed = true
-
- if rs.lasterr == nil {
- rs.lasterr = err
- }
-
- withLock(rs.dc, func() {
- err = rs.rowsi.Close()
- })
- if fn := rowsCloseHook(); fn != nil {
- fn(rs, &err)
- }
- if rs.cancel != nil {
- rs.cancel()
- }
-
- if rs.closeStmt != nil {
- rs.closeStmt.Close()
- }
- rs.releaseConn(err)
-
- rs.lasterr = rs.lasterrOrErrLocked(err)
- return err
-}
-
-// Row is the result of calling QueryRow to select a single row.
-type Row struct {
- // One of these two will be non-nil:
- err error // deferred error for easy chaining
- rows *Rows
-}
-
-// Scan copies the columns from the matched row into the values
-// pointed at by dest. See the documentation on Rows.Scan for details.
-// If more than one row matches the query,
-// Scan uses the first row and discards the rest. If no row matches
-// the query, Scan returns ErrNoRows.
-func (r *Row) Scan(dest ...any) error {
- if r.err != nil {
- return r.err
- }
-
- // TODO(bradfitz): for now we need to defensively clone all
- // []byte that the driver returned (not permitting
- // *RawBytes in Rows.Scan), since we're about to close
- // the Rows in our defer, when we return from this function.
- // the contract with the driver.Next(...) interface is that it
- // can return slices into read-only temporary memory that's
- // only valid until the next Scan/Close. But the TODO is that
- // for a lot of drivers, this copy will be unnecessary. We
- // should provide an optional interface for drivers to
- // implement to say, "don't worry, the []bytes that I return
- // from Next will not be modified again." (for instance, if
- // they were obtained from the network anyway) But for now we
- // don't care.
- defer r.rows.Close()
- for _, dp := range dest {
- if _, ok := dp.(*RawBytes); ok {
- return errors.New("sql: RawBytes isn't allowed on Row.Scan")
- }
- }
-
- if !r.rows.Next() {
- if err := r.rows.Err(); err != nil {
- return err
- }
- return ErrNoRows
- }
- err := r.rows.Scan(dest...)
- if err != nil {
- return err
- }
- // Make sure the query can be processed to completion with no errors.
- return r.rows.Close()
-}
-
-// Err provides a way for wrapping packages to check for
-// query errors without calling Scan.
-// Err returns the error, if any, that was encountered while running the query.
-// If this error is not nil, this error will also be returned from Scan.
-func (r *Row) Err() error {
- return r.err
-}
-
-// A Result summarizes an executed SQL command.
-type Result interface {
- // LastInsertId returns the integer generated by the database
- // in response to a command. Typically this will be from an
- // "auto increment" column when inserting a new row. Not all
- // databases support this feature, and the syntax of such
- // statements varies.
- LastInsertId() (int64, error)
-
- // RowsAffected returns the number of rows affected by an
- // update, insert, or delete. Not every database or database
- // driver may support this.
- RowsAffected() (int64, error)
-}
-
-type driverResult struct {
- sync.Locker // the *driverConn
- resi driver.Result
-}
-
-func (dr driverResult) LastInsertId() (int64, error) {
- dr.Lock()
- defer dr.Unlock()
- return dr.resi.LastInsertId()
-}
-
-func (dr driverResult) RowsAffected() (int64, error) {
- dr.Lock()
- defer dr.Unlock()
- return dr.resi.RowsAffected()
-}
-
-func stack() string {
- var buf [2 << 10]byte
- return string(buf[:runtime.Stack(buf[:], false)])
-}
-
-// withLock runs while holding lk.
-func withLock(lk sync.Locker, fn func()) {
- lk.Lock()
- defer lk.Unlock() // in case fn panics
- fn()
-}
diff --git a/contrib/go/_std_1.21/src/database/sql/ya.make b/contrib/go/_std_1.21/src/database/sql/ya.make
deleted file mode 100644
index 1287f3aafa..0000000000
--- a/contrib/go/_std_1.21/src/database/sql/ya.make
+++ /dev/null
@@ -1,25 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- convert.go
- ctxutil.go
- sql.go
-)
-
-GO_TEST_SRCS(
- convert_test.go
- fakedb_test.go
- sql_test.go
-)
-
-GO_XTEST_SRCS(
- example_cli_test.go
- example_service_test.go
- example_test.go
-)
-
-END()
-
-RECURSE(
- driver
-)
diff --git a/contrib/go/_std_1.21/src/expvar/expvar.go b/contrib/go/_std_1.21/src/expvar/expvar.go
deleted file mode 100644
index 300d8c2676..0000000000
--- a/contrib/go/_std_1.21/src/expvar/expvar.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package expvar provides a standardized interface to public variables, such
-// as operation counters in servers. It exposes these variables via HTTP at
-// /debug/vars in JSON format.
-//
-// Operations to set or modify these public variables are atomic.
-//
-// In addition to adding the HTTP handler, this package registers the
-// following variables:
-//
-// cmdline os.Args
-// memstats runtime.Memstats
-//
-// The package is sometimes only imported for the side effect of
-// registering its HTTP handler and the above variables. To use it
-// this way, link this package into your program:
-//
-// import _ "expvar"
-package expvar
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "math"
- "net/http"
- "os"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-// Var is an abstract type for all exported variables.
-type Var interface {
- // String returns a valid JSON value for the variable.
- // Types with String methods that do not return valid JSON
- // (such as time.Time) must not be used as a Var.
- String() string
-}
-
-// Int is a 64-bit integer variable that satisfies the Var interface.
-type Int struct {
- i int64
-}
-
-func (v *Int) Value() int64 {
- return atomic.LoadInt64(&v.i)
-}
-
-func (v *Int) String() string {
- return strconv.FormatInt(atomic.LoadInt64(&v.i), 10)
-}
-
-func (v *Int) Add(delta int64) {
- atomic.AddInt64(&v.i, delta)
-}
-
-func (v *Int) Set(value int64) {
- atomic.StoreInt64(&v.i, value)
-}
-
-// Float is a 64-bit float variable that satisfies the Var interface.
-type Float struct {
- f atomic.Uint64
-}
-
-func (v *Float) Value() float64 {
- return math.Float64frombits(v.f.Load())
-}
-
-func (v *Float) String() string {
- return strconv.FormatFloat(
- math.Float64frombits(v.f.Load()), 'g', -1, 64)
-}
-
-// Add adds delta to v.
-func (v *Float) Add(delta float64) {
- for {
- cur := v.f.Load()
- curVal := math.Float64frombits(cur)
- nxtVal := curVal + delta
- nxt := math.Float64bits(nxtVal)
- if v.f.CompareAndSwap(cur, nxt) {
- return
- }
- }
-}
-
-// Set sets v to value.
-func (v *Float) Set(value float64) {
- v.f.Store(math.Float64bits(value))
-}
-
-// Map is a string-to-Var map variable that satisfies the Var interface.
-type Map struct {
- m sync.Map // map[string]Var
- keysMu sync.RWMutex
- keys []string // sorted
-}
-
-// KeyValue represents a single entry in a Map.
-type KeyValue struct {
- Key string
- Value Var
-}
-
-func (v *Map) String() string {
- var b strings.Builder
- fmt.Fprintf(&b, "{")
- first := true
- v.Do(func(kv KeyValue) {
- if !first {
- fmt.Fprintf(&b, ", ")
- }
- fmt.Fprintf(&b, "%q: ", kv.Key)
- if kv.Value != nil {
- fmt.Fprintf(&b, "%v", kv.Value)
- } else {
- fmt.Fprint(&b, "null")
- }
- first = false
- })
- fmt.Fprintf(&b, "}")
- return b.String()
-}
-
-// Init removes all keys from the map.
-func (v *Map) Init() *Map {
- v.keysMu.Lock()
- defer v.keysMu.Unlock()
- v.keys = v.keys[:0]
- v.m.Range(func(k, _ any) bool {
- v.m.Delete(k)
- return true
- })
- return v
-}
-
-// addKey updates the sorted list of keys in v.keys.
-func (v *Map) addKey(key string) {
- v.keysMu.Lock()
- defer v.keysMu.Unlock()
- // Using insertion sort to place key into the already-sorted v.keys.
- if i := sort.SearchStrings(v.keys, key); i >= len(v.keys) {
- v.keys = append(v.keys, key)
- } else if v.keys[i] != key {
- v.keys = append(v.keys, "")
- copy(v.keys[i+1:], v.keys[i:])
- v.keys[i] = key
- }
-}
-
-func (v *Map) Get(key string) Var {
- i, _ := v.m.Load(key)
- av, _ := i.(Var)
- return av
-}
-
-func (v *Map) Set(key string, av Var) {
- // Before we store the value, check to see whether the key is new. Try a Load
- // before LoadOrStore: LoadOrStore causes the key interface to escape even on
- // the Load path.
- if _, ok := v.m.Load(key); !ok {
- if _, dup := v.m.LoadOrStore(key, av); !dup {
- v.addKey(key)
- return
- }
- }
-
- v.m.Store(key, av)
-}
-
-// Add adds delta to the *Int value stored under the given map key.
-func (v *Map) Add(key string, delta int64) {
- i, ok := v.m.Load(key)
- if !ok {
- var dup bool
- i, dup = v.m.LoadOrStore(key, new(Int))
- if !dup {
- v.addKey(key)
- }
- }
-
- // Add to Int; ignore otherwise.
- if iv, ok := i.(*Int); ok {
- iv.Add(delta)
- }
-}
-
-// AddFloat adds delta to the *Float value stored under the given map key.
-func (v *Map) AddFloat(key string, delta float64) {
- i, ok := v.m.Load(key)
- if !ok {
- var dup bool
- i, dup = v.m.LoadOrStore(key, new(Float))
- if !dup {
- v.addKey(key)
- }
- }
-
- // Add to Float; ignore otherwise.
- if iv, ok := i.(*Float); ok {
- iv.Add(delta)
- }
-}
-
-// Delete deletes the given key from the map.
-func (v *Map) Delete(key string) {
- v.keysMu.Lock()
- defer v.keysMu.Unlock()
- i := sort.SearchStrings(v.keys, key)
- if i < len(v.keys) && key == v.keys[i] {
- v.keys = append(v.keys[:i], v.keys[i+1:]...)
- v.m.Delete(key)
- }
-}
-
-// Do calls f for each entry in the map.
-// The map is locked during the iteration,
-// but existing entries may be concurrently updated.
-func (v *Map) Do(f func(KeyValue)) {
- v.keysMu.RLock()
- defer v.keysMu.RUnlock()
- for _, k := range v.keys {
- i, _ := v.m.Load(k)
- val, _ := i.(Var)
- f(KeyValue{k, val})
- }
-}
-
-// String is a string variable, and satisfies the Var interface.
-type String struct {
- s atomic.Value // string
-}
-
-func (v *String) Value() string {
- p, _ := v.s.Load().(string)
- return p
-}
-
-// String implements the Var interface. To get the unquoted string
-// use Value.
-func (v *String) String() string {
- s := v.Value()
- b, _ := json.Marshal(s)
- return string(b)
-}
-
-func (v *String) Set(value string) {
- v.s.Store(value)
-}
-
-// Func implements Var by calling the function
-// and formatting the returned value using JSON.
-type Func func() any
-
-func (f Func) Value() any {
- return f()
-}
-
-func (f Func) String() string {
- v, _ := json.Marshal(f())
- return string(v)
-}
-
-// All published variables.
-var (
- vars sync.Map // map[string]Var
- varKeysMu sync.RWMutex
- varKeys []string // sorted
-)
-
-// Publish declares a named exported variable. This should be called from a
-// package's init function when it creates its Vars. If the name is already
-// registered then this will log.Panic.
-func Publish(name string, v Var) {
- if _, dup := vars.LoadOrStore(name, v); dup {
- log.Panicln("Reuse of exported var name:", name)
- }
- varKeysMu.Lock()
- defer varKeysMu.Unlock()
- varKeys = append(varKeys, name)
- sort.Strings(varKeys)
-}
-
-// Get retrieves a named exported variable. It returns nil if the name has
-// not been registered.
-func Get(name string) Var {
- i, _ := vars.Load(name)
- v, _ := i.(Var)
- return v
-}
-
-// Convenience functions for creating new exported variables.
-
-func NewInt(name string) *Int {
- v := new(Int)
- Publish(name, v)
- return v
-}
-
-func NewFloat(name string) *Float {
- v := new(Float)
- Publish(name, v)
- return v
-}
-
-func NewMap(name string) *Map {
- v := new(Map).Init()
- Publish(name, v)
- return v
-}
-
-func NewString(name string) *String {
- v := new(String)
- Publish(name, v)
- return v
-}
-
-// Do calls f for each exported variable.
-// The global variable map is locked during the iteration,
-// but existing entries may be concurrently updated.
-func Do(f func(KeyValue)) {
- varKeysMu.RLock()
- defer varKeysMu.RUnlock()
- for _, k := range varKeys {
- val, _ := vars.Load(k)
- f(KeyValue{k, val.(Var)})
- }
-}
-
-func expvarHandler(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- fmt.Fprintf(w, "{\n")
- first := true
- Do(func(kv KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
-
-// Handler returns the expvar HTTP Handler.
-//
-// This is only needed to install the handler in a non-standard location.
-func Handler() http.Handler {
- return http.HandlerFunc(expvarHandler)
-}
-
-func cmdline() any {
- return os.Args
-}
-
-func memstats() any {
- stats := new(runtime.MemStats)
- runtime.ReadMemStats(stats)
- return *stats
-}
-
-func init() {
- http.HandleFunc("/debug/vars", expvarHandler)
- Publish("cmdline", Func(cmdline))
- Publish("memstats", Func(memstats))
-}
diff --git a/contrib/go/_std_1.21/src/expvar/ya.make b/contrib/go/_std_1.21/src/expvar/ya.make
deleted file mode 100644
index 06dfbfc8bc..0000000000
--- a/contrib/go/_std_1.21/src/expvar/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- expvar.go
-)
-
-GO_TEST_SRCS(expvar_test.go)
-
-END()
-
-RECURSE(
-)
diff --git a/contrib/go/_std_1.21/src/image/color/color.go b/contrib/go/_std_1.21/src/image/color/color.go
deleted file mode 100644
index 8895839140..0000000000
--- a/contrib/go/_std_1.21/src/image/color/color.go
+++ /dev/null
@@ -1,347 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package color implements a basic color library.
-package color
-
-// Color can convert itself to alpha-premultiplied 16-bits per channel RGBA.
-// The conversion may be lossy.
-type Color interface {
- // RGBA returns the alpha-premultiplied red, green, blue and alpha values
- // for the color. Each value ranges within [0, 0xffff], but is represented
- // by a uint32 so that multiplying by a blend factor up to 0xffff will not
- // overflow.
- //
- // An alpha-premultiplied color component c has been scaled by alpha (a),
- // so has valid values 0 <= c <= a.
- RGBA() (r, g, b, a uint32)
-}
-
-// RGBA represents a traditional 32-bit alpha-premultiplied color, having 8
-// bits for each of red, green, blue and alpha.
-//
-// An alpha-premultiplied color component C has been scaled by alpha (A), so
-// has valid values 0 <= C <= A.
-type RGBA struct {
- R, G, B, A uint8
-}
-
-func (c RGBA) RGBA() (r, g, b, a uint32) {
- r = uint32(c.R)
- r |= r << 8
- g = uint32(c.G)
- g |= g << 8
- b = uint32(c.B)
- b |= b << 8
- a = uint32(c.A)
- a |= a << 8
- return
-}
-
-// RGBA64 represents a 64-bit alpha-premultiplied color, having 16 bits for
-// each of red, green, blue and alpha.
-//
-// An alpha-premultiplied color component C has been scaled by alpha (A), so
-// has valid values 0 <= C <= A.
-type RGBA64 struct {
- R, G, B, A uint16
-}
-
-func (c RGBA64) RGBA() (r, g, b, a uint32) {
- return uint32(c.R), uint32(c.G), uint32(c.B), uint32(c.A)
-}
-
-// NRGBA represents a non-alpha-premultiplied 32-bit color.
-type NRGBA struct {
- R, G, B, A uint8
-}
-
-func (c NRGBA) RGBA() (r, g, b, a uint32) {
- r = uint32(c.R)
- r |= r << 8
- r *= uint32(c.A)
- r /= 0xff
- g = uint32(c.G)
- g |= g << 8
- g *= uint32(c.A)
- g /= 0xff
- b = uint32(c.B)
- b |= b << 8
- b *= uint32(c.A)
- b /= 0xff
- a = uint32(c.A)
- a |= a << 8
- return
-}
-
-// NRGBA64 represents a non-alpha-premultiplied 64-bit color,
-// having 16 bits for each of red, green, blue and alpha.
-type NRGBA64 struct {
- R, G, B, A uint16
-}
-
-func (c NRGBA64) RGBA() (r, g, b, a uint32) {
- r = uint32(c.R)
- r *= uint32(c.A)
- r /= 0xffff
- g = uint32(c.G)
- g *= uint32(c.A)
- g /= 0xffff
- b = uint32(c.B)
- b *= uint32(c.A)
- b /= 0xffff
- a = uint32(c.A)
- return
-}
-
-// Alpha represents an 8-bit alpha color.
-type Alpha struct {
- A uint8
-}
-
-func (c Alpha) RGBA() (r, g, b, a uint32) {
- a = uint32(c.A)
- a |= a << 8
- return a, a, a, a
-}
-
-// Alpha16 represents a 16-bit alpha color.
-type Alpha16 struct {
- A uint16
-}
-
-func (c Alpha16) RGBA() (r, g, b, a uint32) {
- a = uint32(c.A)
- return a, a, a, a
-}
-
-// Gray represents an 8-bit grayscale color.
-type Gray struct {
- Y uint8
-}
-
-func (c Gray) RGBA() (r, g, b, a uint32) {
- y := uint32(c.Y)
- y |= y << 8
- return y, y, y, 0xffff
-}
-
-// Gray16 represents a 16-bit grayscale color.
-type Gray16 struct {
- Y uint16
-}
-
-func (c Gray16) RGBA() (r, g, b, a uint32) {
- y := uint32(c.Y)
- return y, y, y, 0xffff
-}
-
-// Model can convert any Color to one from its own color model. The conversion
-// may be lossy.
-type Model interface {
- Convert(c Color) Color
-}
-
-// ModelFunc returns a Model that invokes f to implement the conversion.
-func ModelFunc(f func(Color) Color) Model {
- // Note: using *modelFunc as the implementation
- // means that callers can still use comparisons
- // like m == RGBAModel. This is not possible if
- // we use the func value directly, because funcs
- // are no longer comparable.
- return &modelFunc{f}
-}
-
-type modelFunc struct {
- f func(Color) Color
-}
-
-func (m *modelFunc) Convert(c Color) Color {
- return m.f(c)
-}
-
-// Models for the standard color types.
-var (
- RGBAModel Model = ModelFunc(rgbaModel)
- RGBA64Model Model = ModelFunc(rgba64Model)
- NRGBAModel Model = ModelFunc(nrgbaModel)
- NRGBA64Model Model = ModelFunc(nrgba64Model)
- AlphaModel Model = ModelFunc(alphaModel)
- Alpha16Model Model = ModelFunc(alpha16Model)
- GrayModel Model = ModelFunc(grayModel)
- Gray16Model Model = ModelFunc(gray16Model)
-)
-
-func rgbaModel(c Color) Color {
- if _, ok := c.(RGBA); ok {
- return c
- }
- r, g, b, a := c.RGBA()
- return RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)}
-}
-
-func rgba64Model(c Color) Color {
- if _, ok := c.(RGBA64); ok {
- return c
- }
- r, g, b, a := c.RGBA()
- return RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-func nrgbaModel(c Color) Color {
- if _, ok := c.(NRGBA); ok {
- return c
- }
- r, g, b, a := c.RGBA()
- if a == 0xffff {
- return NRGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), 0xff}
- }
- if a == 0 {
- return NRGBA{0, 0, 0, 0}
- }
- // Since Color.RGBA returns an alpha-premultiplied color, we should have r <= a && g <= a && b <= a.
- r = (r * 0xffff) / a
- g = (g * 0xffff) / a
- b = (b * 0xffff) / a
- return NRGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)}
-}
-
-func nrgba64Model(c Color) Color {
- if _, ok := c.(NRGBA64); ok {
- return c
- }
- r, g, b, a := c.RGBA()
- if a == 0xffff {
- return NRGBA64{uint16(r), uint16(g), uint16(b), 0xffff}
- }
- if a == 0 {
- return NRGBA64{0, 0, 0, 0}
- }
- // Since Color.RGBA returns an alpha-premultiplied color, we should have r <= a && g <= a && b <= a.
- r = (r * 0xffff) / a
- g = (g * 0xffff) / a
- b = (b * 0xffff) / a
- return NRGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-func alphaModel(c Color) Color {
- if _, ok := c.(Alpha); ok {
- return c
- }
- _, _, _, a := c.RGBA()
- return Alpha{uint8(a >> 8)}
-}
-
-func alpha16Model(c Color) Color {
- if _, ok := c.(Alpha16); ok {
- return c
- }
- _, _, _, a := c.RGBA()
- return Alpha16{uint16(a)}
-}
-
-func grayModel(c Color) Color {
- if _, ok := c.(Gray); ok {
- return c
- }
- r, g, b, _ := c.RGBA()
-
- // These coefficients (the fractions 0.299, 0.587 and 0.114) are the same
- // as those given by the JFIF specification and used by func RGBToYCbCr in
- // ycbcr.go.
- //
- // Note that 19595 + 38470 + 7471 equals 65536.
- //
- // The 24 is 16 + 8. The 16 is the same as used in RGBToYCbCr. The 8 is
- // because the return value is 8 bit color, not 16 bit color.
- y := (19595*r + 38470*g + 7471*b + 1<<15) >> 24
-
- return Gray{uint8(y)}
-}
-
-func gray16Model(c Color) Color {
- if _, ok := c.(Gray16); ok {
- return c
- }
- r, g, b, _ := c.RGBA()
-
- // These coefficients (the fractions 0.299, 0.587 and 0.114) are the same
- // as those given by the JFIF specification and used by func RGBToYCbCr in
- // ycbcr.go.
- //
- // Note that 19595 + 38470 + 7471 equals 65536.
- y := (19595*r + 38470*g + 7471*b + 1<<15) >> 16
-
- return Gray16{uint16(y)}
-}
-
-// Palette is a palette of colors.
-type Palette []Color
-
-// Convert returns the palette color closest to c in Euclidean R,G,B space.
-func (p Palette) Convert(c Color) Color {
- if len(p) == 0 {
- return nil
- }
- return p[p.Index(c)]
-}
-
-// Index returns the index of the palette color closest to c in Euclidean
-// R,G,B,A space.
-func (p Palette) Index(c Color) int {
- // A batch version of this computation is in image/draw/draw.go.
-
- cr, cg, cb, ca := c.RGBA()
- ret, bestSum := 0, uint32(1<<32-1)
- for i, v := range p {
- vr, vg, vb, va := v.RGBA()
- sum := sqDiff(cr, vr) + sqDiff(cg, vg) + sqDiff(cb, vb) + sqDiff(ca, va)
- if sum < bestSum {
- if sum == 0 {
- return i
- }
- ret, bestSum = i, sum
- }
- }
- return ret
-}
-
-// sqDiff returns the squared-difference of x and y, shifted by 2 so that
-// adding four of those won't overflow a uint32.
-//
-// x and y are both assumed to be in the range [0, 0xffff].
-func sqDiff(x, y uint32) uint32 {
- // The canonical code of this function looks as follows:
- //
- // var d uint32
- // if x > y {
- // d = x - y
- // } else {
- // d = y - x
- // }
- // return (d * d) >> 2
- //
- // Language spec guarantees the following properties of unsigned integer
- // values operations with respect to overflow/wrap around:
- //
- // > For unsigned integer values, the operations +, -, *, and << are
- // > computed modulo 2n, where n is the bit width of the unsigned
- // > integer's type. Loosely speaking, these unsigned integer operations
- // > discard high bits upon overflow, and programs may rely on ``wrap
- // > around''.
- //
- // Considering these properties and the fact that this function is
- // called in the hot paths (x,y loops), it is reduced to the below code
- // which is slightly faster. See TestSqDiff for correctness check.
- d := x - y
- return (d * d) >> 2
-}
-
-// Standard colors.
-var (
- Black = Gray16{0}
- White = Gray16{0xffff}
- Transparent = Alpha16{0}
- Opaque = Alpha16{0xffff}
-)
diff --git a/contrib/go/_std_1.21/src/image/color/ya.make b/contrib/go/_std_1.21/src/image/color/ya.make
deleted file mode 100644
index 0a5a4dab7d..0000000000
--- a/contrib/go/_std_1.21/src/image/color/ya.make
+++ /dev/null
@@ -1,17 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- color.go
- ycbcr.go
-)
-
-GO_TEST_SRCS(
- color_test.go
- ycbcr_test.go
-)
-
-END()
-
-RECURSE(
- palette
-)
diff --git a/contrib/go/_std_1.21/src/image/color/ycbcr.go b/contrib/go/_std_1.21/src/image/color/ycbcr.go
deleted file mode 100644
index 8b6d508588..0000000000
--- a/contrib/go/_std_1.21/src/image/color/ycbcr.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package color
-
-// RGBToYCbCr converts an RGB triple to a Y'CbCr triple.
-func RGBToYCbCr(r, g, b uint8) (uint8, uint8, uint8) {
- // The JFIF specification says:
- // Y' = 0.2990*R + 0.5870*G + 0.1140*B
- // Cb = -0.1687*R - 0.3313*G + 0.5000*B + 128
- // Cr = 0.5000*R - 0.4187*G - 0.0813*B + 128
- // https://www.w3.org/Graphics/JPEG/jfif3.pdf says Y but means Y'.
-
- r1 := int32(r)
- g1 := int32(g)
- b1 := int32(b)
-
- // yy is in range [0,0xff].
- //
- // Note that 19595 + 38470 + 7471 equals 65536.
- yy := (19595*r1 + 38470*g1 + 7471*b1 + 1<<15) >> 16
-
- // The bit twiddling below is equivalent to
- //
- // cb := (-11056*r1 - 21712*g1 + 32768*b1 + 257<<15) >> 16
- // if cb < 0 {
- // cb = 0
- // } else if cb > 0xff {
- // cb = ^int32(0)
- // }
- //
- // but uses fewer branches and is faster.
- // Note that the uint8 type conversion in the return
- // statement will convert ^int32(0) to 0xff.
- // The code below to compute cr uses a similar pattern.
- //
- // Note that -11056 - 21712 + 32768 equals 0.
- cb := -11056*r1 - 21712*g1 + 32768*b1 + 257<<15
- if uint32(cb)&0xff000000 == 0 {
- cb >>= 16
- } else {
- cb = ^(cb >> 31)
- }
-
- // Note that 32768 - 27440 - 5328 equals 0.
- cr := 32768*r1 - 27440*g1 - 5328*b1 + 257<<15
- if uint32(cr)&0xff000000 == 0 {
- cr >>= 16
- } else {
- cr = ^(cr >> 31)
- }
-
- return uint8(yy), uint8(cb), uint8(cr)
-}
-
-// YCbCrToRGB converts a Y'CbCr triple to an RGB triple.
-func YCbCrToRGB(y, cb, cr uint8) (uint8, uint8, uint8) {
- // The JFIF specification says:
- // R = Y' + 1.40200*(Cr-128)
- // G = Y' - 0.34414*(Cb-128) - 0.71414*(Cr-128)
- // B = Y' + 1.77200*(Cb-128)
- // https://www.w3.org/Graphics/JPEG/jfif3.pdf says Y but means Y'.
- //
- // Those formulae use non-integer multiplication factors. When computing,
- // integer math is generally faster than floating point math. We multiply
- // all of those factors by 1<<16 and round to the nearest integer:
- // 91881 = roundToNearestInteger(1.40200 * 65536).
- // 22554 = roundToNearestInteger(0.34414 * 65536).
- // 46802 = roundToNearestInteger(0.71414 * 65536).
- // 116130 = roundToNearestInteger(1.77200 * 65536).
- //
- // Adding a rounding adjustment in the range [0, 1<<16-1] and then shifting
- // right by 16 gives us an integer math version of the original formulae.
- // R = (65536*Y' + 91881 *(Cr-128) + adjustment) >> 16
- // G = (65536*Y' - 22554 *(Cb-128) - 46802*(Cr-128) + adjustment) >> 16
- // B = (65536*Y' + 116130 *(Cb-128) + adjustment) >> 16
- // A constant rounding adjustment of 1<<15, one half of 1<<16, would mean
- // round-to-nearest when dividing by 65536 (shifting right by 16).
- // Similarly, a constant rounding adjustment of 0 would mean round-down.
- //
- // Defining YY1 = 65536*Y' + adjustment simplifies the formulae and
- // requires fewer CPU operations:
- // R = (YY1 + 91881 *(Cr-128) ) >> 16
- // G = (YY1 - 22554 *(Cb-128) - 46802*(Cr-128)) >> 16
- // B = (YY1 + 116130 *(Cb-128) ) >> 16
- //
- // The inputs (y, cb, cr) are 8 bit color, ranging in [0x00, 0xff]. In this
- // function, the output is also 8 bit color, but in the related YCbCr.RGBA
- // method, below, the output is 16 bit color, ranging in [0x0000, 0xffff].
- // Outputting 16 bit color simply requires changing the 16 to 8 in the "R =
- // etc >> 16" equation, and likewise for G and B.
- //
- // As mentioned above, a constant rounding adjustment of 1<<15 is a natural
- // choice, but there is an additional constraint: if c0 := YCbCr{Y: y, Cb:
- // 0x80, Cr: 0x80} and c1 := Gray{Y: y} then c0.RGBA() should equal
- // c1.RGBA(). Specifically, if y == 0 then "R = etc >> 8" should yield
- // 0x0000 and if y == 0xff then "R = etc >> 8" should yield 0xffff. If we
- // used a constant rounding adjustment of 1<<15, then it would yield 0x0080
- // and 0xff80 respectively.
- //
- // Note that when cb == 0x80 and cr == 0x80 then the formulae collapse to:
- // R = YY1 >> n
- // G = YY1 >> n
- // B = YY1 >> n
- // where n is 16 for this function (8 bit color output) and 8 for the
- // YCbCr.RGBA method (16 bit color output).
- //
- // The solution is to make the rounding adjustment non-constant, and equal
- // to 257*Y', which ranges over [0, 1<<16-1] as Y' ranges over [0, 255].
- // YY1 is then defined as:
- // YY1 = 65536*Y' + 257*Y'
- // or equivalently:
- // YY1 = Y' * 0x10101
- yy1 := int32(y) * 0x10101
- cb1 := int32(cb) - 128
- cr1 := int32(cr) - 128
-
- // The bit twiddling below is equivalent to
- //
- // r := (yy1 + 91881*cr1) >> 16
- // if r < 0 {
- // r = 0
- // } else if r > 0xff {
- // r = ^int32(0)
- // }
- //
- // but uses fewer branches and is faster.
- // Note that the uint8 type conversion in the return
- // statement will convert ^int32(0) to 0xff.
- // The code below to compute g and b uses a similar pattern.
- r := yy1 + 91881*cr1
- if uint32(r)&0xff000000 == 0 {
- r >>= 16
- } else {
- r = ^(r >> 31)
- }
-
- g := yy1 - 22554*cb1 - 46802*cr1
- if uint32(g)&0xff000000 == 0 {
- g >>= 16
- } else {
- g = ^(g >> 31)
- }
-
- b := yy1 + 116130*cb1
- if uint32(b)&0xff000000 == 0 {
- b >>= 16
- } else {
- b = ^(b >> 31)
- }
-
- return uint8(r), uint8(g), uint8(b)
-}
-
-// YCbCr represents a fully opaque 24-bit Y'CbCr color, having 8 bits each for
-// one luma and two chroma components.
-//
-// JPEG, VP8, the MPEG family and other codecs use this color model. Such
-// codecs often use the terms YUV and Y'CbCr interchangeably, but strictly
-// speaking, the term YUV applies only to analog video signals, and Y' (luma)
-// is Y (luminance) after applying gamma correction.
-//
-// Conversion between RGB and Y'CbCr is lossy and there are multiple, slightly
-// different formulae for converting between the two. This package follows
-// the JFIF specification at https://www.w3.org/Graphics/JPEG/jfif3.pdf.
-type YCbCr struct {
- Y, Cb, Cr uint8
-}
-
-func (c YCbCr) RGBA() (uint32, uint32, uint32, uint32) {
- // This code is a copy of the YCbCrToRGB function above, except that it
- // returns values in the range [0, 0xffff] instead of [0, 0xff]. There is a
- // subtle difference between doing this and having YCbCr satisfy the Color
- // interface by first converting to an RGBA. The latter loses some
- // information by going to and from 8 bits per channel.
- //
- // For example, this code:
- // const y, cb, cr = 0x7f, 0x7f, 0x7f
- // r, g, b := color.YCbCrToRGB(y, cb, cr)
- // r0, g0, b0, _ := color.YCbCr{y, cb, cr}.RGBA()
- // r1, g1, b1, _ := color.RGBA{r, g, b, 0xff}.RGBA()
- // fmt.Printf("0x%04x 0x%04x 0x%04x\n", r0, g0, b0)
- // fmt.Printf("0x%04x 0x%04x 0x%04x\n", r1, g1, b1)
- // prints:
- // 0x7e18 0x808d 0x7db9
- // 0x7e7e 0x8080 0x7d7d
-
- yy1 := int32(c.Y) * 0x10101
- cb1 := int32(c.Cb) - 128
- cr1 := int32(c.Cr) - 128
-
- // The bit twiddling below is equivalent to
- //
- // r := (yy1 + 91881*cr1) >> 8
- // if r < 0 {
- // r = 0
- // } else if r > 0xff {
- // r = 0xffff
- // }
- //
- // but uses fewer branches and is faster.
- // The code below to compute g and b uses a similar pattern.
- r := yy1 + 91881*cr1
- if uint32(r)&0xff000000 == 0 {
- r >>= 8
- } else {
- r = ^(r >> 31) & 0xffff
- }
-
- g := yy1 - 22554*cb1 - 46802*cr1
- if uint32(g)&0xff000000 == 0 {
- g >>= 8
- } else {
- g = ^(g >> 31) & 0xffff
- }
-
- b := yy1 + 116130*cb1
- if uint32(b)&0xff000000 == 0 {
- b >>= 8
- } else {
- b = ^(b >> 31) & 0xffff
- }
-
- return uint32(r), uint32(g), uint32(b), 0xffff
-}
-
-// YCbCrModel is the Model for Y'CbCr colors.
-var YCbCrModel Model = ModelFunc(yCbCrModel)
-
-func yCbCrModel(c Color) Color {
- if _, ok := c.(YCbCr); ok {
- return c
- }
- r, g, b, _ := c.RGBA()
- y, u, v := RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))
- return YCbCr{y, u, v}
-}
-
-// NYCbCrA represents a non-alpha-premultiplied Y'CbCr-with-alpha color, having
-// 8 bits each for one luma, two chroma and one alpha component.
-type NYCbCrA struct {
- YCbCr
- A uint8
-}
-
-func (c NYCbCrA) RGBA() (uint32, uint32, uint32, uint32) {
- // The first part of this method is the same as YCbCr.RGBA.
- yy1 := int32(c.Y) * 0x10101
- cb1 := int32(c.Cb) - 128
- cr1 := int32(c.Cr) - 128
-
- // The bit twiddling below is equivalent to
- //
- // r := (yy1 + 91881*cr1) >> 8
- // if r < 0 {
- // r = 0
- // } else if r > 0xff {
- // r = 0xffff
- // }
- //
- // but uses fewer branches and is faster.
- // The code below to compute g and b uses a similar pattern.
- r := yy1 + 91881*cr1
- if uint32(r)&0xff000000 == 0 {
- r >>= 8
- } else {
- r = ^(r >> 31) & 0xffff
- }
-
- g := yy1 - 22554*cb1 - 46802*cr1
- if uint32(g)&0xff000000 == 0 {
- g >>= 8
- } else {
- g = ^(g >> 31) & 0xffff
- }
-
- b := yy1 + 116130*cb1
- if uint32(b)&0xff000000 == 0 {
- b >>= 8
- } else {
- b = ^(b >> 31) & 0xffff
- }
-
- // The second part of this method applies the alpha.
- a := uint32(c.A) * 0x101
- return uint32(r) * a / 0xffff, uint32(g) * a / 0xffff, uint32(b) * a / 0xffff, a
-}
-
-// NYCbCrAModel is the Model for non-alpha-premultiplied Y'CbCr-with-alpha
-// colors.
-var NYCbCrAModel Model = ModelFunc(nYCbCrAModel)
-
-func nYCbCrAModel(c Color) Color {
- switch c := c.(type) {
- case NYCbCrA:
- return c
- case YCbCr:
- return NYCbCrA{c, 0xff}
- }
- r, g, b, a := c.RGBA()
-
- // Convert from alpha-premultiplied to non-alpha-premultiplied.
- if a != 0 {
- r = (r * 0xffff) / a
- g = (g * 0xffff) / a
- b = (b * 0xffff) / a
- }
-
- y, u, v := RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))
- return NYCbCrA{YCbCr{Y: y, Cb: u, Cr: v}, uint8(a >> 8)}
-}
-
-// RGBToCMYK converts an RGB triple to a CMYK quadruple.
-func RGBToCMYK(r, g, b uint8) (uint8, uint8, uint8, uint8) {
- rr := uint32(r)
- gg := uint32(g)
- bb := uint32(b)
- w := rr
- if w < gg {
- w = gg
- }
- if w < bb {
- w = bb
- }
- if w == 0 {
- return 0, 0, 0, 0xff
- }
- c := (w - rr) * 0xff / w
- m := (w - gg) * 0xff / w
- y := (w - bb) * 0xff / w
- return uint8(c), uint8(m), uint8(y), uint8(0xff - w)
-}
-
-// CMYKToRGB converts a CMYK quadruple to an RGB triple.
-func CMYKToRGB(c, m, y, k uint8) (uint8, uint8, uint8) {
- w := 0xffff - uint32(k)*0x101
- r := (0xffff - uint32(c)*0x101) * w / 0xffff
- g := (0xffff - uint32(m)*0x101) * w / 0xffff
- b := (0xffff - uint32(y)*0x101) * w / 0xffff
- return uint8(r >> 8), uint8(g >> 8), uint8(b >> 8)
-}
-
-// CMYK represents a fully opaque CMYK color, having 8 bits for each of cyan,
-// magenta, yellow and black.
-//
-// It is not associated with any particular color profile.
-type CMYK struct {
- C, M, Y, K uint8
-}
-
-func (c CMYK) RGBA() (uint32, uint32, uint32, uint32) {
- // This code is a copy of the CMYKToRGB function above, except that it
- // returns values in the range [0, 0xffff] instead of [0, 0xff].
-
- w := 0xffff - uint32(c.K)*0x101
- r := (0xffff - uint32(c.C)*0x101) * w / 0xffff
- g := (0xffff - uint32(c.M)*0x101) * w / 0xffff
- b := (0xffff - uint32(c.Y)*0x101) * w / 0xffff
- return r, g, b, 0xffff
-}
-
-// CMYKModel is the Model for CMYK colors.
-var CMYKModel Model = ModelFunc(cmykModel)
-
-func cmykModel(c Color) Color {
- if _, ok := c.(CMYK); ok {
- return c
- }
- r, g, b, _ := c.RGBA()
- cc, mm, yy, kk := RGBToCMYK(uint8(r>>8), uint8(g>>8), uint8(b>>8))
- return CMYK{cc, mm, yy, kk}
-}
diff --git a/contrib/go/_std_1.21/src/image/format.go b/contrib/go/_std_1.21/src/image/format.go
deleted file mode 100644
index 51d7ad9021..0000000000
--- a/contrib/go/_std_1.21/src/image/format.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package image
-
-import (
- "bufio"
- "errors"
- "io"
- "sync"
- "sync/atomic"
-)
-
-// ErrFormat indicates that decoding encountered an unknown format.
-var ErrFormat = errors.New("image: unknown format")
-
-// A format holds an image format's name, magic header and how to decode it.
-type format struct {
- name, magic string
- decode func(io.Reader) (Image, error)
- decodeConfig func(io.Reader) (Config, error)
-}
-
-// Formats is the list of registered formats.
-var (
- formatsMu sync.Mutex
- atomicFormats atomic.Value
-)
-
-// RegisterFormat registers an image format for use by Decode.
-// Name is the name of the format, like "jpeg" or "png".
-// Magic is the magic prefix that identifies the format's encoding. The magic
-// string can contain "?" wildcards that each match any one byte.
-// Decode is the function that decodes the encoded image.
-// DecodeConfig is the function that decodes just its configuration.
-func RegisterFormat(name, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error)) {
- formatsMu.Lock()
- formats, _ := atomicFormats.Load().([]format)
- atomicFormats.Store(append(formats, format{name, magic, decode, decodeConfig}))
- formatsMu.Unlock()
-}
-
-// A reader is an io.Reader that can also peek ahead.
-type reader interface {
- io.Reader
- Peek(int) ([]byte, error)
-}
-
-// asReader converts an io.Reader to a reader.
-func asReader(r io.Reader) reader {
- if rr, ok := r.(reader); ok {
- return rr
- }
- return bufio.NewReader(r)
-}
-
-// match reports whether magic matches b. Magic may contain "?" wildcards.
-func match(magic string, b []byte) bool {
- if len(magic) != len(b) {
- return false
- }
- for i, c := range b {
- if magic[i] != c && magic[i] != '?' {
- return false
- }
- }
- return true
-}
-
-// sniff determines the format of r's data.
-func sniff(r reader) format {
- formats, _ := atomicFormats.Load().([]format)
- for _, f := range formats {
- b, err := r.Peek(len(f.magic))
- if err == nil && match(f.magic, b) {
- return f
- }
- }
- return format{}
-}
-
-// Decode decodes an image that has been encoded in a registered format.
-// The string returned is the format name used during format registration.
-// Format registration is typically done by an init function in the codec-
-// specific package.
-func Decode(r io.Reader) (Image, string, error) {
- rr := asReader(r)
- f := sniff(rr)
- if f.decode == nil {
- return nil, "", ErrFormat
- }
- m, err := f.decode(rr)
- return m, f.name, err
-}
-
-// DecodeConfig decodes the color model and dimensions of an image that has
-// been encoded in a registered format. The string returned is the format name
-// used during format registration. Format registration is typically done by
-// an init function in the codec-specific package.
-func DecodeConfig(r io.Reader) (Config, string, error) {
- rr := asReader(r)
- f := sniff(rr)
- if f.decodeConfig == nil {
- return Config{}, "", ErrFormat
- }
- c, err := f.decodeConfig(rr)
- return c, f.name, err
-}
diff --git a/contrib/go/_std_1.21/src/image/geom.go b/contrib/go/_std_1.21/src/image/geom.go
deleted file mode 100644
index e71aa61187..0000000000
--- a/contrib/go/_std_1.21/src/image/geom.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package image
-
-import (
- "image/color"
- "math/bits"
- "strconv"
-)
-
-// A Point is an X, Y coordinate pair. The axes increase right and down.
-type Point struct {
- X, Y int
-}
-
-// String returns a string representation of p like "(3,4)".
-func (p Point) String() string {
- return "(" + strconv.Itoa(p.X) + "," + strconv.Itoa(p.Y) + ")"
-}
-
-// Add returns the vector p+q.
-func (p Point) Add(q Point) Point {
- return Point{p.X + q.X, p.Y + q.Y}
-}
-
-// Sub returns the vector p-q.
-func (p Point) Sub(q Point) Point {
- return Point{p.X - q.X, p.Y - q.Y}
-}
-
-// Mul returns the vector p*k.
-func (p Point) Mul(k int) Point {
- return Point{p.X * k, p.Y * k}
-}
-
-// Div returns the vector p/k.
-func (p Point) Div(k int) Point {
- return Point{p.X / k, p.Y / k}
-}
-
-// In reports whether p is in r.
-func (p Point) In(r Rectangle) bool {
- return r.Min.X <= p.X && p.X < r.Max.X &&
- r.Min.Y <= p.Y && p.Y < r.Max.Y
-}
-
-// Mod returns the point q in r such that p.X-q.X is a multiple of r's width
-// and p.Y-q.Y is a multiple of r's height.
-func (p Point) Mod(r Rectangle) Point {
- w, h := r.Dx(), r.Dy()
- p = p.Sub(r.Min)
- p.X = p.X % w
- if p.X < 0 {
- p.X += w
- }
- p.Y = p.Y % h
- if p.Y < 0 {
- p.Y += h
- }
- return p.Add(r.Min)
-}
-
-// Eq reports whether p and q are equal.
-func (p Point) Eq(q Point) bool {
- return p == q
-}
-
-// ZP is the zero Point.
-//
-// Deprecated: Use a literal image.Point{} instead.
-var ZP Point
-
-// Pt is shorthand for Point{X, Y}.
-func Pt(X, Y int) Point {
- return Point{X, Y}
-}
-
-// A Rectangle contains the points with Min.X <= X < Max.X, Min.Y <= Y < Max.Y.
-// It is well-formed if Min.X <= Max.X and likewise for Y. Points are always
-// well-formed. A rectangle's methods always return well-formed outputs for
-// well-formed inputs.
-//
-// A Rectangle is also an Image whose bounds are the rectangle itself. At
-// returns color.Opaque for points in the rectangle and color.Transparent
-// otherwise.
-type Rectangle struct {
- Min, Max Point
-}
-
-// String returns a string representation of r like "(3,4)-(6,5)".
-func (r Rectangle) String() string {
- return r.Min.String() + "-" + r.Max.String()
-}
-
-// Dx returns r's width.
-func (r Rectangle) Dx() int {
- return r.Max.X - r.Min.X
-}
-
-// Dy returns r's height.
-func (r Rectangle) Dy() int {
- return r.Max.Y - r.Min.Y
-}
-
-// Size returns r's width and height.
-func (r Rectangle) Size() Point {
- return Point{
- r.Max.X - r.Min.X,
- r.Max.Y - r.Min.Y,
- }
-}
-
-// Add returns the rectangle r translated by p.
-func (r Rectangle) Add(p Point) Rectangle {
- return Rectangle{
- Point{r.Min.X + p.X, r.Min.Y + p.Y},
- Point{r.Max.X + p.X, r.Max.Y + p.Y},
- }
-}
-
-// Sub returns the rectangle r translated by -p.
-func (r Rectangle) Sub(p Point) Rectangle {
- return Rectangle{
- Point{r.Min.X - p.X, r.Min.Y - p.Y},
- Point{r.Max.X - p.X, r.Max.Y - p.Y},
- }
-}
-
-// Inset returns the rectangle r inset by n, which may be negative. If either
-// of r's dimensions is less than 2*n then an empty rectangle near the center
-// of r will be returned.
-func (r Rectangle) Inset(n int) Rectangle {
- if r.Dx() < 2*n {
- r.Min.X = (r.Min.X + r.Max.X) / 2
- r.Max.X = r.Min.X
- } else {
- r.Min.X += n
- r.Max.X -= n
- }
- if r.Dy() < 2*n {
- r.Min.Y = (r.Min.Y + r.Max.Y) / 2
- r.Max.Y = r.Min.Y
- } else {
- r.Min.Y += n
- r.Max.Y -= n
- }
- return r
-}
-
-// Intersect returns the largest rectangle contained by both r and s. If the
-// two rectangles do not overlap then the zero rectangle will be returned.
-func (r Rectangle) Intersect(s Rectangle) Rectangle {
- if r.Min.X < s.Min.X {
- r.Min.X = s.Min.X
- }
- if r.Min.Y < s.Min.Y {
- r.Min.Y = s.Min.Y
- }
- if r.Max.X > s.Max.X {
- r.Max.X = s.Max.X
- }
- if r.Max.Y > s.Max.Y {
- r.Max.Y = s.Max.Y
- }
- // Letting r0 and s0 be the values of r and s at the time that the method
- // is called, this next line is equivalent to:
- //
- // if max(r0.Min.X, s0.Min.X) >= min(r0.Max.X, s0.Max.X) || likewiseForY { etc }
- if r.Empty() {
- return ZR
- }
- return r
-}
-
-// Union returns the smallest rectangle that contains both r and s.
-func (r Rectangle) Union(s Rectangle) Rectangle {
- if r.Empty() {
- return s
- }
- if s.Empty() {
- return r
- }
- if r.Min.X > s.Min.X {
- r.Min.X = s.Min.X
- }
- if r.Min.Y > s.Min.Y {
- r.Min.Y = s.Min.Y
- }
- if r.Max.X < s.Max.X {
- r.Max.X = s.Max.X
- }
- if r.Max.Y < s.Max.Y {
- r.Max.Y = s.Max.Y
- }
- return r
-}
-
-// Empty reports whether the rectangle contains no points.
-func (r Rectangle) Empty() bool {
- return r.Min.X >= r.Max.X || r.Min.Y >= r.Max.Y
-}
-
-// Eq reports whether r and s contain the same set of points. All empty
-// rectangles are considered equal.
-func (r Rectangle) Eq(s Rectangle) bool {
- return r == s || r.Empty() && s.Empty()
-}
-
-// Overlaps reports whether r and s have a non-empty intersection.
-func (r Rectangle) Overlaps(s Rectangle) bool {
- return !r.Empty() && !s.Empty() &&
- r.Min.X < s.Max.X && s.Min.X < r.Max.X &&
- r.Min.Y < s.Max.Y && s.Min.Y < r.Max.Y
-}
-
-// In reports whether every point in r is in s.
-func (r Rectangle) In(s Rectangle) bool {
- if r.Empty() {
- return true
- }
- // Note that r.Max is an exclusive bound for r, so that r.In(s)
- // does not require that r.Max.In(s).
- return s.Min.X <= r.Min.X && r.Max.X <= s.Max.X &&
- s.Min.Y <= r.Min.Y && r.Max.Y <= s.Max.Y
-}
-
-// Canon returns the canonical version of r. The returned rectangle has minimum
-// and maximum coordinates swapped if necessary so that it is well-formed.
-func (r Rectangle) Canon() Rectangle {
- if r.Max.X < r.Min.X {
- r.Min.X, r.Max.X = r.Max.X, r.Min.X
- }
- if r.Max.Y < r.Min.Y {
- r.Min.Y, r.Max.Y = r.Max.Y, r.Min.Y
- }
- return r
-}
-
-// At implements the Image interface.
-func (r Rectangle) At(x, y int) color.Color {
- if (Point{x, y}).In(r) {
- return color.Opaque
- }
- return color.Transparent
-}
-
-// RGBA64At implements the RGBA64Image interface.
-func (r Rectangle) RGBA64At(x, y int) color.RGBA64 {
- if (Point{x, y}).In(r) {
- return color.RGBA64{0xffff, 0xffff, 0xffff, 0xffff}
- }
- return color.RGBA64{}
-}
-
-// Bounds implements the Image interface.
-func (r Rectangle) Bounds() Rectangle {
- return r
-}
-
-// ColorModel implements the Image interface.
-func (r Rectangle) ColorModel() color.Model {
- return color.Alpha16Model
-}
-
-// ZR is the zero Rectangle.
-//
-// Deprecated: Use a literal image.Rectangle{} instead.
-var ZR Rectangle
-
-// Rect is shorthand for Rectangle{Pt(x0, y0), Pt(x1, y1)}. The returned
-// rectangle has minimum and maximum coordinates swapped if necessary so that
-// it is well-formed.
-func Rect(x0, y0, x1, y1 int) Rectangle {
- if x0 > x1 {
- x0, x1 = x1, x0
- }
- if y0 > y1 {
- y0, y1 = y1, y0
- }
- return Rectangle{Point{x0, y0}, Point{x1, y1}}
-}
-
-// mul3NonNeg returns (x * y * z), unless at least one argument is negative or
-// if the computation overflows the int type, in which case it returns -1.
-func mul3NonNeg(x int, y int, z int) int {
- if (x < 0) || (y < 0) || (z < 0) {
- return -1
- }
- hi, lo := bits.Mul64(uint64(x), uint64(y))
- if hi != 0 {
- return -1
- }
- hi, lo = bits.Mul64(lo, uint64(z))
- if hi != 0 {
- return -1
- }
- a := int(lo)
- if (a < 0) || (uint64(a) != lo) {
- return -1
- }
- return a
-}
-
-// add2NonNeg returns (x + y), unless at least one argument is negative or if
-// the computation overflows the int type, in which case it returns -1.
-func add2NonNeg(x int, y int) int {
- if (x < 0) || (y < 0) {
- return -1
- }
- a := x + y
- if a < 0 {
- return -1
- }
- return a
-}
diff --git a/contrib/go/_std_1.21/src/image/image.go b/contrib/go/_std_1.21/src/image/image.go
deleted file mode 100644
index dfb70d4eaf..0000000000
--- a/contrib/go/_std_1.21/src/image/image.go
+++ /dev/null
@@ -1,1273 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package image implements a basic 2-D image library.
-//
-// The fundamental interface is called Image. An Image contains colors, which
-// are described in the image/color package.
-//
-// Values of the Image interface are created either by calling functions such
-// as NewRGBA and NewPaletted, or by calling Decode on an io.Reader containing
-// image data in a format such as GIF, JPEG or PNG. Decoding any particular
-// image format requires the prior registration of a decoder function.
-// Registration is typically automatic as a side effect of initializing that
-// format's package so that, to decode a PNG image, it suffices to have
-//
-// import _ "image/png"
-//
-// in a program's main package. The _ means to import a package purely for its
-// initialization side effects.
-//
-// See "The Go image package" for more details:
-// https://golang.org/doc/articles/image_package.html
-package image
-
-import (
- "image/color"
-)
-
-// Config holds an image's color model and dimensions.
-type Config struct {
- ColorModel color.Model
- Width, Height int
-}
-
-// Image is a finite rectangular grid of color.Color values taken from a color
-// model.
-type Image interface {
- // ColorModel returns the Image's color model.
- ColorModel() color.Model
- // Bounds returns the domain for which At can return non-zero color.
- // The bounds do not necessarily contain the point (0, 0).
- Bounds() Rectangle
- // At returns the color of the pixel at (x, y).
- // At(Bounds().Min.X, Bounds().Min.Y) returns the upper-left pixel of the grid.
- // At(Bounds().Max.X-1, Bounds().Max.Y-1) returns the lower-right one.
- At(x, y int) color.Color
-}
-
-// RGBA64Image is an Image whose pixels can be converted directly to a
-// color.RGBA64.
-type RGBA64Image interface {
- // RGBA64At returns the RGBA64 color of the pixel at (x, y). It is
- // equivalent to calling At(x, y).RGBA() and converting the resulting
- // 32-bit return values to a color.RGBA64, but it can avoid allocations
- // from converting concrete color types to the color.Color interface type.
- RGBA64At(x, y int) color.RGBA64
- Image
-}
-
-// PalettedImage is an image whose colors may come from a limited palette.
-// If m is a PalettedImage and m.ColorModel() returns a color.Palette p,
-// then m.At(x, y) should be equivalent to p[m.ColorIndexAt(x, y)]. If m's
-// color model is not a color.Palette, then ColorIndexAt's behavior is
-// undefined.
-type PalettedImage interface {
- // ColorIndexAt returns the palette index of the pixel at (x, y).
- ColorIndexAt(x, y int) uint8
- Image
-}
-
-// pixelBufferLength returns the length of the []uint8 typed Pix slice field
-// for the NewXxx functions. Conceptually, this is just (bpp * width * height),
-// but this function panics if at least one of those is negative or if the
-// computation would overflow the int type.
-//
-// This panics instead of returning an error because of backwards
-// compatibility. The NewXxx functions do not return an error.
-func pixelBufferLength(bytesPerPixel int, r Rectangle, imageTypeName string) int {
- totalLength := mul3NonNeg(bytesPerPixel, r.Dx(), r.Dy())
- if totalLength < 0 {
- panic("image: New" + imageTypeName + " Rectangle has huge or negative dimensions")
- }
- return totalLength
-}
-
-// RGBA is an in-memory image whose At method returns color.RGBA values.
-type RGBA struct {
- // Pix holds the image's pixels, in R, G, B, A order. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *RGBA) ColorModel() color.Model { return color.RGBAModel }
-
-func (p *RGBA) Bounds() Rectangle { return p.Rect }
-
-func (p *RGBA) At(x, y int) color.Color {
- return p.RGBAAt(x, y)
-}
-
-func (p *RGBA) RGBA64At(x, y int) color.RGBA64 {
- if !(Point{x, y}.In(p.Rect)) {
- return color.RGBA64{}
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- r := uint16(s[0])
- g := uint16(s[1])
- b := uint16(s[2])
- a := uint16(s[3])
- return color.RGBA64{
- (r << 8) | r,
- (g << 8) | g,
- (b << 8) | b,
- (a << 8) | a,
- }
-}
-
-func (p *RGBA) RGBAAt(x, y int) color.RGBA {
- if !(Point{x, y}.In(p.Rect)) {
- return color.RGBA{}
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- return color.RGBA{s[0], s[1], s[2], s[3]}
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *RGBA) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
-}
-
-func (p *RGBA) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- c1 := color.RGBAModel.Convert(c).(color.RGBA)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = c1.R
- s[1] = c1.G
- s[2] = c1.B
- s[3] = c1.A
-}
-
-func (p *RGBA) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = uint8(c.R >> 8)
- s[1] = uint8(c.G >> 8)
- s[2] = uint8(c.B >> 8)
- s[3] = uint8(c.A >> 8)
-}
-
-func (p *RGBA) SetRGBA(x, y int, c color.RGBA) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = c.R
- s[1] = c.G
- s[2] = c.B
- s[3] = c.A
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *RGBA) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &RGBA{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &RGBA{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *RGBA) Opaque() bool {
- if p.Rect.Empty() {
- return true
- }
- i0, i1 := 3, p.Rect.Dx()*4
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for i := i0; i < i1; i += 4 {
- if p.Pix[i] != 0xff {
- return false
- }
- }
- i0 += p.Stride
- i1 += p.Stride
- }
- return true
-}
-
-// NewRGBA returns a new RGBA image with the given bounds.
-func NewRGBA(r Rectangle) *RGBA {
- return &RGBA{
- Pix: make([]uint8, pixelBufferLength(4, r, "RGBA")),
- Stride: 4 * r.Dx(),
- Rect: r,
- }
-}
-
-// RGBA64 is an in-memory image whose At method returns color.RGBA64 values.
-type RGBA64 struct {
- // Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *RGBA64) ColorModel() color.Model { return color.RGBA64Model }
-
-func (p *RGBA64) Bounds() Rectangle { return p.Rect }
-
-func (p *RGBA64) At(x, y int) color.Color {
- return p.RGBA64At(x, y)
-}
-
-func (p *RGBA64) RGBA64At(x, y int) color.RGBA64 {
- if !(Point{x, y}.In(p.Rect)) {
- return color.RGBA64{}
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
- return color.RGBA64{
- uint16(s[0])<<8 | uint16(s[1]),
- uint16(s[2])<<8 | uint16(s[3]),
- uint16(s[4])<<8 | uint16(s[5]),
- uint16(s[6])<<8 | uint16(s[7]),
- }
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *RGBA64) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
-}
-
-func (p *RGBA64) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- c1 := color.RGBA64Model.Convert(c).(color.RGBA64)
- s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = uint8(c1.R >> 8)
- s[1] = uint8(c1.R)
- s[2] = uint8(c1.G >> 8)
- s[3] = uint8(c1.G)
- s[4] = uint8(c1.B >> 8)
- s[5] = uint8(c1.B)
- s[6] = uint8(c1.A >> 8)
- s[7] = uint8(c1.A)
-}
-
-func (p *RGBA64) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = uint8(c.R >> 8)
- s[1] = uint8(c.R)
- s[2] = uint8(c.G >> 8)
- s[3] = uint8(c.G)
- s[4] = uint8(c.B >> 8)
- s[5] = uint8(c.B)
- s[6] = uint8(c.A >> 8)
- s[7] = uint8(c.A)
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *RGBA64) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &RGBA64{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &RGBA64{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *RGBA64) Opaque() bool {
- if p.Rect.Empty() {
- return true
- }
- i0, i1 := 6, p.Rect.Dx()*8
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for i := i0; i < i1; i += 8 {
- if p.Pix[i+0] != 0xff || p.Pix[i+1] != 0xff {
- return false
- }
- }
- i0 += p.Stride
- i1 += p.Stride
- }
- return true
-}
-
-// NewRGBA64 returns a new RGBA64 image with the given bounds.
-func NewRGBA64(r Rectangle) *RGBA64 {
- return &RGBA64{
- Pix: make([]uint8, pixelBufferLength(8, r, "RGBA64")),
- Stride: 8 * r.Dx(),
- Rect: r,
- }
-}
-
-// NRGBA is an in-memory image whose At method returns color.NRGBA values.
-type NRGBA struct {
- // Pix holds the image's pixels, in R, G, B, A order. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *NRGBA) ColorModel() color.Model { return color.NRGBAModel }
-
-func (p *NRGBA) Bounds() Rectangle { return p.Rect }
-
-func (p *NRGBA) At(x, y int) color.Color {
- return p.NRGBAAt(x, y)
-}
-
-func (p *NRGBA) RGBA64At(x, y int) color.RGBA64 {
- r, g, b, a := p.NRGBAAt(x, y).RGBA()
- return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-func (p *NRGBA) NRGBAAt(x, y int) color.NRGBA {
- if !(Point{x, y}.In(p.Rect)) {
- return color.NRGBA{}
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- return color.NRGBA{s[0], s[1], s[2], s[3]}
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *NRGBA) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
-}
-
-func (p *NRGBA) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- c1 := color.NRGBAModel.Convert(c).(color.NRGBA)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = c1.R
- s[1] = c1.G
- s[2] = c1.B
- s[3] = c1.A
-}
-
-func (p *NRGBA) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- r, g, b, a := uint32(c.R), uint32(c.G), uint32(c.B), uint32(c.A)
- if (a != 0) && (a != 0xffff) {
- r = (r * 0xffff) / a
- g = (g * 0xffff) / a
- b = (b * 0xffff) / a
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = uint8(r >> 8)
- s[1] = uint8(g >> 8)
- s[2] = uint8(b >> 8)
- s[3] = uint8(a >> 8)
-}
-
-func (p *NRGBA) SetNRGBA(x, y int, c color.NRGBA) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = c.R
- s[1] = c.G
- s[2] = c.B
- s[3] = c.A
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *NRGBA) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &NRGBA{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &NRGBA{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *NRGBA) Opaque() bool {
- if p.Rect.Empty() {
- return true
- }
- i0, i1 := 3, p.Rect.Dx()*4
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for i := i0; i < i1; i += 4 {
- if p.Pix[i] != 0xff {
- return false
- }
- }
- i0 += p.Stride
- i1 += p.Stride
- }
- return true
-}
-
-// NewNRGBA returns a new NRGBA image with the given bounds.
-func NewNRGBA(r Rectangle) *NRGBA {
- return &NRGBA{
- Pix: make([]uint8, pixelBufferLength(4, r, "NRGBA")),
- Stride: 4 * r.Dx(),
- Rect: r,
- }
-}
-
-// NRGBA64 is an in-memory image whose At method returns color.NRGBA64 values.
-type NRGBA64 struct {
- // Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *NRGBA64) ColorModel() color.Model { return color.NRGBA64Model }
-
-func (p *NRGBA64) Bounds() Rectangle { return p.Rect }
-
-func (p *NRGBA64) At(x, y int) color.Color {
- return p.NRGBA64At(x, y)
-}
-
-func (p *NRGBA64) RGBA64At(x, y int) color.RGBA64 {
- r, g, b, a := p.NRGBA64At(x, y).RGBA()
- return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-func (p *NRGBA64) NRGBA64At(x, y int) color.NRGBA64 {
- if !(Point{x, y}.In(p.Rect)) {
- return color.NRGBA64{}
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
- return color.NRGBA64{
- uint16(s[0])<<8 | uint16(s[1]),
- uint16(s[2])<<8 | uint16(s[3]),
- uint16(s[4])<<8 | uint16(s[5]),
- uint16(s[6])<<8 | uint16(s[7]),
- }
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *NRGBA64) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*8
-}
-
-func (p *NRGBA64) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- c1 := color.NRGBA64Model.Convert(c).(color.NRGBA64)
- s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = uint8(c1.R >> 8)
- s[1] = uint8(c1.R)
- s[2] = uint8(c1.G >> 8)
- s[3] = uint8(c1.G)
- s[4] = uint8(c1.B >> 8)
- s[5] = uint8(c1.B)
- s[6] = uint8(c1.A >> 8)
- s[7] = uint8(c1.A)
-}
-
-func (p *NRGBA64) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- r, g, b, a := uint32(c.R), uint32(c.G), uint32(c.B), uint32(c.A)
- if (a != 0) && (a != 0xffff) {
- r = (r * 0xffff) / a
- g = (g * 0xffff) / a
- b = (b * 0xffff) / a
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = uint8(r >> 8)
- s[1] = uint8(r)
- s[2] = uint8(g >> 8)
- s[3] = uint8(g)
- s[4] = uint8(b >> 8)
- s[5] = uint8(b)
- s[6] = uint8(a >> 8)
- s[7] = uint8(a)
-}
-
-func (p *NRGBA64) SetNRGBA64(x, y int, c color.NRGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = uint8(c.R >> 8)
- s[1] = uint8(c.R)
- s[2] = uint8(c.G >> 8)
- s[3] = uint8(c.G)
- s[4] = uint8(c.B >> 8)
- s[5] = uint8(c.B)
- s[6] = uint8(c.A >> 8)
- s[7] = uint8(c.A)
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *NRGBA64) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &NRGBA64{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &NRGBA64{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *NRGBA64) Opaque() bool {
- if p.Rect.Empty() {
- return true
- }
- i0, i1 := 6, p.Rect.Dx()*8
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for i := i0; i < i1; i += 8 {
- if p.Pix[i+0] != 0xff || p.Pix[i+1] != 0xff {
- return false
- }
- }
- i0 += p.Stride
- i1 += p.Stride
- }
- return true
-}
-
-// NewNRGBA64 returns a new NRGBA64 image with the given bounds.
-func NewNRGBA64(r Rectangle) *NRGBA64 {
- return &NRGBA64{
- Pix: make([]uint8, pixelBufferLength(8, r, "NRGBA64")),
- Stride: 8 * r.Dx(),
- Rect: r,
- }
-}
-
-// Alpha is an in-memory image whose At method returns color.Alpha values.
-type Alpha struct {
- // Pix holds the image's pixels, as alpha values. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *Alpha) ColorModel() color.Model { return color.AlphaModel }
-
-func (p *Alpha) Bounds() Rectangle { return p.Rect }
-
-func (p *Alpha) At(x, y int) color.Color {
- return p.AlphaAt(x, y)
-}
-
-func (p *Alpha) RGBA64At(x, y int) color.RGBA64 {
- a := uint16(p.AlphaAt(x, y).A)
- a |= a << 8
- return color.RGBA64{a, a, a, a}
-}
-
-func (p *Alpha) AlphaAt(x, y int) color.Alpha {
- if !(Point{x, y}.In(p.Rect)) {
- return color.Alpha{}
- }
- i := p.PixOffset(x, y)
- return color.Alpha{p.Pix[i]}
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *Alpha) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
-}
-
-func (p *Alpha) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = color.AlphaModel.Convert(c).(color.Alpha).A
-}
-
-func (p *Alpha) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = uint8(c.A >> 8)
-}
-
-func (p *Alpha) SetAlpha(x, y int, c color.Alpha) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = c.A
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *Alpha) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &Alpha{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &Alpha{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *Alpha) Opaque() bool {
- if p.Rect.Empty() {
- return true
- }
- i0, i1 := 0, p.Rect.Dx()
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for i := i0; i < i1; i++ {
- if p.Pix[i] != 0xff {
- return false
- }
- }
- i0 += p.Stride
- i1 += p.Stride
- }
- return true
-}
-
-// NewAlpha returns a new Alpha image with the given bounds.
-func NewAlpha(r Rectangle) *Alpha {
- return &Alpha{
- Pix: make([]uint8, pixelBufferLength(1, r, "Alpha")),
- Stride: 1 * r.Dx(),
- Rect: r,
- }
-}
-
-// Alpha16 is an in-memory image whose At method returns color.Alpha16 values.
-type Alpha16 struct {
- // Pix holds the image's pixels, as alpha values in big-endian format. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *Alpha16) ColorModel() color.Model { return color.Alpha16Model }
-
-func (p *Alpha16) Bounds() Rectangle { return p.Rect }
-
-func (p *Alpha16) At(x, y int) color.Color {
- return p.Alpha16At(x, y)
-}
-
-func (p *Alpha16) RGBA64At(x, y int) color.RGBA64 {
- a := p.Alpha16At(x, y).A
- return color.RGBA64{a, a, a, a}
-}
-
-func (p *Alpha16) Alpha16At(x, y int) color.Alpha16 {
- if !(Point{x, y}.In(p.Rect)) {
- return color.Alpha16{}
- }
- i := p.PixOffset(x, y)
- return color.Alpha16{uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1])}
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *Alpha16) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
-}
-
-func (p *Alpha16) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- c1 := color.Alpha16Model.Convert(c).(color.Alpha16)
- p.Pix[i+0] = uint8(c1.A >> 8)
- p.Pix[i+1] = uint8(c1.A)
-}
-
-func (p *Alpha16) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i+0] = uint8(c.A >> 8)
- p.Pix[i+1] = uint8(c.A)
-}
-
-func (p *Alpha16) SetAlpha16(x, y int, c color.Alpha16) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i+0] = uint8(c.A >> 8)
- p.Pix[i+1] = uint8(c.A)
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *Alpha16) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &Alpha16{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &Alpha16{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *Alpha16) Opaque() bool {
- if p.Rect.Empty() {
- return true
- }
- i0, i1 := 0, p.Rect.Dx()*2
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for i := i0; i < i1; i += 2 {
- if p.Pix[i+0] != 0xff || p.Pix[i+1] != 0xff {
- return false
- }
- }
- i0 += p.Stride
- i1 += p.Stride
- }
- return true
-}
-
-// NewAlpha16 returns a new Alpha16 image with the given bounds.
-func NewAlpha16(r Rectangle) *Alpha16 {
- return &Alpha16{
- Pix: make([]uint8, pixelBufferLength(2, r, "Alpha16")),
- Stride: 2 * r.Dx(),
- Rect: r,
- }
-}
-
-// Gray is an in-memory image whose At method returns color.Gray values.
-type Gray struct {
- // Pix holds the image's pixels, as gray values. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *Gray) ColorModel() color.Model { return color.GrayModel }
-
-func (p *Gray) Bounds() Rectangle { return p.Rect }
-
-func (p *Gray) At(x, y int) color.Color {
- return p.GrayAt(x, y)
-}
-
-func (p *Gray) RGBA64At(x, y int) color.RGBA64 {
- gray := uint16(p.GrayAt(x, y).Y)
- gray |= gray << 8
- return color.RGBA64{gray, gray, gray, 0xffff}
-}
-
-func (p *Gray) GrayAt(x, y int) color.Gray {
- if !(Point{x, y}.In(p.Rect)) {
- return color.Gray{}
- }
- i := p.PixOffset(x, y)
- return color.Gray{p.Pix[i]}
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *Gray) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
-}
-
-func (p *Gray) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = color.GrayModel.Convert(c).(color.Gray).Y
-}
-
-func (p *Gray) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- // This formula is the same as in color.grayModel.
- gray := (19595*uint32(c.R) + 38470*uint32(c.G) + 7471*uint32(c.B) + 1<<15) >> 24
- i := p.PixOffset(x, y)
- p.Pix[i] = uint8(gray)
-}
-
-func (p *Gray) SetGray(x, y int, c color.Gray) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = c.Y
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *Gray) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &Gray{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &Gray{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *Gray) Opaque() bool {
- return true
-}
-
-// NewGray returns a new Gray image with the given bounds.
-func NewGray(r Rectangle) *Gray {
- return &Gray{
- Pix: make([]uint8, pixelBufferLength(1, r, "Gray")),
- Stride: 1 * r.Dx(),
- Rect: r,
- }
-}
-
-// Gray16 is an in-memory image whose At method returns color.Gray16 values.
-type Gray16 struct {
- // Pix holds the image's pixels, as gray values in big-endian format. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *Gray16) ColorModel() color.Model { return color.Gray16Model }
-
-func (p *Gray16) Bounds() Rectangle { return p.Rect }
-
-func (p *Gray16) At(x, y int) color.Color {
- return p.Gray16At(x, y)
-}
-
-func (p *Gray16) RGBA64At(x, y int) color.RGBA64 {
- gray := p.Gray16At(x, y).Y
- return color.RGBA64{gray, gray, gray, 0xffff}
-}
-
-func (p *Gray16) Gray16At(x, y int) color.Gray16 {
- if !(Point{x, y}.In(p.Rect)) {
- return color.Gray16{}
- }
- i := p.PixOffset(x, y)
- return color.Gray16{uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1])}
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *Gray16) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*2
-}
-
-func (p *Gray16) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- c1 := color.Gray16Model.Convert(c).(color.Gray16)
- p.Pix[i+0] = uint8(c1.Y >> 8)
- p.Pix[i+1] = uint8(c1.Y)
-}
-
-func (p *Gray16) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- // This formula is the same as in color.gray16Model.
- gray := (19595*uint32(c.R) + 38470*uint32(c.G) + 7471*uint32(c.B) + 1<<15) >> 16
- i := p.PixOffset(x, y)
- p.Pix[i+0] = uint8(gray >> 8)
- p.Pix[i+1] = uint8(gray)
-}
-
-func (p *Gray16) SetGray16(x, y int, c color.Gray16) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i+0] = uint8(c.Y >> 8)
- p.Pix[i+1] = uint8(c.Y)
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *Gray16) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &Gray16{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &Gray16{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *Gray16) Opaque() bool {
- return true
-}
-
-// NewGray16 returns a new Gray16 image with the given bounds.
-func NewGray16(r Rectangle) *Gray16 {
- return &Gray16{
- Pix: make([]uint8, pixelBufferLength(2, r, "Gray16")),
- Stride: 2 * r.Dx(),
- Rect: r,
- }
-}
-
-// CMYK is an in-memory image whose At method returns color.CMYK values.
-type CMYK struct {
- // Pix holds the image's pixels, in C, M, Y, K order. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
-}
-
-func (p *CMYK) ColorModel() color.Model { return color.CMYKModel }
-
-func (p *CMYK) Bounds() Rectangle { return p.Rect }
-
-func (p *CMYK) At(x, y int) color.Color {
- return p.CMYKAt(x, y)
-}
-
-func (p *CMYK) RGBA64At(x, y int) color.RGBA64 {
- r, g, b, a := p.CMYKAt(x, y).RGBA()
- return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-func (p *CMYK) CMYKAt(x, y int) color.CMYK {
- if !(Point{x, y}.In(p.Rect)) {
- return color.CMYK{}
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- return color.CMYK{s[0], s[1], s[2], s[3]}
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *CMYK) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*4
-}
-
-func (p *CMYK) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- c1 := color.CMYKModel.Convert(c).(color.CMYK)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = c1.C
- s[1] = c1.M
- s[2] = c1.Y
- s[3] = c1.K
-}
-
-func (p *CMYK) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- cc, mm, yy, kk := color.RGBToCMYK(uint8(c.R>>8), uint8(c.G>>8), uint8(c.B>>8))
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = cc
- s[1] = mm
- s[2] = yy
- s[3] = kk
-}
-
-func (p *CMYK) SetCMYK(x, y int, c color.CMYK) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857
- s[0] = c.C
- s[1] = c.M
- s[2] = c.Y
- s[3] = c.K
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *CMYK) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &CMYK{}
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &CMYK{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: r,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *CMYK) Opaque() bool {
- return true
-}
-
-// NewCMYK returns a new CMYK image with the given bounds.
-func NewCMYK(r Rectangle) *CMYK {
- return &CMYK{
- Pix: make([]uint8, pixelBufferLength(4, r, "CMYK")),
- Stride: 4 * r.Dx(),
- Rect: r,
- }
-}
-
-// Paletted is an in-memory image of uint8 indices into a given palette.
-type Paletted struct {
- // Pix holds the image's pixels, as palette indices. The pixel at
- // (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
- Pix []uint8
- // Stride is the Pix stride (in bytes) between vertically adjacent pixels.
- Stride int
- // Rect is the image's bounds.
- Rect Rectangle
- // Palette is the image's palette.
- Palette color.Palette
-}
-
-func (p *Paletted) ColorModel() color.Model { return p.Palette }
-
-func (p *Paletted) Bounds() Rectangle { return p.Rect }
-
-func (p *Paletted) At(x, y int) color.Color {
- if len(p.Palette) == 0 {
- return nil
- }
- if !(Point{x, y}.In(p.Rect)) {
- return p.Palette[0]
- }
- i := p.PixOffset(x, y)
- return p.Palette[p.Pix[i]]
-}
-
-func (p *Paletted) RGBA64At(x, y int) color.RGBA64 {
- if len(p.Palette) == 0 {
- return color.RGBA64{}
- }
- c := color.Color(nil)
- if !(Point{x, y}.In(p.Rect)) {
- c = p.Palette[0]
- } else {
- i := p.PixOffset(x, y)
- c = p.Palette[p.Pix[i]]
- }
- r, g, b, a := c.RGBA()
- return color.RGBA64{
- uint16(r),
- uint16(g),
- uint16(b),
- uint16(a),
- }
-}
-
-// PixOffset returns the index of the first element of Pix that corresponds to
-// the pixel at (x, y).
-func (p *Paletted) PixOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*1
-}
-
-func (p *Paletted) Set(x, y int, c color.Color) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = uint8(p.Palette.Index(c))
-}
-
-func (p *Paletted) SetRGBA64(x, y int, c color.RGBA64) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = uint8(p.Palette.Index(c))
-}
-
-func (p *Paletted) ColorIndexAt(x, y int) uint8 {
- if !(Point{x, y}.In(p.Rect)) {
- return 0
- }
- i := p.PixOffset(x, y)
- return p.Pix[i]
-}
-
-func (p *Paletted) SetColorIndex(x, y int, index uint8) {
- if !(Point{x, y}.In(p.Rect)) {
- return
- }
- i := p.PixOffset(x, y)
- p.Pix[i] = index
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *Paletted) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &Paletted{
- Palette: p.Palette,
- }
- }
- i := p.PixOffset(r.Min.X, r.Min.Y)
- return &Paletted{
- Pix: p.Pix[i:],
- Stride: p.Stride,
- Rect: p.Rect.Intersect(r),
- Palette: p.Palette,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *Paletted) Opaque() bool {
- var present [256]bool
- i0, i1 := 0, p.Rect.Dx()
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for _, c := range p.Pix[i0:i1] {
- present[c] = true
- }
- i0 += p.Stride
- i1 += p.Stride
- }
- for i, c := range p.Palette {
- if !present[i] {
- continue
- }
- _, _, _, a := c.RGBA()
- if a != 0xffff {
- return false
- }
- }
- return true
-}
-
-// NewPaletted returns a new Paletted image with the given width, height and
-// palette.
-func NewPaletted(r Rectangle, p color.Palette) *Paletted {
- return &Paletted{
- Pix: make([]uint8, pixelBufferLength(1, r, "Paletted")),
- Stride: 1 * r.Dx(),
- Rect: r,
- Palette: p,
- }
-}
diff --git a/contrib/go/_std_1.21/src/image/names.go b/contrib/go/_std_1.21/src/image/names.go
deleted file mode 100644
index 17b06588ac..0000000000
--- a/contrib/go/_std_1.21/src/image/names.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package image
-
-import (
- "image/color"
-)
-
-var (
- // Black is an opaque black uniform image.
- Black = NewUniform(color.Black)
- // White is an opaque white uniform image.
- White = NewUniform(color.White)
- // Transparent is a fully transparent uniform image.
- Transparent = NewUniform(color.Transparent)
- // Opaque is a fully opaque uniform image.
- Opaque = NewUniform(color.Opaque)
-)
-
-// Uniform is an infinite-sized Image of uniform color.
-// It implements the color.Color, color.Model, and Image interfaces.
-type Uniform struct {
- C color.Color
-}
-
-func (c *Uniform) RGBA() (r, g, b, a uint32) {
- return c.C.RGBA()
-}
-
-func (c *Uniform) ColorModel() color.Model {
- return c
-}
-
-func (c *Uniform) Convert(color.Color) color.Color {
- return c.C
-}
-
-func (c *Uniform) Bounds() Rectangle { return Rectangle{Point{-1e9, -1e9}, Point{1e9, 1e9}} }
-
-func (c *Uniform) At(x, y int) color.Color { return c.C }
-
-func (c *Uniform) RGBA64At(x, y int) color.RGBA64 {
- r, g, b, a := c.C.RGBA()
- return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (c *Uniform) Opaque() bool {
- _, _, _, a := c.C.RGBA()
- return a == 0xffff
-}
-
-// NewUniform returns a new Uniform image of the given color.
-func NewUniform(c color.Color) *Uniform {
- return &Uniform{c}
-}
diff --git a/contrib/go/_std_1.21/src/image/ya.make b/contrib/go/_std_1.21/src/image/ya.make
deleted file mode 100644
index b74f64f96a..0000000000
--- a/contrib/go/_std_1.21/src/image/ya.make
+++ /dev/null
@@ -1,31 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- format.go
- geom.go
- image.go
- names.go
- ycbcr.go
-)
-
-GO_TEST_SRCS(
- geom_test.go
- image_test.go
- ycbcr_test.go
-)
-
-GO_XTEST_SRCS(
- decode_example_test.go
- decode_test.go
-)
-
-END()
-
-RECURSE(
- color
- draw
- gif
- internal
- jpeg
- png
-)
diff --git a/contrib/go/_std_1.21/src/image/ycbcr.go b/contrib/go/_std_1.21/src/image/ycbcr.go
deleted file mode 100644
index 78f5ebe1d8..0000000000
--- a/contrib/go/_std_1.21/src/image/ycbcr.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package image
-
-import (
- "image/color"
-)
-
-// YCbCrSubsampleRatio is the chroma subsample ratio used in a YCbCr image.
-type YCbCrSubsampleRatio int
-
-const (
- YCbCrSubsampleRatio444 YCbCrSubsampleRatio = iota
- YCbCrSubsampleRatio422
- YCbCrSubsampleRatio420
- YCbCrSubsampleRatio440
- YCbCrSubsampleRatio411
- YCbCrSubsampleRatio410
-)
-
-func (s YCbCrSubsampleRatio) String() string {
- switch s {
- case YCbCrSubsampleRatio444:
- return "YCbCrSubsampleRatio444"
- case YCbCrSubsampleRatio422:
- return "YCbCrSubsampleRatio422"
- case YCbCrSubsampleRatio420:
- return "YCbCrSubsampleRatio420"
- case YCbCrSubsampleRatio440:
- return "YCbCrSubsampleRatio440"
- case YCbCrSubsampleRatio411:
- return "YCbCrSubsampleRatio411"
- case YCbCrSubsampleRatio410:
- return "YCbCrSubsampleRatio410"
- }
- return "YCbCrSubsampleRatioUnknown"
-}
-
-// YCbCr is an in-memory image of Y'CbCr colors. There is one Y sample per
-// pixel, but each Cb and Cr sample can span one or more pixels.
-// YStride is the Y slice index delta between vertically adjacent pixels.
-// CStride is the Cb and Cr slice index delta between vertically adjacent pixels
-// that map to separate chroma samples.
-// It is not an absolute requirement, but YStride and len(Y) are typically
-// multiples of 8, and:
-//
-// For 4:4:4, CStride == YStride/1 && len(Cb) == len(Cr) == len(Y)/1.
-// For 4:2:2, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/2.
-// For 4:2:0, CStride == YStride/2 && len(Cb) == len(Cr) == len(Y)/4.
-// For 4:4:0, CStride == YStride/1 && len(Cb) == len(Cr) == len(Y)/2.
-// For 4:1:1, CStride == YStride/4 && len(Cb) == len(Cr) == len(Y)/4.
-// For 4:1:0, CStride == YStride/4 && len(Cb) == len(Cr) == len(Y)/8.
-type YCbCr struct {
- Y, Cb, Cr []uint8
- YStride int
- CStride int
- SubsampleRatio YCbCrSubsampleRatio
- Rect Rectangle
-}
-
-func (p *YCbCr) ColorModel() color.Model {
- return color.YCbCrModel
-}
-
-func (p *YCbCr) Bounds() Rectangle {
- return p.Rect
-}
-
-func (p *YCbCr) At(x, y int) color.Color {
- return p.YCbCrAt(x, y)
-}
-
-func (p *YCbCr) RGBA64At(x, y int) color.RGBA64 {
- r, g, b, a := p.YCbCrAt(x, y).RGBA()
- return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-func (p *YCbCr) YCbCrAt(x, y int) color.YCbCr {
- if !(Point{x, y}.In(p.Rect)) {
- return color.YCbCr{}
- }
- yi := p.YOffset(x, y)
- ci := p.COffset(x, y)
- return color.YCbCr{
- p.Y[yi],
- p.Cb[ci],
- p.Cr[ci],
- }
-}
-
-// YOffset returns the index of the first element of Y that corresponds to
-// the pixel at (x, y).
-func (p *YCbCr) YOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.YStride + (x - p.Rect.Min.X)
-}
-
-// COffset returns the index of the first element of Cb or Cr that corresponds
-// to the pixel at (x, y).
-func (p *YCbCr) COffset(x, y int) int {
- switch p.SubsampleRatio {
- case YCbCrSubsampleRatio422:
- return (y-p.Rect.Min.Y)*p.CStride + (x/2 - p.Rect.Min.X/2)
- case YCbCrSubsampleRatio420:
- return (y/2-p.Rect.Min.Y/2)*p.CStride + (x/2 - p.Rect.Min.X/2)
- case YCbCrSubsampleRatio440:
- return (y/2-p.Rect.Min.Y/2)*p.CStride + (x - p.Rect.Min.X)
- case YCbCrSubsampleRatio411:
- return (y-p.Rect.Min.Y)*p.CStride + (x/4 - p.Rect.Min.X/4)
- case YCbCrSubsampleRatio410:
- return (y/2-p.Rect.Min.Y/2)*p.CStride + (x/4 - p.Rect.Min.X/4)
- }
- // Default to 4:4:4 subsampling.
- return (y-p.Rect.Min.Y)*p.CStride + (x - p.Rect.Min.X)
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *YCbCr) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &YCbCr{
- SubsampleRatio: p.SubsampleRatio,
- }
- }
- yi := p.YOffset(r.Min.X, r.Min.Y)
- ci := p.COffset(r.Min.X, r.Min.Y)
- return &YCbCr{
- Y: p.Y[yi:],
- Cb: p.Cb[ci:],
- Cr: p.Cr[ci:],
- SubsampleRatio: p.SubsampleRatio,
- YStride: p.YStride,
- CStride: p.CStride,
- Rect: r,
- }
-}
-
-func (p *YCbCr) Opaque() bool {
- return true
-}
-
-func yCbCrSize(r Rectangle, subsampleRatio YCbCrSubsampleRatio) (w, h, cw, ch int) {
- w, h = r.Dx(), r.Dy()
- switch subsampleRatio {
- case YCbCrSubsampleRatio422:
- cw = (r.Max.X+1)/2 - r.Min.X/2
- ch = h
- case YCbCrSubsampleRatio420:
- cw = (r.Max.X+1)/2 - r.Min.X/2
- ch = (r.Max.Y+1)/2 - r.Min.Y/2
- case YCbCrSubsampleRatio440:
- cw = w
- ch = (r.Max.Y+1)/2 - r.Min.Y/2
- case YCbCrSubsampleRatio411:
- cw = (r.Max.X+3)/4 - r.Min.X/4
- ch = h
- case YCbCrSubsampleRatio410:
- cw = (r.Max.X+3)/4 - r.Min.X/4
- ch = (r.Max.Y+1)/2 - r.Min.Y/2
- default:
- // Default to 4:4:4 subsampling.
- cw = w
- ch = h
- }
- return
-}
-
-// NewYCbCr returns a new YCbCr image with the given bounds and subsample
-// ratio.
-func NewYCbCr(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr {
- w, h, cw, ch := yCbCrSize(r, subsampleRatio)
-
- // totalLength should be the same as i2, below, for a valid Rectangle r.
- totalLength := add2NonNeg(
- mul3NonNeg(1, w, h),
- mul3NonNeg(2, cw, ch),
- )
- if totalLength < 0 {
- panic("image: NewYCbCr Rectangle has huge or negative dimensions")
- }
-
- i0 := w*h + 0*cw*ch
- i1 := w*h + 1*cw*ch
- i2 := w*h + 2*cw*ch
- b := make([]byte, i2)
- return &YCbCr{
- Y: b[:i0:i0],
- Cb: b[i0:i1:i1],
- Cr: b[i1:i2:i2],
- SubsampleRatio: subsampleRatio,
- YStride: w,
- CStride: cw,
- Rect: r,
- }
-}
-
-// NYCbCrA is an in-memory image of non-alpha-premultiplied Y'CbCr-with-alpha
-// colors. A and AStride are analogous to the Y and YStride fields of the
-// embedded YCbCr.
-type NYCbCrA struct {
- YCbCr
- A []uint8
- AStride int
-}
-
-func (p *NYCbCrA) ColorModel() color.Model {
- return color.NYCbCrAModel
-}
-
-func (p *NYCbCrA) At(x, y int) color.Color {
- return p.NYCbCrAAt(x, y)
-}
-
-func (p *NYCbCrA) RGBA64At(x, y int) color.RGBA64 {
- r, g, b, a := p.NYCbCrAAt(x, y).RGBA()
- return color.RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)}
-}
-
-func (p *NYCbCrA) NYCbCrAAt(x, y int) color.NYCbCrA {
- if !(Point{X: x, Y: y}.In(p.Rect)) {
- return color.NYCbCrA{}
- }
- yi := p.YOffset(x, y)
- ci := p.COffset(x, y)
- ai := p.AOffset(x, y)
- return color.NYCbCrA{
- color.YCbCr{
- Y: p.Y[yi],
- Cb: p.Cb[ci],
- Cr: p.Cr[ci],
- },
- p.A[ai],
- }
-}
-
-// AOffset returns the index of the first element of A that corresponds to the
-// pixel at (x, y).
-func (p *NYCbCrA) AOffset(x, y int) int {
- return (y-p.Rect.Min.Y)*p.AStride + (x - p.Rect.Min.X)
-}
-
-// SubImage returns an image representing the portion of the image p visible
-// through r. The returned value shares pixels with the original image.
-func (p *NYCbCrA) SubImage(r Rectangle) Image {
- r = r.Intersect(p.Rect)
- // If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
- // either r1 or r2 if the intersection is empty. Without explicitly checking for
- // this, the Pix[i:] expression below can panic.
- if r.Empty() {
- return &NYCbCrA{
- YCbCr: YCbCr{
- SubsampleRatio: p.SubsampleRatio,
- },
- }
- }
- yi := p.YOffset(r.Min.X, r.Min.Y)
- ci := p.COffset(r.Min.X, r.Min.Y)
- ai := p.AOffset(r.Min.X, r.Min.Y)
- return &NYCbCrA{
- YCbCr: YCbCr{
- Y: p.Y[yi:],
- Cb: p.Cb[ci:],
- Cr: p.Cr[ci:],
- SubsampleRatio: p.SubsampleRatio,
- YStride: p.YStride,
- CStride: p.CStride,
- Rect: r,
- },
- A: p.A[ai:],
- AStride: p.AStride,
- }
-}
-
-// Opaque scans the entire image and reports whether it is fully opaque.
-func (p *NYCbCrA) Opaque() bool {
- if p.Rect.Empty() {
- return true
- }
- i0, i1 := 0, p.Rect.Dx()
- for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
- for _, a := range p.A[i0:i1] {
- if a != 0xff {
- return false
- }
- }
- i0 += p.AStride
- i1 += p.AStride
- }
- return true
-}
-
-// NewNYCbCrA returns a new NYCbCrA image with the given bounds and subsample
-// ratio.
-func NewNYCbCrA(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA {
- w, h, cw, ch := yCbCrSize(r, subsampleRatio)
-
- // totalLength should be the same as i3, below, for a valid Rectangle r.
- totalLength := add2NonNeg(
- mul3NonNeg(2, w, h),
- mul3NonNeg(2, cw, ch),
- )
- if totalLength < 0 {
- panic("image: NewNYCbCrA Rectangle has huge or negative dimension")
- }
-
- i0 := 1*w*h + 0*cw*ch
- i1 := 1*w*h + 1*cw*ch
- i2 := 1*w*h + 2*cw*ch
- i3 := 2*w*h + 2*cw*ch
- b := make([]byte, i3)
- return &NYCbCrA{
- YCbCr: YCbCr{
- Y: b[:i0:i0],
- Cb: b[i0:i1:i1],
- Cr: b[i1:i2:i2],
- SubsampleRatio: subsampleRatio,
- YStride: w,
- CStride: cw,
- Rect: r,
- },
- A: b[i2:],
- AStride: w,
- }
-}
diff --git a/contrib/go/_std_1.21/src/testing/fstest/mapfs.go b/contrib/go/_std_1.21/src/testing/fstest/mapfs.go
deleted file mode 100644
index a0b1f65668..0000000000
--- a/contrib/go/_std_1.21/src/testing/fstest/mapfs.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fstest
-
-import (
- "io"
- "io/fs"
- "path"
- "sort"
- "strings"
- "time"
-)
-
-// A MapFS is a simple in-memory file system for use in tests,
-// represented as a map from path names (arguments to Open)
-// to information about the files or directories they represent.
-//
-// The map need not include parent directories for files contained
-// in the map; those will be synthesized if needed.
-// But a directory can still be included by setting the MapFile.Mode's ModeDir bit;
-// this may be necessary for detailed control over the directory's FileInfo
-// or to create an empty directory.
-//
-// File system operations read directly from the map,
-// so that the file system can be changed by editing the map as needed.
-// An implication is that file system operations must not run concurrently
-// with changes to the map, which would be a race.
-// Another implication is that opening or reading a directory requires
-// iterating over the entire map, so a MapFS should typically be used with not more
-// than a few hundred entries or directory reads.
-type MapFS map[string]*MapFile
-
-// A MapFile describes a single file in a MapFS.
-type MapFile struct {
- Data []byte // file content
- Mode fs.FileMode // FileInfo.Mode
- ModTime time.Time // FileInfo.ModTime
- Sys any // FileInfo.Sys
-}
-
-var _ fs.FS = MapFS(nil)
-var _ fs.File = (*openMapFile)(nil)
-
-// Open opens the named file.
-func (fsys MapFS) Open(name string) (fs.File, error) {
- if !fs.ValidPath(name) {
- return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
- }
- file := fsys[name]
- if file != nil && file.Mode&fs.ModeDir == 0 {
- // Ordinary file
- return &openMapFile{name, mapFileInfo{path.Base(name), file}, 0}, nil
- }
-
- // Directory, possibly synthesized.
- // Note that file can be nil here: the map need not contain explicit parent directories for all its files.
- // But file can also be non-nil, in case the user wants to set metadata for the directory explicitly.
- // Either way, we need to construct the list of children of this directory.
- var list []mapFileInfo
- var elem string
- var need = make(map[string]bool)
- if name == "." {
- elem = "."
- for fname, f := range fsys {
- i := strings.Index(fname, "/")
- if i < 0 {
- if fname != "." {
- list = append(list, mapFileInfo{fname, f})
- }
- } else {
- need[fname[:i]] = true
- }
- }
- } else {
- elem = name[strings.LastIndex(name, "/")+1:]
- prefix := name + "/"
- for fname, f := range fsys {
- if strings.HasPrefix(fname, prefix) {
- felem := fname[len(prefix):]
- i := strings.Index(felem, "/")
- if i < 0 {
- list = append(list, mapFileInfo{felem, f})
- } else {
- need[fname[len(prefix):len(prefix)+i]] = true
- }
- }
- }
- // If the directory name is not in the map,
- // and there are no children of the name in the map,
- // then the directory is treated as not existing.
- if file == nil && list == nil && len(need) == 0 {
- return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
- }
- }
- for _, fi := range list {
- delete(need, fi.name)
- }
- for name := range need {
- list = append(list, mapFileInfo{name, &MapFile{Mode: fs.ModeDir}})
- }
- sort.Slice(list, func(i, j int) bool {
- return list[i].name < list[j].name
- })
-
- if file == nil {
- file = &MapFile{Mode: fs.ModeDir}
- }
- return &mapDir{name, mapFileInfo{elem, file}, list, 0}, nil
-}
-
-// fsOnly is a wrapper that hides all but the fs.FS methods,
-// to avoid an infinite recursion when implementing special
-// methods in terms of helpers that would use them.
-// (In general, implementing these methods using the package fs helpers
-// is redundant and unnecessary, but having the methods may make
-// MapFS exercise more code paths when used in tests.)
-type fsOnly struct{ fs.FS }
-
-func (fsys MapFS) ReadFile(name string) ([]byte, error) {
- return fs.ReadFile(fsOnly{fsys}, name)
-}
-
-func (fsys MapFS) Stat(name string) (fs.FileInfo, error) {
- return fs.Stat(fsOnly{fsys}, name)
-}
-
-func (fsys MapFS) ReadDir(name string) ([]fs.DirEntry, error) {
- return fs.ReadDir(fsOnly{fsys}, name)
-}
-
-func (fsys MapFS) Glob(pattern string) ([]string, error) {
- return fs.Glob(fsOnly{fsys}, pattern)
-}
-
-type noSub struct {
- MapFS
-}
-
-func (noSub) Sub() {} // not the fs.SubFS signature
-
-func (fsys MapFS) Sub(dir string) (fs.FS, error) {
- return fs.Sub(noSub{fsys}, dir)
-}
-
-// A mapFileInfo implements fs.FileInfo and fs.DirEntry for a given map file.
-type mapFileInfo struct {
- name string
- f *MapFile
-}
-
-func (i *mapFileInfo) Name() string { return i.name }
-func (i *mapFileInfo) Size() int64 { return int64(len(i.f.Data)) }
-func (i *mapFileInfo) Mode() fs.FileMode { return i.f.Mode }
-func (i *mapFileInfo) Type() fs.FileMode { return i.f.Mode.Type() }
-func (i *mapFileInfo) ModTime() time.Time { return i.f.ModTime }
-func (i *mapFileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 }
-func (i *mapFileInfo) Sys() any { return i.f.Sys }
-func (i *mapFileInfo) Info() (fs.FileInfo, error) { return i, nil }
-
-func (i *mapFileInfo) String() string {
- return fs.FormatFileInfo(i)
-}
-
-// An openMapFile is a regular (non-directory) fs.File open for reading.
-type openMapFile struct {
- path string
- mapFileInfo
- offset int64
-}
-
-func (f *openMapFile) Stat() (fs.FileInfo, error) { return &f.mapFileInfo, nil }
-
-func (f *openMapFile) Close() error { return nil }
-
-func (f *openMapFile) Read(b []byte) (int, error) {
- if f.offset >= int64(len(f.f.Data)) {
- return 0, io.EOF
- }
- if f.offset < 0 {
- return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
- }
- n := copy(b, f.f.Data[f.offset:])
- f.offset += int64(n)
- return n, nil
-}
-
-func (f *openMapFile) Seek(offset int64, whence int) (int64, error) {
- switch whence {
- case 0:
- // offset += 0
- case 1:
- offset += f.offset
- case 2:
- offset += int64(len(f.f.Data))
- }
- if offset < 0 || offset > int64(len(f.f.Data)) {
- return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid}
- }
- f.offset = offset
- return offset, nil
-}
-
-func (f *openMapFile) ReadAt(b []byte, offset int64) (int, error) {
- if offset < 0 || offset > int64(len(f.f.Data)) {
- return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
- }
- n := copy(b, f.f.Data[offset:])
- if n < len(b) {
- return n, io.EOF
- }
- return n, nil
-}
-
-// A mapDir is a directory fs.File (so also an fs.ReadDirFile) open for reading.
-type mapDir struct {
- path string
- mapFileInfo
- entry []mapFileInfo
- offset int
-}
-
-func (d *mapDir) Stat() (fs.FileInfo, error) { return &d.mapFileInfo, nil }
-func (d *mapDir) Close() error { return nil }
-func (d *mapDir) Read(b []byte) (int, error) {
- return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}
-}
-
-func (d *mapDir) ReadDir(count int) ([]fs.DirEntry, error) {
- n := len(d.entry) - d.offset
- if n == 0 && count > 0 {
- return nil, io.EOF
- }
- if count > 0 && n > count {
- n = count
- }
- list := make([]fs.DirEntry, n)
- for i := range list {
- list[i] = &d.entry[d.offset+i]
- }
- d.offset += n
- return list, nil
-}
diff --git a/contrib/go/_std_1.21/src/testing/fstest/testfs.go b/contrib/go/_std_1.21/src/testing/fstest/testfs.go
deleted file mode 100644
index 78b0b82640..0000000000
--- a/contrib/go/_std_1.21/src/testing/fstest/testfs.go
+++ /dev/null
@@ -1,624 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fstest implements support for testing implementations and users of file systems.
-package fstest
-
-import (
- "errors"
- "fmt"
- "io"
- "io/fs"
- "path"
- "reflect"
- "sort"
- "strings"
- "testing/iotest"
-)
-
-// TestFS tests a file system implementation.
-// It walks the entire tree of files in fsys,
-// opening and checking that each file behaves correctly.
-// It also checks that the file system contains at least the expected files.
-// As a special case, if no expected files are listed, fsys must be empty.
-// Otherwise, fsys must contain at least the listed files; it can also contain others.
-// The contents of fsys must not change concurrently with TestFS.
-//
-// If TestFS finds any misbehaviors, it returns an error reporting all of them.
-// The error text spans multiple lines, one per detected misbehavior.
-//
-// Typical usage inside a test is:
-//
-// if err := fstest.TestFS(myFS, "file/that/should/be/present"); err != nil {
-// t.Fatal(err)
-// }
-func TestFS(fsys fs.FS, expected ...string) error {
- if err := testFS(fsys, expected...); err != nil {
- return err
- }
- for _, name := range expected {
- if i := strings.Index(name, "/"); i >= 0 {
- dir, dirSlash := name[:i], name[:i+1]
- var subExpected []string
- for _, name := range expected {
- if strings.HasPrefix(name, dirSlash) {
- subExpected = append(subExpected, name[len(dirSlash):])
- }
- }
- sub, err := fs.Sub(fsys, dir)
- if err != nil {
- return err
- }
- if err := testFS(sub, subExpected...); err != nil {
- return fmt.Errorf("testing fs.Sub(fsys, %s): %v", dir, err)
- }
- break // one sub-test is enough
- }
- }
- return nil
-}
-
-func testFS(fsys fs.FS, expected ...string) error {
- t := fsTester{fsys: fsys}
- t.checkDir(".")
- t.checkOpen(".")
- found := make(map[string]bool)
- for _, dir := range t.dirs {
- found[dir] = true
- }
- for _, file := range t.files {
- found[file] = true
- }
- delete(found, ".")
- if len(expected) == 0 && len(found) > 0 {
- var list []string
- for k := range found {
- if k != "." {
- list = append(list, k)
- }
- }
- sort.Strings(list)
- if len(list) > 15 {
- list = append(list[:10], "...")
- }
- t.errorf("expected empty file system but found files:\n%s", strings.Join(list, "\n"))
- }
- for _, name := range expected {
- if !found[name] {
- t.errorf("expected but not found: %s", name)
- }
- }
- if len(t.errText) == 0 {
- return nil
- }
- return errors.New("TestFS found errors:\n" + string(t.errText))
-}
-
-// An fsTester holds state for running the test.
-type fsTester struct {
- fsys fs.FS
- errText []byte
- dirs []string
- files []string
-}
-
-// errorf adds an error line to errText.
-func (t *fsTester) errorf(format string, args ...any) {
- if len(t.errText) > 0 {
- t.errText = append(t.errText, '\n')
- }
- t.errText = append(t.errText, fmt.Sprintf(format, args...)...)
-}
-
-func (t *fsTester) openDir(dir string) fs.ReadDirFile {
- f, err := t.fsys.Open(dir)
- if err != nil {
- t.errorf("%s: Open: %v", dir, err)
- return nil
- }
- d, ok := f.(fs.ReadDirFile)
- if !ok {
- f.Close()
- t.errorf("%s: Open returned File type %T, not a fs.ReadDirFile", dir, f)
- return nil
- }
- return d
-}
-
-// checkDir checks the directory dir, which is expected to exist
-// (it is either the root or was found in a directory listing with IsDir true).
-func (t *fsTester) checkDir(dir string) {
- // Read entire directory.
- t.dirs = append(t.dirs, dir)
- d := t.openDir(dir)
- if d == nil {
- return
- }
- list, err := d.ReadDir(-1)
- if err != nil {
- d.Close()
- t.errorf("%s: ReadDir(-1): %v", dir, err)
- return
- }
-
- // Check all children.
- var prefix string
- if dir == "." {
- prefix = ""
- } else {
- prefix = dir + "/"
- }
- for _, info := range list {
- name := info.Name()
- switch {
- case name == ".", name == "..", name == "":
- t.errorf("%s: ReadDir: child has invalid name: %#q", dir, name)
- continue
- case strings.Contains(name, "/"):
- t.errorf("%s: ReadDir: child name contains slash: %#q", dir, name)
- continue
- case strings.Contains(name, `\`):
- t.errorf("%s: ReadDir: child name contains backslash: %#q", dir, name)
- continue
- }
- path := prefix + name
- t.checkStat(path, info)
- t.checkOpen(path)
- if info.IsDir() {
- t.checkDir(path)
- } else {
- t.checkFile(path)
- }
- }
-
- // Check ReadDir(-1) at EOF.
- list2, err := d.ReadDir(-1)
- if len(list2) > 0 || err != nil {
- d.Close()
- t.errorf("%s: ReadDir(-1) at EOF = %d entries, %v, wanted 0 entries, nil", dir, len(list2), err)
- return
- }
-
- // Check ReadDir(1) at EOF (different results).
- list2, err = d.ReadDir(1)
- if len(list2) > 0 || err != io.EOF {
- d.Close()
- t.errorf("%s: ReadDir(1) at EOF = %d entries, %v, wanted 0 entries, EOF", dir, len(list2), err)
- return
- }
-
- // Check that close does not report an error.
- if err := d.Close(); err != nil {
- t.errorf("%s: Close: %v", dir, err)
- }
-
- // Check that closing twice doesn't crash.
- // The return value doesn't matter.
- d.Close()
-
- // Reopen directory, read a second time, make sure contents match.
- if d = t.openDir(dir); d == nil {
- return
- }
- defer d.Close()
- list2, err = d.ReadDir(-1)
- if err != nil {
- t.errorf("%s: second Open+ReadDir(-1): %v", dir, err)
- return
- }
- t.checkDirList(dir, "first Open+ReadDir(-1) vs second Open+ReadDir(-1)", list, list2)
-
- // Reopen directory, read a third time in pieces, make sure contents match.
- if d = t.openDir(dir); d == nil {
- return
- }
- defer d.Close()
- list2 = nil
- for {
- n := 1
- if len(list2) > 0 {
- n = 2
- }
- frag, err := d.ReadDir(n)
- if len(frag) > n {
- t.errorf("%s: third Open: ReadDir(%d) after %d: %d entries (too many)", dir, n, len(list2), len(frag))
- return
- }
- list2 = append(list2, frag...)
- if err == io.EOF {
- break
- }
- if err != nil {
- t.errorf("%s: third Open: ReadDir(%d) after %d: %v", dir, n, len(list2), err)
- return
- }
- if n == 0 {
- t.errorf("%s: third Open: ReadDir(%d) after %d: 0 entries but nil error", dir, n, len(list2))
- return
- }
- }
- t.checkDirList(dir, "first Open+ReadDir(-1) vs third Open+ReadDir(1,2) loop", list, list2)
-
- // If fsys has ReadDir, check that it matches and is sorted.
- if fsys, ok := t.fsys.(fs.ReadDirFS); ok {
- list2, err := fsys.ReadDir(dir)
- if err != nil {
- t.errorf("%s: fsys.ReadDir: %v", dir, err)
- return
- }
- t.checkDirList(dir, "first Open+ReadDir(-1) vs fsys.ReadDir", list, list2)
-
- for i := 0; i+1 < len(list2); i++ {
- if list2[i].Name() >= list2[i+1].Name() {
- t.errorf("%s: fsys.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
- }
- }
- }
-
- // Check fs.ReadDir as well.
- list2, err = fs.ReadDir(t.fsys, dir)
- if err != nil {
- t.errorf("%s: fs.ReadDir: %v", dir, err)
- return
- }
- t.checkDirList(dir, "first Open+ReadDir(-1) vs fs.ReadDir", list, list2)
-
- for i := 0; i+1 < len(list2); i++ {
- if list2[i].Name() >= list2[i+1].Name() {
- t.errorf("%s: fs.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
- }
- }
-
- t.checkGlob(dir, list2)
-}
-
-// formatEntry formats an fs.DirEntry into a string for error messages and comparison.
-func formatEntry(entry fs.DirEntry) string {
- return fmt.Sprintf("%s IsDir=%v Type=%v", entry.Name(), entry.IsDir(), entry.Type())
-}
-
-// formatInfoEntry formats an fs.FileInfo into a string like the result of formatEntry, for error messages and comparison.
-func formatInfoEntry(info fs.FileInfo) string {
- return fmt.Sprintf("%s IsDir=%v Type=%v", info.Name(), info.IsDir(), info.Mode().Type())
-}
-
-// formatInfo formats an fs.FileInfo into a string for error messages and comparison.
-func formatInfo(info fs.FileInfo) string {
- return fmt.Sprintf("%s IsDir=%v Mode=%v Size=%d ModTime=%v", info.Name(), info.IsDir(), info.Mode(), info.Size(), info.ModTime())
-}
-
-// checkGlob checks that various glob patterns work if the file system implements GlobFS.
-func (t *fsTester) checkGlob(dir string, list []fs.DirEntry) {
- if _, ok := t.fsys.(fs.GlobFS); !ok {
- return
- }
-
- // Make a complex glob pattern prefix that only matches dir.
- var glob string
- if dir != "." {
- elem := strings.Split(dir, "/")
- for i, e := range elem {
- var pattern []rune
- for j, r := range e {
- if r == '*' || r == '?' || r == '\\' || r == '[' || r == '-' {
- pattern = append(pattern, '\\', r)
- continue
- }
- switch (i + j) % 5 {
- case 0:
- pattern = append(pattern, r)
- case 1:
- pattern = append(pattern, '[', r, ']')
- case 2:
- pattern = append(pattern, '[', r, '-', r, ']')
- case 3:
- pattern = append(pattern, '[', '\\', r, ']')
- case 4:
- pattern = append(pattern, '[', '\\', r, '-', '\\', r, ']')
- }
- }
- elem[i] = string(pattern)
- }
- glob = strings.Join(elem, "/") + "/"
- }
-
- // Test that malformed patterns are detected.
- // The error is likely path.ErrBadPattern but need not be.
- if _, err := t.fsys.(fs.GlobFS).Glob(glob + "nonexist/[]"); err == nil {
- t.errorf("%s: Glob(%#q): bad pattern not detected", dir, glob+"nonexist/[]")
- }
-
- // Try to find a letter that appears in only some of the final names.
- c := rune('a')
- for ; c <= 'z'; c++ {
- have, haveNot := false, false
- for _, d := range list {
- if strings.ContainsRune(d.Name(), c) {
- have = true
- } else {
- haveNot = true
- }
- }
- if have && haveNot {
- break
- }
- }
- if c > 'z' {
- c = 'a'
- }
- glob += "*" + string(c) + "*"
-
- var want []string
- for _, d := range list {
- if strings.ContainsRune(d.Name(), c) {
- want = append(want, path.Join(dir, d.Name()))
- }
- }
-
- names, err := t.fsys.(fs.GlobFS).Glob(glob)
- if err != nil {
- t.errorf("%s: Glob(%#q): %v", dir, glob, err)
- return
- }
- if reflect.DeepEqual(want, names) {
- return
- }
-
- if !sort.StringsAreSorted(names) {
- t.errorf("%s: Glob(%#q): unsorted output:\n%s", dir, glob, strings.Join(names, "\n"))
- sort.Strings(names)
- }
-
- var problems []string
- for len(want) > 0 || len(names) > 0 {
- switch {
- case len(want) > 0 && len(names) > 0 && want[0] == names[0]:
- want, names = want[1:], names[1:]
- case len(want) > 0 && (len(names) == 0 || want[0] < names[0]):
- problems = append(problems, "missing: "+want[0])
- want = want[1:]
- default:
- problems = append(problems, "extra: "+names[0])
- names = names[1:]
- }
- }
- t.errorf("%s: Glob(%#q): wrong output:\n%s", dir, glob, strings.Join(problems, "\n"))
-}
-
-// checkStat checks that a direct stat of path matches entry,
-// which was found in the parent's directory listing.
-func (t *fsTester) checkStat(path string, entry fs.DirEntry) {
- file, err := t.fsys.Open(path)
- if err != nil {
- t.errorf("%s: Open: %v", path, err)
- return
- }
- info, err := file.Stat()
- file.Close()
- if err != nil {
- t.errorf("%s: Stat: %v", path, err)
- return
- }
- fentry := formatEntry(entry)
- fientry := formatInfoEntry(info)
- // Note: mismatch here is OK for symlink, because Open dereferences symlink.
- if fentry != fientry && entry.Type()&fs.ModeSymlink == 0 {
- t.errorf("%s: mismatch:\n\tentry = %s\n\tfile.Stat() = %s", path, fentry, fientry)
- }
-
- einfo, err := entry.Info()
- if err != nil {
- t.errorf("%s: entry.Info: %v", path, err)
- return
- }
- finfo := formatInfo(info)
- if entry.Type()&fs.ModeSymlink != 0 {
- // For symlink, just check that entry.Info matches entry on common fields.
- // Open deferences symlink, so info itself may differ.
- feentry := formatInfoEntry(einfo)
- if fentry != feentry {
- t.errorf("%s: mismatch\n\tentry = %s\n\tentry.Info() = %s\n", path, fentry, feentry)
- }
- } else {
- feinfo := formatInfo(einfo)
- if feinfo != finfo {
- t.errorf("%s: mismatch:\n\tentry.Info() = %s\n\tfile.Stat() = %s\n", path, feinfo, finfo)
- }
- }
-
- // Stat should be the same as Open+Stat, even for symlinks.
- info2, err := fs.Stat(t.fsys, path)
- if err != nil {
- t.errorf("%s: fs.Stat: %v", path, err)
- return
- }
- finfo2 := formatInfo(info2)
- if finfo2 != finfo {
- t.errorf("%s: fs.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
- }
-
- if fsys, ok := t.fsys.(fs.StatFS); ok {
- info2, err := fsys.Stat(path)
- if err != nil {
- t.errorf("%s: fsys.Stat: %v", path, err)
- return
- }
- finfo2 := formatInfo(info2)
- if finfo2 != finfo {
- t.errorf("%s: fsys.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
- }
- }
-}
-
-// checkDirList checks that two directory lists contain the same files and file info.
-// The order of the lists need not match.
-func (t *fsTester) checkDirList(dir, desc string, list1, list2 []fs.DirEntry) {
- old := make(map[string]fs.DirEntry)
- checkMode := func(entry fs.DirEntry) {
- if entry.IsDir() != (entry.Type()&fs.ModeDir != 0) {
- if entry.IsDir() {
- t.errorf("%s: ReadDir returned %s with IsDir() = true, Type() & ModeDir = 0", dir, entry.Name())
- } else {
- t.errorf("%s: ReadDir returned %s with IsDir() = false, Type() & ModeDir = ModeDir", dir, entry.Name())
- }
- }
- }
-
- for _, entry1 := range list1 {
- old[entry1.Name()] = entry1
- checkMode(entry1)
- }
-
- var diffs []string
- for _, entry2 := range list2 {
- entry1 := old[entry2.Name()]
- if entry1 == nil {
- checkMode(entry2)
- diffs = append(diffs, "+ "+formatEntry(entry2))
- continue
- }
- if formatEntry(entry1) != formatEntry(entry2) {
- diffs = append(diffs, "- "+formatEntry(entry1), "+ "+formatEntry(entry2))
- }
- delete(old, entry2.Name())
- }
- for _, entry1 := range old {
- diffs = append(diffs, "- "+formatEntry(entry1))
- }
-
- if len(diffs) == 0 {
- return
- }
-
- sort.Slice(diffs, func(i, j int) bool {
- fi := strings.Fields(diffs[i])
- fj := strings.Fields(diffs[j])
- // sort by name (i < j) and then +/- (j < i, because + < -)
- return fi[1]+" "+fj[0] < fj[1]+" "+fi[0]
- })
-
- t.errorf("%s: diff %s:\n\t%s", dir, desc, strings.Join(diffs, "\n\t"))
-}
-
-// checkFile checks that basic file reading works correctly.
-func (t *fsTester) checkFile(file string) {
- t.files = append(t.files, file)
-
- // Read entire file.
- f, err := t.fsys.Open(file)
- if err != nil {
- t.errorf("%s: Open: %v", file, err)
- return
- }
-
- data, err := io.ReadAll(f)
- if err != nil {
- f.Close()
- t.errorf("%s: Open+ReadAll: %v", file, err)
- return
- }
-
- if err := f.Close(); err != nil {
- t.errorf("%s: Close: %v", file, err)
- }
-
- // Check that closing twice doesn't crash.
- // The return value doesn't matter.
- f.Close()
-
- // Check that ReadFile works if present.
- if fsys, ok := t.fsys.(fs.ReadFileFS); ok {
- data2, err := fsys.ReadFile(file)
- if err != nil {
- t.errorf("%s: fsys.ReadFile: %v", file, err)
- return
- }
- t.checkFileRead(file, "ReadAll vs fsys.ReadFile", data, data2)
-
- // Modify the data and check it again. Modifying the
- // returned byte slice should not affect the next call.
- for i := range data2 {
- data2[i]++
- }
- data2, err = fsys.ReadFile(file)
- if err != nil {
- t.errorf("%s: second call to fsys.ReadFile: %v", file, err)
- return
- }
- t.checkFileRead(file, "Readall vs second fsys.ReadFile", data, data2)
-
- t.checkBadPath(file, "ReadFile",
- func(name string) error { _, err := fsys.ReadFile(name); return err })
- }
-
- // Check that fs.ReadFile works with t.fsys.
- data2, err := fs.ReadFile(t.fsys, file)
- if err != nil {
- t.errorf("%s: fs.ReadFile: %v", file, err)
- return
- }
- t.checkFileRead(file, "ReadAll vs fs.ReadFile", data, data2)
-
- // Use iotest.TestReader to check small reads, Seek, ReadAt.
- f, err = t.fsys.Open(file)
- if err != nil {
- t.errorf("%s: second Open: %v", file, err)
- return
- }
- defer f.Close()
- if err := iotest.TestReader(f, data); err != nil {
- t.errorf("%s: failed TestReader:\n\t%s", file, strings.ReplaceAll(err.Error(), "\n", "\n\t"))
- }
-}
-
-func (t *fsTester) checkFileRead(file, desc string, data1, data2 []byte) {
- if string(data1) != string(data2) {
- t.errorf("%s: %s: different data returned\n\t%q\n\t%q", file, desc, data1, data2)
- return
- }
-}
-
-// checkBadPath checks that various invalid forms of file's name cannot be opened using t.fsys.Open.
-func (t *fsTester) checkOpen(file string) {
- t.checkBadPath(file, "Open", func(file string) error {
- f, err := t.fsys.Open(file)
- if err == nil {
- f.Close()
- }
- return err
- })
-}
-
-// checkBadPath checks that various invalid forms of file's name cannot be opened using open.
-func (t *fsTester) checkBadPath(file string, desc string, open func(string) error) {
- bad := []string{
- "/" + file,
- file + "/.",
- }
- if file == "." {
- bad = append(bad, "/")
- }
- if i := strings.Index(file, "/"); i >= 0 {
- bad = append(bad,
- file[:i]+"//"+file[i+1:],
- file[:i]+"/./"+file[i+1:],
- file[:i]+`\`+file[i+1:],
- file[:i]+"/../"+file,
- )
- }
- if i := strings.LastIndex(file, "/"); i >= 0 {
- bad = append(bad,
- file[:i]+"//"+file[i+1:],
- file[:i]+"/./"+file[i+1:],
- file[:i]+`\`+file[i+1:],
- file+"/../"+file[i+1:],
- )
- }
-
- for _, b := range bad {
- if err := open(b); err == nil {
- t.errorf("%s: %s(%s) succeeded, want error", file, desc, b)
- }
- }
-}
diff --git a/contrib/go/_std_1.21/src/testing/fstest/ya.make b/contrib/go/_std_1.21/src/testing/fstest/ya.make
deleted file mode 100644
index 320a7d80ab..0000000000
--- a/contrib/go/_std_1.21/src/testing/fstest/ya.make
+++ /dev/null
@@ -1,16 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- mapfs.go
- testfs.go
-)
-
-GO_TEST_SRCS(
- mapfs_test.go
- testfs_test.go
-)
-
-END()
-
-RECURSE(
-)
diff --git a/contrib/go/_std_1.21/src/testing/internal/ya.make b/contrib/go/_std_1.21/src/testing/internal/ya.make
deleted file mode 100644
index 11ff590e8c..0000000000
--- a/contrib/go/_std_1.21/src/testing/internal/ya.make
+++ /dev/null
@@ -1,3 +0,0 @@
-RECURSE(
- testdeps
-)
diff --git a/contrib/go/_std_1.21/src/testing/iotest/logger.go b/contrib/go/_std_1.21/src/testing/iotest/logger.go
deleted file mode 100644
index 99548dcfed..0000000000
--- a/contrib/go/_std_1.21/src/testing/iotest/logger.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package iotest
-
-import (
- "io"
- "log"
-)
-
-type writeLogger struct {
- prefix string
- w io.Writer
-}
-
-func (l *writeLogger) Write(p []byte) (n int, err error) {
- n, err = l.w.Write(p)
- if err != nil {
- log.Printf("%s %x: %v", l.prefix, p[0:n], err)
- } else {
- log.Printf("%s %x", l.prefix, p[0:n])
- }
- return
-}
-
-// NewWriteLogger returns a writer that behaves like w except
-// that it logs (using log.Printf) each write to standard error,
-// printing the prefix and the hexadecimal data written.
-func NewWriteLogger(prefix string, w io.Writer) io.Writer {
- return &writeLogger{prefix, w}
-}
-
-type readLogger struct {
- prefix string
- r io.Reader
-}
-
-func (l *readLogger) Read(p []byte) (n int, err error) {
- n, err = l.r.Read(p)
- if err != nil {
- log.Printf("%s %x: %v", l.prefix, p[0:n], err)
- } else {
- log.Printf("%s %x", l.prefix, p[0:n])
- }
- return
-}
-
-// NewReadLogger returns a reader that behaves like r except
-// that it logs (using log.Printf) each read to standard error,
-// printing the prefix and the hexadecimal data read.
-func NewReadLogger(prefix string, r io.Reader) io.Reader {
- return &readLogger{prefix, r}
-}
diff --git a/contrib/go/_std_1.21/src/testing/iotest/reader.go b/contrib/go/_std_1.21/src/testing/iotest/reader.go
deleted file mode 100644
index 770d87f26b..0000000000
--- a/contrib/go/_std_1.21/src/testing/iotest/reader.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package iotest implements Readers and Writers useful mainly for testing.
-package iotest
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-)
-
-// OneByteReader returns a Reader that implements
-// each non-empty Read by reading one byte from r.
-func OneByteReader(r io.Reader) io.Reader { return &oneByteReader{r} }
-
-type oneByteReader struct {
- r io.Reader
-}
-
-func (r *oneByteReader) Read(p []byte) (int, error) {
- if len(p) == 0 {
- return 0, nil
- }
- return r.r.Read(p[0:1])
-}
-
-// HalfReader returns a Reader that implements Read
-// by reading half as many requested bytes from r.
-func HalfReader(r io.Reader) io.Reader { return &halfReader{r} }
-
-type halfReader struct {
- r io.Reader
-}
-
-func (r *halfReader) Read(p []byte) (int, error) {
- return r.r.Read(p[0 : (len(p)+1)/2])
-}
-
-// DataErrReader changes the way errors are handled by a Reader. Normally, a
-// Reader returns an error (typically EOF) from the first Read call after the
-// last piece of data is read. DataErrReader wraps a Reader and changes its
-// behavior so the final error is returned along with the final data, instead
-// of in the first call after the final data.
-func DataErrReader(r io.Reader) io.Reader { return &dataErrReader{r, nil, make([]byte, 1024)} }
-
-type dataErrReader struct {
- r io.Reader
- unread []byte
- data []byte
-}
-
-func (r *dataErrReader) Read(p []byte) (n int, err error) {
- // loop because first call needs two reads:
- // one to get data and a second to look for an error.
- for {
- if len(r.unread) == 0 {
- n1, err1 := r.r.Read(r.data)
- r.unread = r.data[0:n1]
- err = err1
- }
- if n > 0 || err != nil {
- break
- }
- n = copy(p, r.unread)
- r.unread = r.unread[n:]
- }
- return
-}
-
-// ErrTimeout is a fake timeout error.
-var ErrTimeout = errors.New("timeout")
-
-// TimeoutReader returns ErrTimeout on the second read
-// with no data. Subsequent calls to read succeed.
-func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }
-
-type timeoutReader struct {
- r io.Reader
- count int
-}
-
-func (r *timeoutReader) Read(p []byte) (int, error) {
- r.count++
- if r.count == 2 {
- return 0, ErrTimeout
- }
- return r.r.Read(p)
-}
-
-// ErrReader returns an io.Reader that returns 0, err from all Read calls.
-func ErrReader(err error) io.Reader {
- return &errReader{err: err}
-}
-
-type errReader struct {
- err error
-}
-
-func (r *errReader) Read(p []byte) (int, error) {
- return 0, r.err
-}
-
-type smallByteReader struct {
- r io.Reader
- off int
- n int
-}
-
-func (r *smallByteReader) Read(p []byte) (int, error) {
- if len(p) == 0 {
- return 0, nil
- }
- r.n = r.n%3 + 1
- n := r.n
- if n > len(p) {
- n = len(p)
- }
- n, err := r.r.Read(p[0:n])
- if err != nil && err != io.EOF {
- err = fmt.Errorf("Read(%d bytes at offset %d): %v", n, r.off, err)
- }
- r.off += n
- return n, err
-}
-
-// TestReader tests that reading from r returns the expected file content.
-// It does reads of different sizes, until EOF.
-// If r implements io.ReaderAt or io.Seeker, TestReader also checks
-// that those operations behave as they should.
-//
-// If TestReader finds any misbehaviors, it returns an error reporting them.
-// The error text may span multiple lines.
-func TestReader(r io.Reader, content []byte) error {
- if len(content) > 0 {
- n, err := r.Read(nil)
- if n != 0 || err != nil {
- return fmt.Errorf("Read(0) = %d, %v, want 0, nil", n, err)
- }
- }
-
- data, err := io.ReadAll(&smallByteReader{r: r})
- if err != nil {
- return err
- }
- if !bytes.Equal(data, content) {
- return fmt.Errorf("ReadAll(small amounts) = %q\n\twant %q", data, content)
- }
- n, err := r.Read(make([]byte, 10))
- if n != 0 || err != io.EOF {
- return fmt.Errorf("Read(10) at EOF = %v, %v, want 0, EOF", n, err)
- }
-
- if r, ok := r.(io.ReadSeeker); ok {
- // Seek(0, 1) should report the current file position (EOF).
- if off, err := r.Seek(0, 1); off != int64(len(content)) || err != nil {
- return fmt.Errorf("Seek(0, 1) from EOF = %d, %v, want %d, nil", off, err, len(content))
- }
-
- // Seek backward partway through file, in two steps.
- // If middle == 0, len(content) == 0, can't use the -1 and +1 seeks.
- middle := len(content) - len(content)/3
- if middle > 0 {
- if off, err := r.Seek(-1, 1); off != int64(len(content)-1) || err != nil {
- return fmt.Errorf("Seek(-1, 1) from EOF = %d, %v, want %d, nil", -off, err, len(content)-1)
- }
- if off, err := r.Seek(int64(-len(content)/3), 1); off != int64(middle-1) || err != nil {
- return fmt.Errorf("Seek(%d, 1) from %d = %d, %v, want %d, nil", -len(content)/3, len(content)-1, off, err, middle-1)
- }
- if off, err := r.Seek(+1, 1); off != int64(middle) || err != nil {
- return fmt.Errorf("Seek(+1, 1) from %d = %d, %v, want %d, nil", middle-1, off, err, middle)
- }
- }
-
- // Seek(0, 1) should report the current file position (middle).
- if off, err := r.Seek(0, 1); off != int64(middle) || err != nil {
- return fmt.Errorf("Seek(0, 1) from %d = %d, %v, want %d, nil", middle, off, err, middle)
- }
-
- // Reading forward should return the last part of the file.
- data, err := io.ReadAll(&smallByteReader{r: r})
- if err != nil {
- return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
- }
- if !bytes.Equal(data, content[middle:]) {
- return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
- }
-
- // Seek relative to end of file, but start elsewhere.
- if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
- return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
- }
- if off, err := r.Seek(int64(-len(content)/3), 2); off != int64(middle) || err != nil {
- return fmt.Errorf("Seek(%d, 2) from %d = %d, %v, want %d, nil", -len(content)/3, middle/2, off, err, middle)
- }
-
- // Reading forward should return the last part of the file (again).
- data, err = io.ReadAll(&smallByteReader{r: r})
- if err != nil {
- return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
- }
- if !bytes.Equal(data, content[middle:]) {
- return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
- }
-
- // Absolute seek & read forward.
- if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
- return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
- }
- data, err = io.ReadAll(r)
- if err != nil {
- return fmt.Errorf("ReadAll from offset %d: %v", middle/2, err)
- }
- if !bytes.Equal(data, content[middle/2:]) {
- return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle/2, data, content[middle/2:])
- }
- }
-
- if r, ok := r.(io.ReaderAt); ok {
- data := make([]byte, len(content), len(content)+1)
- for i := range data {
- data[i] = 0xfe
- }
- n, err := r.ReadAt(data, 0)
- if n != len(data) || err != nil && err != io.EOF {
- return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, nil or EOF", len(data), n, err, len(data))
- }
- if !bytes.Equal(data, content) {
- return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
- }
-
- n, err = r.ReadAt(data[:1], int64(len(data)))
- if n != 0 || err != io.EOF {
- return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 0, EOF", len(data), n, err)
- }
-
- for i := range data {
- data[i] = 0xfe
- }
- n, err = r.ReadAt(data[:cap(data)], 0)
- if n != len(data) || err != io.EOF {
- return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, EOF", cap(data), n, err, len(data))
- }
- if !bytes.Equal(data, content) {
- return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
- }
-
- for i := range data {
- data[i] = 0xfe
- }
- for i := range data {
- n, err = r.ReadAt(data[i:i+1], int64(i))
- if n != 1 || err != nil && (i != len(data)-1 || err != io.EOF) {
- want := "nil"
- if i == len(data)-1 {
- want = "nil or EOF"
- }
- return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 1, %s", i, n, err, want)
- }
- if data[i] != content[i] {
- return fmt.Errorf("ReadAt(1, %d) = %q want %q", i, data[i:i+1], content[i:i+1])
- }
- }
- }
- return nil
-}
diff --git a/contrib/go/_std_1.21/src/testing/iotest/writer.go b/contrib/go/_std_1.21/src/testing/iotest/writer.go
deleted file mode 100644
index af61ab8584..0000000000
--- a/contrib/go/_std_1.21/src/testing/iotest/writer.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package iotest
-
-import "io"
-
-// TruncateWriter returns a Writer that writes to w
-// but stops silently after n bytes.
-func TruncateWriter(w io.Writer, n int64) io.Writer {
- return &truncateWriter{w, n}
-}
-
-type truncateWriter struct {
- w io.Writer
- n int64
-}
-
-func (t *truncateWriter) Write(p []byte) (n int, err error) {
- if t.n <= 0 {
- return len(p), nil
- }
- // real write
- n = len(p)
- if int64(n) > t.n {
- n = int(t.n)
- }
- n, err = t.w.Write(p[0:n])
- t.n -= int64(n)
- if err == nil {
- n = len(p)
- }
- return
-}
diff --git a/contrib/go/_std_1.21/src/testing/iotest/ya.make b/contrib/go/_std_1.21/src/testing/iotest/ya.make
deleted file mode 100644
index 8371548788..0000000000
--- a/contrib/go/_std_1.21/src/testing/iotest/ya.make
+++ /dev/null
@@ -1,20 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- logger.go
- reader.go
- writer.go
-)
-
-GO_TEST_SRCS(
- logger_test.go
- reader_test.go
- writer_test.go
-)
-
-GO_XTEST_SRCS(example_test.go)
-
-END()
-
-RECURSE(
-)
diff --git a/contrib/go/_std_1.21/src/testing/quick/quick.go b/contrib/go/_std_1.21/src/testing/quick/quick.go
deleted file mode 100644
index d7117420a3..0000000000
--- a/contrib/go/_std_1.21/src/testing/quick/quick.go
+++ /dev/null
@@ -1,385 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package quick implements utility functions to help with black box testing.
-//
-// The testing/quick package is frozen and is not accepting new features.
-package quick
-
-import (
- "flag"
- "fmt"
- "math"
- "math/rand"
- "reflect"
- "strings"
- "time"
-)
-
-var defaultMaxCount *int = flag.Int("quickchecks", 100, "The default number of iterations for each check")
-
-// A Generator can generate random values of its own type.
-type Generator interface {
- // Generate returns a random instance of the type on which it is a
- // method using the size as a size hint.
- Generate(rand *rand.Rand, size int) reflect.Value
-}
-
-// randFloat32 generates a random float taking the full range of a float32.
-func randFloat32(rand *rand.Rand) float32 {
- f := rand.Float64() * math.MaxFloat32
- if rand.Int()&1 == 1 {
- f = -f
- }
- return float32(f)
-}
-
-// randFloat64 generates a random float taking the full range of a float64.
-func randFloat64(rand *rand.Rand) float64 {
- f := rand.Float64() * math.MaxFloat64
- if rand.Int()&1 == 1 {
- f = -f
- }
- return f
-}
-
-// randInt64 returns a random int64.
-func randInt64(rand *rand.Rand) int64 {
- return int64(rand.Uint64())
-}
-
-// complexSize is the maximum length of arbitrary values that contain other
-// values.
-const complexSize = 50
-
-// Value returns an arbitrary value of the given type.
-// If the type implements the Generator interface, that will be used.
-// Note: To create arbitrary values for structs, all the fields must be exported.
-func Value(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool) {
- return sizedValue(t, rand, complexSize)
-}
-
-// sizedValue returns an arbitrary value of the given type. The size
-// hint is used for shrinking as a function of indirection level so
-// that recursive data structures will terminate.
-func sizedValue(t reflect.Type, rand *rand.Rand, size int) (value reflect.Value, ok bool) {
- if m, ok := reflect.Zero(t).Interface().(Generator); ok {
- return m.Generate(rand, size), true
- }
-
- v := reflect.New(t).Elem()
- switch concrete := t; concrete.Kind() {
- case reflect.Bool:
- v.SetBool(rand.Int()&1 == 0)
- case reflect.Float32:
- v.SetFloat(float64(randFloat32(rand)))
- case reflect.Float64:
- v.SetFloat(randFloat64(rand))
- case reflect.Complex64:
- v.SetComplex(complex(float64(randFloat32(rand)), float64(randFloat32(rand))))
- case reflect.Complex128:
- v.SetComplex(complex(randFloat64(rand), randFloat64(rand)))
- case reflect.Int16:
- v.SetInt(randInt64(rand))
- case reflect.Int32:
- v.SetInt(randInt64(rand))
- case reflect.Int64:
- v.SetInt(randInt64(rand))
- case reflect.Int8:
- v.SetInt(randInt64(rand))
- case reflect.Int:
- v.SetInt(randInt64(rand))
- case reflect.Uint16:
- v.SetUint(uint64(randInt64(rand)))
- case reflect.Uint32:
- v.SetUint(uint64(randInt64(rand)))
- case reflect.Uint64:
- v.SetUint(uint64(randInt64(rand)))
- case reflect.Uint8:
- v.SetUint(uint64(randInt64(rand)))
- case reflect.Uint:
- v.SetUint(uint64(randInt64(rand)))
- case reflect.Uintptr:
- v.SetUint(uint64(randInt64(rand)))
- case reflect.Map:
- numElems := rand.Intn(size)
- v.Set(reflect.MakeMap(concrete))
- for i := 0; i < numElems; i++ {
- key, ok1 := sizedValue(concrete.Key(), rand, size)
- value, ok2 := sizedValue(concrete.Elem(), rand, size)
- if !ok1 || !ok2 {
- return reflect.Value{}, false
- }
- v.SetMapIndex(key, value)
- }
- case reflect.Pointer:
- if rand.Intn(size) == 0 {
- v.SetZero() // Generate nil pointer.
- } else {
- elem, ok := sizedValue(concrete.Elem(), rand, size)
- if !ok {
- return reflect.Value{}, false
- }
- v.Set(reflect.New(concrete.Elem()))
- v.Elem().Set(elem)
- }
- case reflect.Slice:
- numElems := rand.Intn(size)
- sizeLeft := size - numElems
- v.Set(reflect.MakeSlice(concrete, numElems, numElems))
- for i := 0; i < numElems; i++ {
- elem, ok := sizedValue(concrete.Elem(), rand, sizeLeft)
- if !ok {
- return reflect.Value{}, false
- }
- v.Index(i).Set(elem)
- }
- case reflect.Array:
- for i := 0; i < v.Len(); i++ {
- elem, ok := sizedValue(concrete.Elem(), rand, size)
- if !ok {
- return reflect.Value{}, false
- }
- v.Index(i).Set(elem)
- }
- case reflect.String:
- numChars := rand.Intn(complexSize)
- codePoints := make([]rune, numChars)
- for i := 0; i < numChars; i++ {
- codePoints[i] = rune(rand.Intn(0x10ffff))
- }
- v.SetString(string(codePoints))
- case reflect.Struct:
- n := v.NumField()
- // Divide sizeLeft evenly among the struct fields.
- sizeLeft := size
- if n > sizeLeft {
- sizeLeft = 1
- } else if n > 0 {
- sizeLeft /= n
- }
- for i := 0; i < n; i++ {
- elem, ok := sizedValue(concrete.Field(i).Type, rand, sizeLeft)
- if !ok {
- return reflect.Value{}, false
- }
- v.Field(i).Set(elem)
- }
- default:
- return reflect.Value{}, false
- }
-
- return v, true
-}
-
-// A Config structure contains options for running a test.
-type Config struct {
- // MaxCount sets the maximum number of iterations.
- // If zero, MaxCountScale is used.
- MaxCount int
- // MaxCountScale is a non-negative scale factor applied to the
- // default maximum.
- // A count of zero implies the default, which is usually 100
- // but can be set by the -quickchecks flag.
- MaxCountScale float64
- // Rand specifies a source of random numbers.
- // If nil, a default pseudo-random source will be used.
- Rand *rand.Rand
- // Values specifies a function to generate a slice of
- // arbitrary reflect.Values that are congruent with the
- // arguments to the function being tested.
- // If nil, the top-level Value function is used to generate them.
- Values func([]reflect.Value, *rand.Rand)
-}
-
-var defaultConfig Config
-
-// getRand returns the *rand.Rand to use for a given Config.
-func (c *Config) getRand() *rand.Rand {
- if c.Rand == nil {
- return rand.New(rand.NewSource(time.Now().UnixNano()))
- }
- return c.Rand
-}
-
-// getMaxCount returns the maximum number of iterations to run for a given
-// Config.
-func (c *Config) getMaxCount() (maxCount int) {
- maxCount = c.MaxCount
- if maxCount == 0 {
- if c.MaxCountScale != 0 {
- maxCount = int(c.MaxCountScale * float64(*defaultMaxCount))
- } else {
- maxCount = *defaultMaxCount
- }
- }
-
- return
-}
-
-// A SetupError is the result of an error in the way that check is being
-// used, independent of the functions being tested.
-type SetupError string
-
-func (s SetupError) Error() string { return string(s) }
-
-// A CheckError is the result of Check finding an error.
-type CheckError struct {
- Count int
- In []any
-}
-
-func (s *CheckError) Error() string {
- return fmt.Sprintf("#%d: failed on input %s", s.Count, toString(s.In))
-}
-
-// A CheckEqualError is the result CheckEqual finding an error.
-type CheckEqualError struct {
- CheckError
- Out1 []any
- Out2 []any
-}
-
-func (s *CheckEqualError) Error() string {
- return fmt.Sprintf("#%d: failed on input %s. Output 1: %s. Output 2: %s", s.Count, toString(s.In), toString(s.Out1), toString(s.Out2))
-}
-
-// Check looks for an input to f, any function that returns bool,
-// such that f returns false. It calls f repeatedly, with arbitrary
-// values for each argument. If f returns false on a given input,
-// Check returns that input as a *CheckError.
-// For example:
-//
-// func TestOddMultipleOfThree(t *testing.T) {
-// f := func(x int) bool {
-// y := OddMultipleOfThree(x)
-// return y%2 == 1 && y%3 == 0
-// }
-// if err := quick.Check(f, nil); err != nil {
-// t.Error(err)
-// }
-// }
-func Check(f any, config *Config) error {
- if config == nil {
- config = &defaultConfig
- }
-
- fVal, fType, ok := functionAndType(f)
- if !ok {
- return SetupError("argument is not a function")
- }
-
- if fType.NumOut() != 1 {
- return SetupError("function does not return one value")
- }
- if fType.Out(0).Kind() != reflect.Bool {
- return SetupError("function does not return a bool")
- }
-
- arguments := make([]reflect.Value, fType.NumIn())
- rand := config.getRand()
- maxCount := config.getMaxCount()
-
- for i := 0; i < maxCount; i++ {
- err := arbitraryValues(arguments, fType, config, rand)
- if err != nil {
- return err
- }
-
- if !fVal.Call(arguments)[0].Bool() {
- return &CheckError{i + 1, toInterfaces(arguments)}
- }
- }
-
- return nil
-}
-
-// CheckEqual looks for an input on which f and g return different results.
-// It calls f and g repeatedly with arbitrary values for each argument.
-// If f and g return different answers, CheckEqual returns a *CheckEqualError
-// describing the input and the outputs.
-func CheckEqual(f, g any, config *Config) error {
- if config == nil {
- config = &defaultConfig
- }
-
- x, xType, ok := functionAndType(f)
- if !ok {
- return SetupError("f is not a function")
- }
- y, yType, ok := functionAndType(g)
- if !ok {
- return SetupError("g is not a function")
- }
-
- if xType != yType {
- return SetupError("functions have different types")
- }
-
- arguments := make([]reflect.Value, xType.NumIn())
- rand := config.getRand()
- maxCount := config.getMaxCount()
-
- for i := 0; i < maxCount; i++ {
- err := arbitraryValues(arguments, xType, config, rand)
- if err != nil {
- return err
- }
-
- xOut := toInterfaces(x.Call(arguments))
- yOut := toInterfaces(y.Call(arguments))
-
- if !reflect.DeepEqual(xOut, yOut) {
- return &CheckEqualError{CheckError{i + 1, toInterfaces(arguments)}, xOut, yOut}
- }
- }
-
- return nil
-}
-
-// arbitraryValues writes Values to args such that args contains Values
-// suitable for calling f.
-func arbitraryValues(args []reflect.Value, f reflect.Type, config *Config, rand *rand.Rand) (err error) {
- if config.Values != nil {
- config.Values(args, rand)
- return
- }
-
- for j := 0; j < len(args); j++ {
- var ok bool
- args[j], ok = Value(f.In(j), rand)
- if !ok {
- err = SetupError(fmt.Sprintf("cannot create arbitrary value of type %s for argument %d", f.In(j), j))
- return
- }
- }
-
- return
-}
-
-func functionAndType(f any) (v reflect.Value, t reflect.Type, ok bool) {
- v = reflect.ValueOf(f)
- ok = v.Kind() == reflect.Func
- if !ok {
- return
- }
- t = v.Type()
- return
-}
-
-func toInterfaces(values []reflect.Value) []any {
- ret := make([]any, len(values))
- for i, v := range values {
- ret[i] = v.Interface()
- }
- return ret
-}
-
-func toString(interfaces []any) string {
- s := make([]string, len(interfaces))
- for i, v := range interfaces {
- s[i] = fmt.Sprintf("%#v", v)
- }
- return strings.Join(s, ", ")
-}
diff --git a/contrib/go/_std_1.21/src/testing/quick/ya.make b/contrib/go/_std_1.21/src/testing/quick/ya.make
deleted file mode 100644
index 17ed663d9a..0000000000
--- a/contrib/go/_std_1.21/src/testing/quick/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- quick.go
-)
-
-GO_TEST_SRCS(quick_test.go)
-
-END()
-
-RECURSE(
-)
diff --git a/contrib/go/_std_1.21/src/testing/slogtest/slogtest.go b/contrib/go/_std_1.21/src/testing/slogtest/slogtest.go
deleted file mode 100644
index b16d1227dc..0000000000
--- a/contrib/go/_std_1.21/src/testing/slogtest/slogtest.go
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package slogtest implements support for testing implementations of log/slog.Handler.
-package slogtest
-
-import (
- "context"
- "errors"
- "fmt"
- "log/slog"
- "reflect"
- "runtime"
- "time"
-)
-
-type testCase struct {
- // If non-empty, explanation explains the violated constraint.
- explanation string
- // f executes a single log event using its argument logger.
- // So that mkdescs.sh can generate the right description,
- // the body of f must appear on a single line whose first
- // non-whitespace characters are "l.".
- f func(*slog.Logger)
- // If mod is not nil, it is called to modify the Record
- // generated by the Logger before it is passed to the Handler.
- mod func(*slog.Record)
- // checks is a list of checks to run on the result.
- checks []check
-}
-
-// TestHandler tests a [slog.Handler].
-// If TestHandler finds any misbehaviors, it returns an error for each,
-// combined into a single error with errors.Join.
-//
-// TestHandler installs the given Handler in a [slog.Logger] and
-// makes several calls to the Logger's output methods.
-//
-// The results function is invoked after all such calls.
-// It should return a slice of map[string]any, one for each call to a Logger output method.
-// The keys and values of the map should correspond to the keys and values of the Handler's
-// output. Each group in the output should be represented as its own nested map[string]any.
-// The standard keys slog.TimeKey, slog.LevelKey and slog.MessageKey should be used.
-//
-// If the Handler outputs JSON, then calling [encoding/json.Unmarshal] with a `map[string]any`
-// will create the right data structure.
-//
-// If a Handler intentionally drops an attribute that is checked by a test,
-// then the results function should check for its absence and add it to the map it returns.
-func TestHandler(h slog.Handler, results func() []map[string]any) error {
- cases := []testCase{
- {
- explanation: withSource("this test expects slog.TimeKey, slog.LevelKey and slog.MessageKey"),
- f: func(l *slog.Logger) {
- l.Info("message")
- },
- checks: []check{
- hasKey(slog.TimeKey),
- hasKey(slog.LevelKey),
- hasAttr(slog.MessageKey, "message"),
- },
- },
- {
- explanation: withSource("a Handler should output attributes passed to the logging function"),
- f: func(l *slog.Logger) {
- l.Info("message", "k", "v")
- },
- checks: []check{
- hasAttr("k", "v"),
- },
- },
- {
- explanation: withSource("a Handler should ignore an empty Attr"),
- f: func(l *slog.Logger) {
- l.Info("msg", "a", "b", "", nil, "c", "d")
- },
- checks: []check{
- hasAttr("a", "b"),
- missingKey(""),
- hasAttr("c", "d"),
- },
- },
- {
- explanation: withSource("a Handler should ignore a zero Record.Time"),
- f: func(l *slog.Logger) {
- l.Info("msg", "k", "v")
- },
- mod: func(r *slog.Record) { r.Time = time.Time{} },
- checks: []check{
- missingKey(slog.TimeKey),
- },
- },
- {
- explanation: withSource("a Handler should include the attributes from the WithAttrs method"),
- f: func(l *slog.Logger) {
- l.With("a", "b").Info("msg", "k", "v")
- },
- checks: []check{
- hasAttr("a", "b"),
- hasAttr("k", "v"),
- },
- },
- {
- explanation: withSource("a Handler should handle Group attributes"),
- f: func(l *slog.Logger) {
- l.Info("msg", "a", "b", slog.Group("G", slog.String("c", "d")), "e", "f")
- },
- checks: []check{
- hasAttr("a", "b"),
- inGroup("G", hasAttr("c", "d")),
- hasAttr("e", "f"),
- },
- },
- {
- explanation: withSource("a Handler should ignore an empty group"),
- f: func(l *slog.Logger) {
- l.Info("msg", "a", "b", slog.Group("G"), "e", "f")
- },
- checks: []check{
- hasAttr("a", "b"),
- missingKey("G"),
- hasAttr("e", "f"),
- },
- },
- {
- explanation: withSource("a Handler should inline the Attrs of a group with an empty key"),
- f: func(l *slog.Logger) {
- l.Info("msg", "a", "b", slog.Group("", slog.String("c", "d")), "e", "f")
-
- },
- checks: []check{
- hasAttr("a", "b"),
- hasAttr("c", "d"),
- hasAttr("e", "f"),
- },
- },
- {
- explanation: withSource("a Handler should handle the WithGroup method"),
- f: func(l *slog.Logger) {
- l.WithGroup("G").Info("msg", "a", "b")
- },
- checks: []check{
- hasKey(slog.TimeKey),
- hasKey(slog.LevelKey),
- hasAttr(slog.MessageKey, "msg"),
- missingKey("a"),
- inGroup("G", hasAttr("a", "b")),
- },
- },
- {
- explanation: withSource("a Handler should handle multiple WithGroup and WithAttr calls"),
- f: func(l *slog.Logger) {
- l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg", "e", "f")
- },
- checks: []check{
- hasKey(slog.TimeKey),
- hasKey(slog.LevelKey),
- hasAttr(slog.MessageKey, "msg"),
- hasAttr("a", "b"),
- inGroup("G", hasAttr("c", "d")),
- inGroup("G", inGroup("H", hasAttr("e", "f"))),
- },
- },
- {
- explanation: withSource("a Handler should not output groups for an empty Record"),
- f: func(l *slog.Logger) {
- l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg")
- },
- checks: []check{
- hasKey(slog.TimeKey),
- hasKey(slog.LevelKey),
- hasAttr(slog.MessageKey, "msg"),
- hasAttr("a", "b"),
- inGroup("G", hasAttr("c", "d")),
- inGroup("G", missingKey("H")),
- },
- },
- {
- explanation: withSource("a Handler should call Resolve on attribute values"),
- f: func(l *slog.Logger) {
- l.Info("msg", "k", &replace{"replaced"})
- },
- checks: []check{hasAttr("k", "replaced")},
- },
- {
- explanation: withSource("a Handler should call Resolve on attribute values in groups"),
- f: func(l *slog.Logger) {
- l.Info("msg",
- slog.Group("G",
- slog.String("a", "v1"),
- slog.Any("b", &replace{"v2"})))
- },
- checks: []check{
- inGroup("G", hasAttr("a", "v1")),
- inGroup("G", hasAttr("b", "v2")),
- },
- },
- {
- explanation: withSource("a Handler should call Resolve on attribute values from WithAttrs"),
- f: func(l *slog.Logger) {
- l = l.With("k", &replace{"replaced"})
- l.Info("msg")
- },
- checks: []check{hasAttr("k", "replaced")},
- },
- {
- explanation: withSource("a Handler should call Resolve on attribute values in groups from WithAttrs"),
- f: func(l *slog.Logger) {
- l = l.With(slog.Group("G",
- slog.String("a", "v1"),
- slog.Any("b", &replace{"v2"})))
- l.Info("msg")
- },
- checks: []check{
- inGroup("G", hasAttr("a", "v1")),
- inGroup("G", hasAttr("b", "v2")),
- },
- },
- }
-
- // Run the handler on the test cases.
- for _, c := range cases {
- ht := h
- if c.mod != nil {
- ht = &wrapper{h, c.mod}
- }
- l := slog.New(ht)
- c.f(l)
- }
-
- // Collect and check the results.
- var errs []error
- res := results()
- if g, w := len(res), len(cases); g != w {
- return fmt.Errorf("got %d results, want %d", g, w)
- }
- for i, got := range results() {
- c := cases[i]
- for _, check := range c.checks {
- if p := check(got); p != "" {
- errs = append(errs, fmt.Errorf("%s: %s", p, c.explanation))
- }
- }
- }
- return errors.Join(errs...)
-}
-
-type check func(map[string]any) string
-
-func hasKey(key string) check {
- return func(m map[string]any) string {
- if _, ok := m[key]; !ok {
- return fmt.Sprintf("missing key %q", key)
- }
- return ""
- }
-}
-
-func missingKey(key string) check {
- return func(m map[string]any) string {
- if _, ok := m[key]; ok {
- return fmt.Sprintf("unexpected key %q", key)
- }
- return ""
- }
-}
-
-func hasAttr(key string, wantVal any) check {
- return func(m map[string]any) string {
- if s := hasKey(key)(m); s != "" {
- return s
- }
- gotVal := m[key]
- if !reflect.DeepEqual(gotVal, wantVal) {
- return fmt.Sprintf("%q: got %#v, want %#v", key, gotVal, wantVal)
- }
- return ""
- }
-}
-
-func inGroup(name string, c check) check {
- return func(m map[string]any) string {
- v, ok := m[name]
- if !ok {
- return fmt.Sprintf("missing group %q", name)
- }
- g, ok := v.(map[string]any)
- if !ok {
- return fmt.Sprintf("value for group %q is not map[string]any", name)
- }
- return c(g)
- }
-}
-
-type wrapper struct {
- slog.Handler
- mod func(*slog.Record)
-}
-
-func (h *wrapper) Handle(ctx context.Context, r slog.Record) error {
- h.mod(&r)
- return h.Handler.Handle(ctx, r)
-}
-
-func withSource(s string) string {
- _, file, line, ok := runtime.Caller(1)
- if !ok {
- panic("runtime.Caller failed")
- }
- return fmt.Sprintf("%s (%s:%d)", s, file, line)
-}
-
-type replace struct {
- v any
-}
-
-func (r *replace) LogValue() slog.Value { return slog.AnyValue(r.v) }
-
-func (r *replace) String() string {
- return fmt.Sprintf("<replace(%v)>", r.v)
-}
diff --git a/contrib/go/_std_1.21/src/testing/slogtest/ya.make b/contrib/go/_std_1.21/src/testing/slogtest/ya.make
deleted file mode 100644
index 3d74614016..0000000000
--- a/contrib/go/_std_1.21/src/testing/slogtest/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-GO_LIBRARY()
-
-SRCS(
- slogtest.go
-)
-
-GO_XTEST_SRCS(example_test.go)
-
-END()
-
-RECURSE(
-)