	migrate src-d/go-git -> go-git/go-git (#128)
Merge branch 'master' into vendor-migrate-go-git

migrate src-d/go-git -> go-git/go-git

Co-authored-by: 6543 <6543@obermui.de>
Reviewed-on: https://gitea.com/gitea/tea/pulls/128
Reviewed-by: techknowlogick <techknowlogick@gitea.io>
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>

vendor/github.com/go-git/gcfg/LICENSE | 28 lines added (generated, vendored, new file)
@@ -0,0 +1,28 @@
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
							
								
								
									
vendor/github.com/go-git/gcfg/README | 4 lines added (generated, vendored, new file)
@@ -0,0 +1,4 @@
Gcfg reads INI-style configuration files into Go structs;
supports user-defined types and subsections.

Package docs: https://godoc.org/gopkg.in/gcfg.v1
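
For orientation, a minimal sketch of what tea now pulls in through this vendor path; the struct, section name, and values below are illustrative, not part of this commit:

package main

import (
	"fmt"
	"log"

	"github.com/go-git/gcfg"
)

func main() {
	// Section [server] maps to the Server field; variable names
	// match field names ignoring case.
	var cfg struct {
		Server struct {
			Addr    string
			Verbose bool
		}
	}
	src := "[server]\naddr = :8080\nverbose\n" // blank value sets the bool to true
	if err := gcfg.ReadStringInto(&cfg, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Server.Addr, cfg.Server.Verbose) // :8080 true
}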
							
								
								
									
vendor/github.com/go-git/gcfg/doc.go | 145 lines added (generated, vendored, new file)
@@ -0,0 +1,145 @@
// Package gcfg reads "INI-style" text-based configuration files with
// "name=value" pairs grouped into sections (gcfg files).
//
// This package is still a work in progress; see the sections below for planned
// changes.
//
// Syntax
//
// The syntax is based on that used by git config:
// http://git-scm.com/docs/git-config#_syntax .
// There are some (planned) differences compared to the git config format:
//  - improve data portability:
//    - must be encoded in UTF-8 (for now) and must not contain the 0 byte
//    - include and the "path" type are not supported
//      (path type may be implementable as a user-defined type)
//  - internationalization
//    - section and variable names can contain unicode letters, unicode digits
//      (as defined in http://golang.org/ref/spec#Characters ) and hyphens
//      (U+002D), starting with a unicode letter
//  - disallow potentially ambiguous or misleading definitions:
//    - `[sec.sub]` format is not allowed (deprecated in gitconfig)
//    - `[sec ""]` is not allowed
//      - use `[sec]` for section name "sec" and empty subsection name
//    - (planned) within a single file, definitions must be contiguous for each:
//      - section: '[secA]' -> '[secB]' -> '[secA]' is an error
//      - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
//      - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
//
// Data structure
//
// The functions in this package read values into a user-defined struct.
// Each section corresponds to a struct field in the config struct, and each
// variable in a section corresponds to a data field in the section struct.
// The mapping of each section or variable name to fields is done either based
// on the "gcfg" struct tag or by matching the name of the section or variable,
// ignoring case. In the latter case, hyphens '-' in section and variable names
// correspond to underscores '_' in field names.
// Fields must be exported; to use a section or variable name starting with a
// letter that is neither upper- nor lower-case, prefix the field name with 'X'.
// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
//
// For sections with subsections, the corresponding field in config must be a
// map, rather than a struct, with string keys and pointer-to-struct values.
// Values for subsection variables are stored in the map with the subsection
// name used as the map key.
// (Note that unlike section and variable names, subsection names are case
// sensitive.)
// When using a map, and there is a section with the same section name but
// without a subsection name, its values are stored with the empty string used
// as the key.
// It is possible to provide default values for subsections in the section
// "default-<sectionname>" (or by setting values in the corresponding struct
// field "Default_<sectionname>").
//
// The functions in this package panic if config is not a pointer to a struct,
// or when a field is not of a suitable type (either a struct or a map with
// string keys and pointer-to-struct values).
//
// Parsing of values
//
// The section structs in the config struct may contain single-valued or
// multi-valued variables. Variables of unnamed slice type (that is, a type
// starting with `[]`) are treated as multi-value; all others (including named
// slice types) are treated as single-valued variables.
//
// Single-valued variables are handled based on the type as follows.
// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
// and if necessary, a new instance is allocated.
//
// For types implementing the encoding.TextUnmarshaler interface, the
// UnmarshalText method is used to set the value. Implementing this method is
// the recommended way for parsing user-defined types.
//
// For fields of string kind, the value string is assigned to the field, after
// unquoting and unescaping as needed.
// For fields of bool kind, the field is set to true if the value is "true",
// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
// "0", ignoring case. In addition, single-valued bool fields can be specified
// with a "blank" value (variable name without equals sign and value); in such
// case the value is set to true.
//
// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
// unintuitively handling zero-padded numbers as octal.) Other types having
// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
// decimal, hexadecimal, or octal values.
// Parsing mode for integer types can be overridden using the struct tag option
// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
// (each standing for decimal, hexadecimal, and octal, respectively.)
//
// All other types are parsed using fmt.Sscanf with the "%v" verb.
//
// For multi-valued variables, each individual value is parsed as above and
// appended to the slice. If the first value is specified as a "blank" value
// (variable name without equals sign and value), a new slice is allocated;
// that is, any values previously set in the slice will be ignored.
//
// The types subpackage provides helpers for parsing "enum-like" and integer
// types.
//
// Error handling
//
// There are 3 types of errors:
//
//  - programmer errors / panics:
//    - invalid configuration structure
//  - data errors:
//    - fatal errors:
//      - invalid configuration syntax
//    - warnings:
//      - data that doesn't belong to any part of the config structure
//
// Programmer errors trigger panics. These should be fixed by the programmer
// before releasing code that uses gcfg.
//
// Data errors cause gcfg to return a non-nil error value. This includes the
// case when there are extra unknown key-value definitions in the configuration
// data (extra data).
// However, on some occasions it is desirable to be able to proceed in
// situations when the only data error is that of extra data.
// These errors are handled at a different (warning) priority and can be
// filtered out programmatically. To ignore extra data warnings, wrap the
// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
//
// TODO
//
// The following is a list of changes under consideration:
//  - documentation
//    - self-contained syntax documentation
//    - more practical examples
//    - move TODOs to issue tracker (eventually)
//  - syntax
//    - reconsider valid escape sequences
//      (gitconfig doesn't support \r in value, \t in subsection name, etc.)
//  - reading / parsing gcfg files
//    - define internal representation structure
//    - support multiple inputs (readers, strings, files)
//    - support declaring encoding (?)
//    - support varying fields sets for subsections (?)
//  - writing gcfg files
//  - error handling
//    - make error context accessible programmatically?
//    - limit input size?
//
package gcfg // import "github.com/go-git/gcfg"
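
A hedged sketch of the subsection mapping the doc comment above describes: a section with subsections becomes a map with string keys and pointer-to-struct values. The repoConfig struct and input text are illustrative, not part of this commit:

package main

import (
	"fmt"
	"log"

	"github.com/go-git/gcfg"
)

type repoConfig struct {
	Core struct {
		Bare bool
	}
	// [remote "origin"] etc. land in this map, keyed by subsection name.
	Remote map[string]*struct {
		URL string
	}
}

func main() {
	src := "[core]\nbare = false\n\n[remote \"origin\"]\nurl = https://gitea.com/gitea/tea.git\n"
	var cfg repoConfig
	if err := gcfg.ReadStringInto(&cfg, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Core.Bare, cfg.Remote["origin"].URL)
}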
							
								
								
									
vendor/github.com/go-git/gcfg/errors.go | 41 lines added (generated, vendored, new file)
@@ -0,0 +1,41 @@
package gcfg

import (
	"gopkg.in/warnings.v0"
)

// FatalOnly filters the results of a Read*Into invocation and returns only
// fatal errors. That is, errors (warnings) indicating data for unknown
// sections / variables are ignored. Example invocation:
//
//  err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
//  if err != nil {
//      ...
//
func FatalOnly(err error) error {
	return warnings.FatalOnly(err)
}

func isFatal(err error) bool {
	_, ok := err.(extraData)
	return !ok
}

type extraData struct {
	section    string
	subsection *string
	variable   *string
}

func (e extraData) Error() string {
	s := "can't store data at section \"" + e.section + "\""
	if e.subsection != nil {
		s += ", subsection \"" + *e.subsection + "\""
	}
	if e.variable != nil {
		s += ", variable \"" + *e.variable + "\""
	}
	return s
}

var _ error = extraData{}
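
A short sketch of the FatalOnly pattern from the doc comment above; the config struct and input are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/go-git/gcfg"
)

func main() {
	var cfg struct {
		Section struct{ Known string }
	}
	// "extra" matches no field; that produces a warning, not a fatal error.
	src := "[section]\nknown = yes\nextra = ignored\n"
	if err := gcfg.FatalOnly(gcfg.ReadStringInto(&cfg, src)); err != nil {
		log.Fatal(err) // only syntax/structure errors reach this point
	}
	fmt.Println(cfg.Section.Known) // yes
}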
							
								
								
									
vendor/github.com/go-git/gcfg/go1_0.go | 7 lines added (generated, vendored, new file)
@@ -0,0 +1,7 @@
// +build !go1.2

package gcfg

type textUnmarshaler interface {
	UnmarshalText(text []byte) error
}
							
								
								
									
vendor/github.com/go-git/gcfg/go1_2.go | 9 lines added (generated, vendored, new file)
@@ -0,0 +1,9 @@
// +build go1.2

package gcfg

import (
	"encoding"
)

type textUnmarshaler encoding.TextUnmarshaler
							
								
								
									
vendor/github.com/go-git/gcfg/read.go | 273 lines added (generated, vendored, new file)
@@ -0,0 +1,273 @@
package gcfg

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/go-git/gcfg/scanner"
	"github.com/go-git/gcfg/token"
	"gopkg.in/warnings.v0"
)

var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'}

// no error: invalid literals should be caught by scanner
func unquote(s string) string {
	u, q, esc := make([]rune, 0, len(s)), false, false
	for _, c := range s {
		if esc {
			uc, ok := unescape[c]
			switch {
			case ok:
				u = append(u, uc)
				fallthrough
			case !q && c == '\n':
				esc = false
				continue
			}
			panic("invalid escape sequence")
		}
		switch c {
		case '"':
			q = !q
		case '\\':
			esc = true
		default:
			u = append(u, c)
		}
	}
	if q {
		panic("missing end quote")
	}
	if esc {
		panic("invalid escape sequence")
	}
	return string(u)
}

func read(c *warnings.Collector, callback func(string, string, string, string, bool) error,
	fset *token.FileSet, file *token.File, src []byte) error {
	//
	var s scanner.Scanner
	var errs scanner.ErrorList
	s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
	sect, sectsub := "", ""
	pos, tok, lit := s.Scan()
	errfn := func(msg string) error {
		return fmt.Errorf("%s: %s", fset.Position(pos), msg)
	}
	for {
		if errs.Len() > 0 {
			if err := c.Collect(errs.Err()); err != nil {
				return err
			}
		}
		switch tok {
		case token.EOF:
			return nil
		case token.EOL, token.COMMENT:
			pos, tok, lit = s.Scan()
		case token.LBRACK:
			pos, tok, lit = s.Scan()
			if errs.Len() > 0 {
				if err := c.Collect(errs.Err()); err != nil {
					return err
				}
			}
			if tok != token.IDENT {
				if err := c.Collect(errfn("expected section name")); err != nil {
					return err
				}
			}
			sect, sectsub = lit, ""
			pos, tok, lit = s.Scan()
			if errs.Len() > 0 {
				if err := c.Collect(errs.Err()); err != nil {
					return err
				}
			}
			if tok == token.STRING {
				sectsub = unquote(lit)
				if sectsub == "" {
					if err := c.Collect(errfn("empty subsection name")); err != nil {
						return err
					}
				}
				pos, tok, lit = s.Scan()
				if errs.Len() > 0 {
					if err := c.Collect(errs.Err()); err != nil {
						return err
					}
				}
			}
			if tok != token.RBRACK {
				if sectsub == "" {
					if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
						return err
					}
				}
				if err := c.Collect(errfn("expected right bracket")); err != nil {
					return err
				}
			}
			pos, tok, lit = s.Scan()
			if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
				if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
					return err
				}
			}
			// If a section/subsection header was found, ensure a
			// container object is created, even if there are no
			// variables further down.
			err := c.Collect(callback(sect, sectsub, "", "", true))
			if err != nil {
				return err
			}
		case token.IDENT:
			if sect == "" {
				if err := c.Collect(errfn("expected section header")); err != nil {
					return err
				}
			}
			n := lit
			pos, tok, lit = s.Scan()
			if errs.Len() > 0 {
				return errs.Err()
			}
			blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
			if !blank {
				if tok != token.ASSIGN {
					if err := c.Collect(errfn("expected '='")); err != nil {
						return err
					}
				}
				pos, tok, lit = s.Scan()
				if errs.Len() > 0 {
					if err := c.Collect(errs.Err()); err != nil {
						return err
					}
				}
				if tok != token.STRING {
					if err := c.Collect(errfn("expected value")); err != nil {
						return err
					}
				}
				v = unquote(lit)
				pos, tok, lit = s.Scan()
				if errs.Len() > 0 {
					if err := c.Collect(errs.Err()); err != nil {
						return err
					}
				}
				if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
					if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
						return err
					}
				}
			}
			err := c.Collect(callback(sect, sectsub, n, v, blank))
			if err != nil {
				return err
			}
		default:
			if sect == "" {
				if err := c.Collect(errfn("expected section header")); err != nil {
					return err
				}
			}
			if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
				return err
			}
		}
	}
	panic("never reached")
}

func readInto(config interface{}, fset *token.FileSet, file *token.File,
	src []byte) error {
	//
	c := warnings.NewCollector(isFatal)
	firstPassCallback := func(s string, ss string, k string, v string, bv bool) error {
		return set(c, config, s, ss, k, v, bv, false)
	}
	err := read(c, firstPassCallback, fset, file, src)
	if err != nil {
		return err
	}
	secondPassCallback := func(s string, ss string, k string, v string, bv bool) error {
		return set(c, config, s, ss, k, v, bv, true)
	}
	err = read(c, secondPassCallback, fset, file, src)
	if err != nil {
		return err
	}
	return c.Done()
}

// ReadWithCallback reads gcfg formatted data from reader and calls
// callback with each section and option found.
//
// Callback is called with section, subsection, option key, option value
// and blank value flag as arguments.
//
// When a section is found, callback is called with nil subsection, option key
// and option value.
//
// When a subsection is found, callback is called with nil option key and
// option value.
//
// If blank value flag is true, it means that the value was not set for an option
// (as opposed to set to empty string).
//
// If callback returns an error, ReadWithCallback terminates with an error too.
func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
	src, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}

	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	c := warnings.NewCollector(isFatal)

	return read(c, callback, fset, file, src)
}

// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
	src, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	return readInto(config, fset, file, src)
}

// ReadStringInto reads gcfg formatted data from str and sets the values into
// the corresponding fields in config.
func ReadStringInto(config interface{}, str string) error {
	r := strings.NewReader(str)
	return ReadInto(config, r)
}

// ReadFileInto reads gcfg formatted data from the file filename and sets the
// values into the corresponding fields in config.
func ReadFileInto(config interface{}, filename string) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	src, err := ioutil.ReadAll(f)
	if err != nil {
		return err
	}
	fset := token.NewFileSet()
	file := fset.AddFile(filename, fset.Base(), len(src))
	return readInto(config, fset, file, src)
}
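
A sketch of ReadWithCallback as documented above, streaming each section header and key/value pair without binding to a struct; the input and the printing are illustrative (note the callback actually receives empty strings, not nils, for missing parts):

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/go-git/gcfg"
)

func main() {
	src := "[core]\nbare = false\neditor\n"
	err := gcfg.ReadWithCallback(strings.NewReader(src), func(sect, sub, key, value string, blank bool) error {
		switch {
		case key == "": // section/subsection header only
			fmt.Printf("[%s %q]\n", sect, sub)
		case blank: // variable with no '=' and no value
			fmt.Printf("%s.%s (blank)\n", sect, key)
		default:
			fmt.Printf("%s.%s = %q\n", sect, key, value)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}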
							
								
								
									
vendor/github.com/go-git/gcfg/scanner/errors.go | 121 lines added (generated, vendored, new file)
@@ -0,0 +1,121 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner

import (
	"fmt"
	"io"
	"sort"
)

import (
	"github.com/go-git/gcfg/token"
)

// In an ErrorList, an error is represented by an *Error.
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
	Pos token.Position
	Msg string
}

// Error implements the error interface.
func (e Error) Error() string {
	if e.Pos.Filename != "" || e.Pos.IsValid() {
		// don't print "<unknown position>"
		// TODO(gri) reconsider the semantics of Position.IsValid
		return e.Pos.String() + ": " + e.Msg
	}
	return e.Msg
}

// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error

// Add adds an Error with given position and error message to an ErrorList.
func (p *ErrorList) Add(pos token.Position, msg string) {
	*p = append(*p, &Error{pos, msg})
}

// Reset resets an ErrorList to no errors.
func (p *ErrorList) Reset() { *p = (*p)[0:0] }

// ErrorList implements the sort Interface.
func (p ErrorList) Len() int      { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (p ErrorList) Less(i, j int) bool {
	e := &p[i].Pos
	f := &p[j].Pos
	if e.Filename < f.Filename {
		return true
	}
	if e.Filename == f.Filename {
		return e.Offset < f.Offset
	}
	return false
}

// Sort sorts an ErrorList. *Error entries are sorted by position,
// other errors are sorted by error message, and before any *Error
// entry.
//
func (p ErrorList) Sort() {
	sort.Sort(p)
}

// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
func (p *ErrorList) RemoveMultiples() {
	sort.Sort(p)
	var last token.Position // initial last.Line is != any legal error line
	i := 0
	for _, e := range *p {
		if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
			last = e.Pos
			(*p)[i] = e
			i++
		}
	}
	(*p) = (*p)[0:i]
}

// An ErrorList implements the error interface.
func (p ErrorList) Error() string {
	switch len(p) {
	case 0:
		return "no errors"
	case 1:
		return p[0].Error()
	}
	return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
}

// Err returns an error equivalent to this error list.
// If the list is empty, Err returns nil.
func (p ErrorList) Err() error {
	if len(p) == 0 {
		return nil
	}
	return p
}

// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
func PrintError(w io.Writer, err error) {
	if list, ok := err.(ErrorList); ok {
		for _, e := range list {
			fmt.Fprintf(w, "%s\n", e)
		}
	} else if err != nil {
		fmt.Fprintf(w, "%s\n", err)
	}
}
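
A minimal sketch of ErrorList used as a plain error value, per the interface above; the positions and messages are made up:

package main

import (
	"os"

	"github.com/go-git/gcfg/scanner"
	"github.com/go-git/gcfg/token"
)

func main() {
	var errs scanner.ErrorList
	errs.Add(token.Position{Filename: "app.ini", Offset: 24, Line: 3, Column: 1}, "expected '='")
	errs.Add(token.Position{Filename: "app.ini", Offset: 5, Line: 1, Column: 6}, "illegal character")
	errs.Sort()                               // orders by filename, then offset
	scanner.PrintError(os.Stderr, errs.Err()) // one error per line
}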
							
								
								
									
vendor/github.com/go-git/gcfg/scanner/scanner.go | 342 lines added (generated, vendored, new file)
@@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package scanner implements a scanner for gcfg configuration text.
// It takes a []byte as source which can then be tokenized
// through repeated calls to the Scan method.
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner

import (
	"fmt"
	"path/filepath"
	"unicode"
	"unicode/utf8"
)

import (
	"github.com/go-git/gcfg/token"
)

// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)

// A Scanner holds the scanner's internal state while processing
// a given text.  It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
	// immutable state
	file *token.File  // source file handle
	dir  string       // directory portion of file.Name()
	src  []byte       // source
	err  ErrorHandler // error reporting; or nil
	mode Mode         // scanning mode

	// scanning state
	ch         rune // current character
	offset     int  // character offset
	rdOffset   int  // reading offset (position after current character)
	lineOffset int  // current line offset
	nextVal    bool // next token is expected to be a value

	// public state - ok to modify
	ErrorCount int // number of errors encountered
}

// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
	if s.rdOffset < len(s.src) {
		s.offset = s.rdOffset
		if s.ch == '\n' {
			s.lineOffset = s.offset
			s.file.AddLine(s.offset)
		}
		r, w := rune(s.src[s.rdOffset]), 1
		switch {
		case r == 0:
			s.error(s.offset, "illegal character NUL")
		case r >= 0x80:
			// not ASCII
			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
			if r == utf8.RuneError && w == 1 {
				s.error(s.offset, "illegal UTF-8 encoding")
			}
		}
		s.rdOffset += w
		s.ch = r
	} else {
		s.offset = len(s.src)
		if s.ch == '\n' {
			s.lineOffset = s.offset
			s.file.AddLine(s.offset)
		}
		s.ch = -1 // eof
	}
}

// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint

const (
	ScanComments Mode = 1 << iota // return comments as COMMENT tokens
)

// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same file as
// line information which is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
	// Explicitly initialize all fields since a scanner may be reused.
	if file.Size() != len(src) {
		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
	}
	s.file = file
	s.dir, _ = filepath.Split(file.Name())
	s.src = src
	s.err = err
	s.mode = mode

	s.ch = ' '
	s.offset = 0
	s.rdOffset = 0
	s.lineOffset = 0
	s.ErrorCount = 0
	s.nextVal = false

	s.next()
}

func (s *Scanner) error(offs int, msg string) {
	if s.err != nil {
		s.err(s.file.Position(s.file.Pos(offs)), msg)
	}
	s.ErrorCount++
}

func (s *Scanner) scanComment() string {
	// initial [;#] already consumed
	offs := s.offset - 1 // position of initial [;#]

	for s.ch != '\n' && s.ch >= 0 {
		s.next()
	}
	return string(s.src[offs:s.offset])
}

func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
}

func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}

func (s *Scanner) scanIdentifier() string {
	offs := s.offset
	for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
		s.next()
	}
	return string(s.src[offs:s.offset])
}

func (s *Scanner) scanEscape(val bool) {
	offs := s.offset
	ch := s.ch
	s.next() // always make progress
	switch ch {
	case '\\', '"':
		// ok
	case 'n', 't', 'b':
		if val {
			break // ok
		}
		fallthrough
	default:
		s.error(offs, "unknown escape sequence")
	}
}

func (s *Scanner) scanString() string {
	// '"' opening already consumed
	offs := s.offset - 1

	for s.ch != '"' {
		ch := s.ch
		s.next()
		if ch == '\n' || ch < 0 {
			s.error(offs, "string not terminated")
			break
		}
		if ch == '\\' {
			s.scanEscape(false)
		}
	}

	s.next()

	return string(s.src[offs:s.offset])
}

func stripCR(b []byte) []byte {
	c := make([]byte, len(b))
	i := 0
	for _, ch := range b {
		if ch != '\r' {
			c[i] = ch
			i++
		}
	}
	return c[:i]
}

func (s *Scanner) scanValString() string {
	offs := s.offset

	hasCR := false
	end := offs
	inQuote := false
loop:
	for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
		ch := s.ch
		s.next()
		switch {
		case inQuote && ch == '\\':
			s.scanEscape(true)
		case !inQuote && ch == '\\':
			if s.ch == '\r' {
				hasCR = true
				s.next()
			}
			if s.ch != '\n' {
				s.scanEscape(true)
			} else {
				s.next()
			}
		case ch == '"':
			inQuote = !inQuote
		case ch == '\r':
			hasCR = true
		case ch < 0 || inQuote && ch == '\n':
			s.error(offs, "string not terminated")
			break loop
		}
		if inQuote || !isWhiteSpace(ch) {
			end = s.offset
		}
	}

	lit := s.src[offs:end]
	if hasCR {
		lit = stripCR(lit)
	}

	return string(lit)
}

func isWhiteSpace(ch rune) bool {
	return ch == ' ' || ch == '\t' || ch == '\r'
}

func (s *Scanner) skipWhitespace() {
	for isWhiteSpace(s.ch) {
		s.next()
	}
}

// Scan scans the next token and returns the token position, the token,
// and its literal string if applicable. The source end is indicated by
// token.EOF.
//
// If the returned token is a literal (token.IDENT, token.STRING) or
// token.COMMENT, the literal string has the corresponding value.
//
// If the returned token is token.ILLEGAL, the literal string is the
// offending character.
//
// In all other cases, Scan returns an empty literal string.
//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
// a client may not assume that no error occurred. Instead it
// must check the scanner's ErrorCount or the number of calls
// of the error handler, if there was one installed.
//
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
	s.skipWhitespace()

	// current token start
	pos = s.file.Pos(s.offset)

	// determine token value
	switch ch := s.ch; {
	case s.nextVal:
		lit = s.scanValString()
		tok = token.STRING
		s.nextVal = false
	case isLetter(ch):
		lit = s.scanIdentifier()
		tok = token.IDENT
	default:
		s.next() // always make progress
		switch ch {
		case -1:
			tok = token.EOF
		case '\n':
			tok = token.EOL
		case '"':
			tok = token.STRING
			lit = s.scanString()
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case ';', '#':
			// comment
			lit = s.scanComment()
			if s.mode&ScanComments == 0 {
				// skip comment
				goto scanAgain
			}
			tok = token.COMMENT
		case '=':
			tok = token.ASSIGN
			s.nextVal = true
		default:
			s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
			tok = token.ILLEGAL
			lit = string(ch)
		}
	}

	return
}
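
A sketch of the Scan loop described above, wired up the same way read.go does it; the input text and file name are illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/go-git/gcfg/scanner"
	"github.com/go-git/gcfg/token"
)

func main() {
	src := []byte("[core]\nbare = false ; trailing comment\n")
	fset := token.NewFileSet()
	file := fset.AddFile("app.ini", fset.Base(), len(src))

	// Collect syntax errors into an ErrorList via the handler.
	var s scanner.Scanner
	var errs scanner.ErrorList
	s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, scanner.ScanComments)

	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%v\t%v\t%q\n", fset.Position(pos), tok, lit)
	}
	if errs.Len() > 0 {
		scanner.PrintError(os.Stderr, errs.Err())
	}
}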
							
								
								
									
vendor/github.com/go-git/gcfg/set.go | 332 lines added (generated, vendored, new file)
@@ -0,0 +1,332 @@
package gcfg

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"math/big"
	"reflect"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/go-git/gcfg/types"
	"gopkg.in/warnings.v0"
)

type tag struct {
	ident   string
	intMode string
}

func newTag(ts string) tag {
	t := tag{}
	s := strings.Split(ts, ",")
	t.ident = s[0]
	for _, tse := range s[1:] {
		if strings.HasPrefix(tse, "int=") {
			t.intMode = tse[len("int="):]
		}
	}
	return t
}

func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
	var n string
	r0, _ := utf8.DecodeRuneInString(name)
	if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
		n = "X"
	}
	n += strings.Replace(name, "-", "_", -1)
	f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
		if !v.FieldByName(fieldName).CanSet() {
			return false
		}
		f, _ := v.Type().FieldByName(fieldName)
		t := newTag(f.Tag.Get("gcfg"))
		if t.ident != "" {
			return strings.EqualFold(t.ident, name)
		}
		return strings.EqualFold(n, fieldName)
	})
	if !ok {
		return reflect.Value{}, tag{}
	}
	return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
}

type setter func(destp interface{}, blank bool, val string, t tag) error

var errUnsupportedType = fmt.Errorf("unsupported type")
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")

var setters = []setter{
	typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
}

func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
	dtu, ok := d.(textUnmarshaler)
	if !ok {
		return errUnsupportedType
	}
	if blank {
		return errBlankUnsupported
	}
	return dtu.UnmarshalText([]byte(val))
}

func boolSetter(d interface{}, blank bool, val string, t tag) error {
	if blank {
		reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
		return nil
	}
	b, err := types.ParseBool(val)
	if err == nil {
		reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
	}
	return err
}

func intMode(mode string) types.IntMode {
	var m types.IntMode
	if strings.ContainsAny(mode, "dD") {
		m |= types.Dec
	}
	if strings.ContainsAny(mode, "hH") {
		m |= types.Hex
	}
	if strings.ContainsAny(mode, "oO") {
		m |= types.Oct
	}
	return m
}

var typeModes = map[reflect.Type]types.IntMode{
	reflect.TypeOf(int(0)):    types.Dec | types.Hex,
	reflect.TypeOf(int8(0)):   types.Dec | types.Hex,
	reflect.TypeOf(int16(0)):  types.Dec | types.Hex,
	reflect.TypeOf(int32(0)):  types.Dec | types.Hex,
	reflect.TypeOf(int64(0)):  types.Dec | types.Hex,
	reflect.TypeOf(uint(0)):   types.Dec | types.Hex,
	reflect.TypeOf(uint8(0)):  types.Dec | types.Hex,
	reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
	reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
	reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
	// use default mode (allow dec/hex/oct) for uintptr type
	reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
}

func intModeDefault(t reflect.Type) types.IntMode {
	m, ok := typeModes[t]
	if !ok {
		m = types.Dec | types.Hex | types.Oct
	}
	return m
}

func intSetter(d interface{}, blank bool, val string, t tag) error {
	if blank {
		return errBlankUnsupported
	}
	mode := intMode(t.intMode)
	if mode == 0 {
		mode = intModeDefault(reflect.TypeOf(d).Elem())
	}
	return types.ParseInt(d, val, mode)
}

func stringSetter(d interface{}, blank bool, val string, t tag) error {
	if blank {
		return errBlankUnsupported
	}
	dsp, ok := d.(*string)
	if !ok {
		return errUnsupportedType
	}
	*dsp = val
	return nil
}

var kindSetters = map[reflect.Kind]setter{
	reflect.String:  stringSetter,
	reflect.Bool:    boolSetter,
	reflect.Int:     intSetter,
	reflect.Int8:    intSetter,
	reflect.Int16:   intSetter,
	reflect.Int32:   intSetter,
	reflect.Int64:   intSetter,
	reflect.Uint:    intSetter,
	reflect.Uint8:   intSetter,
	reflect.Uint16:  intSetter,
	reflect.Uint32:  intSetter,
	reflect.Uint64:  intSetter,
	reflect.Uintptr: intSetter,
}

var typeSetters = map[reflect.Type]setter{
	reflect.TypeOf(big.Int{}): intSetter,
}

func typeSetter(d interface{}, blank bool, val string, tt tag) error {
	t := reflect.ValueOf(d).Type().Elem()
	setter, ok := typeSetters[t]
	if !ok {
		return errUnsupportedType
	}
	return setter(d, blank, val, tt)
}

func kindSetter(d interface{}, blank bool, val string, tt tag) error {
	k := reflect.ValueOf(d).Type().Elem().Kind()
	setter, ok := kindSetters[k]
	if !ok {
		return errUnsupportedType
	}
	return setter(d, blank, val, tt)
}

func scanSetter(d interface{}, blank bool, val string, tt tag) error {
	if blank {
		return errBlankUnsupported
	}
	return types.ScanFully(d, val, 'v')
}

func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
	vType reflect.Type) (reflect.Value, error) {
	//
	pv := reflect.New(vType)
	dfltName := "default-" + sect
	dfltField, _ := fieldFold(vCfg, dfltName)
	var err error
	if dfltField.IsValid() {
		b := bytes.NewBuffer(nil)
		ge := gob.NewEncoder(b)
		if err = c.Collect(ge.EncodeValue(dfltField)); err != nil {
			return pv, err
		}
		gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
		if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil {
			return pv, err
		}
	}
	return pv, nil
}

func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
	value string, blankValue bool, subsectPass bool) error {
	//
	vPCfg := reflect.ValueOf(cfg)
	if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
		panic(fmt.Errorf("config must be a pointer to a struct"))
	}
	vCfg := vPCfg.Elem()
	vSect, _ := fieldFold(vCfg, sect)
	if !vSect.IsValid() {
		err := extraData{section: sect}
		return c.Collect(err)
	}
	isSubsect := vSect.Kind() == reflect.Map
	if subsectPass != isSubsect {
		return nil
	}
	if isSubsect {
		vst := vSect.Type()
		if vst.Key().Kind() != reflect.String ||
			vst.Elem().Kind() != reflect.Ptr ||
			vst.Elem().Elem().Kind() != reflect.Struct {
			panic(fmt.Errorf("map field for section must have string keys and "+
				" pointer-to-struct values: section %q", sect))
		}
		if vSect.IsNil() {
			vSect.Set(reflect.MakeMap(vst))
		}
		k := reflect.ValueOf(sub)
		pv := vSect.MapIndex(k)
		if !pv.IsValid() {
			vType := vSect.Type().Elem().Elem()
			var err error
			if pv, err = newValue(c, sect, vCfg, vType); err != nil {
				return err
			}
			vSect.SetMapIndex(k, pv)
		}
		vSect = pv.Elem()
	} else if vSect.Kind() != reflect.Struct {
		panic(fmt.Errorf("field for section must be a map or a struct: "+
			"section %q", sect))
	} else if sub != "" {
		err := extraData{section: sect, subsection: &sub}
		return c.Collect(err)
	}
	// Empty name is a special value, meaning that only the
	// section/subsection object is to be created, with no values set.
	if name == "" {
		return nil
	}
	vVar, t := fieldFold(vSect, name)
	if !vVar.IsValid() {
		var err error
		if isSubsect {
			err = extraData{section: sect, subsection: &sub, variable: &name}
		} else {
			err = extraData{section: sect, variable: &name}
		}
		return c.Collect(err)
	}
	// vVal is either single-valued var, or newly allocated value within multi-valued var
	var vVal reflect.Value
	// multi-value if unnamed slice type
	isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
		vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
	if isMulti && vVar.Kind() == reflect.Ptr {
		if vVar.IsNil() {
			vVar.Set(reflect.New(vVar.Type().Elem()))
		}
		vVar = vVar.Elem()
	}
	if isMulti && blankValue {
		vVar.Set(reflect.Zero(vVar.Type()))
		return nil
	}
	if isMulti {
		vVal = reflect.New(vVar.Type().Elem()).Elem()
	} else {
		vVal = vVar
	}
	isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
	isNew := isDeref && vVal.IsNil()
	// vAddr is address of value to set (dereferenced & allocated as needed)
	var vAddr reflect.Value
	switch {
	case isNew:
		vAddr = reflect.New(vVal.Type().Elem())
	case isDeref && !isNew:
		vAddr = vVal
	default:
		vAddr = vVal.Addr()
	}
	vAddrI := vAddr.Interface()
	err, ok := error(nil), false
	for _, s := range setters {
		err = s(vAddrI, blankValue, value, t)
		if err == nil {
			ok = true
			break
		}
		if err != errUnsupportedType {
			return err
		}
	}
	if !ok {
		// in case all setters returned errUnsupportedType
		return err
	}
	if isNew { // set reference if it was dereferenced and newly allocated
		vVal.Set(vAddr)
	}
	if isMulti { // append if multi-valued
		vVar.Set(reflect.Append(vVar, vVal))
	}
	return nil
}
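
A sketch of the struct tag handling implemented above: the first element of the `gcfg:"..."` tag renames a variable, and ",int=mode" restricts which bases intSetter accepts. The types package that does the actual parsing is not part of this file set, so the octal example assumes its documented behavior; struct and values are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/go-git/gcfg"
)

func main() {
	var cfg struct {
		File struct {
			// "mode" is parsed octal-only because of int=o (assumed per the
			// doc.go description); plain int fields default to dec/0x-hex.
			Mode  uint32 `gcfg:"mode,int=o"`
			Limit int    `gcfg:"limit"`
		}
	}
	src := "[file]\nmode = 0644\nlimit = 0x10\n"
	if err := gcfg.ReadStringInto(&cfg, src); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%o %d\n", cfg.File.Mode, cfg.File.Limit) // 644 16
}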
							
								
								
									
vendor/github.com/go-git/gcfg/token/position.go | 435 lines added (generated, vendored, new file)
							| @@ -0,0 +1,435 @@ | ||||
| // Copyright 2010 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // TODO(gri) consider making this a separate package outside the go directory. | ||||
|  | ||||
| package token | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // ----------------------------------------------------------------------------- | ||||
| // Positions | ||||
|  | ||||
| // Position describes an arbitrary source position | ||||
| // including the file, line, and column location. | ||||
| // A Position is valid if the line number is > 0. | ||||
| // | ||||
| type Position struct { | ||||
| 	Filename string // filename, if any | ||||
| 	Offset   int    // offset, starting at 0 | ||||
| 	Line     int    // line number, starting at 1 | ||||
| 	Column   int    // column number, starting at 1 (character count) | ||||
| } | ||||
|  | ||||
| // IsValid returns true if the position is valid. | ||||
| func (pos *Position) IsValid() bool { return pos.Line > 0 } | ||||
|  | ||||
| // String returns a string in one of several forms: | ||||
| // | ||||
| //	file:line:column    valid position with file name | ||||
| //	line:column         valid position without file name | ||||
| //	file                invalid position with file name | ||||
| //	-                   invalid position without file name | ||||
| // | ||||
| func (pos Position) String() string { | ||||
| 	s := pos.Filename | ||||
| 	if pos.IsValid() { | ||||
| 		if s != "" { | ||||
| 			s += ":" | ||||
| 		} | ||||
| 		s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) | ||||
| 	} | ||||
| 	if s == "" { | ||||
| 		s = "-" | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // Pos is a compact encoding of a source position within a file set. | ||||
| // It can be converted into a Position for a more convenient, but much | ||||
| // larger, representation. | ||||
| // | ||||
| // The Pos value for a given file is a number in the range [base, base+size], | ||||
| // where base and size are specified when adding the file to the file set via | ||||
| // AddFile. | ||||
| // | ||||
| // To create the Pos value for a specific source offset, first add | ||||
| // the respective file to the current file set (via FileSet.AddFile) | ||||
| // and then call File.Pos(offset) for that file. Given a Pos value p | ||||
| // for a specific file set fset, the corresponding Position value is | ||||
| // obtained by calling fset.Position(p). | ||||
| // | ||||
| // Pos values can be compared directly with the usual comparison operators: | ||||
| // If two Pos values p and q are in the same file, comparing p and q is | ||||
| // equivalent to comparing the respective source file offsets. If p and q | ||||
| // are in different files, p < q is true if the file implied by p was added | ||||
| // to the respective file set before the file implied by q. | ||||
| // | ||||
| type Pos int | ||||
|  | ||||
| // The zero value for Pos is NoPos; there is no file and line information | ||||
| // associated with it, and NoPos.IsValid() is false. NoPos is always | ||||
| // smaller than any other Pos value. The corresponding Position value | ||||
| // for NoPos is the zero value for Position. | ||||
| // | ||||
| const NoPos Pos = 0 | ||||
|  | ||||
| // IsValid returns true if the position is valid. | ||||
| func (p Pos) IsValid() bool { | ||||
| 	return p != NoPos | ||||
| } | ||||
|  | ||||
| // ----------------------------------------------------------------------------- | ||||
| // File | ||||
|  | ||||
| // A File is a handle for a file belonging to a FileSet. | ||||
| // A File has a name, size, and line offset table. | ||||
| // | ||||
| type File struct { | ||||
| 	set  *FileSet | ||||
| 	name string // file name as provided to AddFile | ||||
| 	base int    // Pos value range for this file is [base...base+size] | ||||
| 	size int    // file size as provided to AddFile | ||||
|  | ||||
| 	// lines and infos are protected by set.mutex | ||||
| 	lines []int | ||||
| 	infos []lineInfo | ||||
| } | ||||
|  | ||||
| // Name returns the file name of file f as registered with AddFile. | ||||
| func (f *File) Name() string { | ||||
| 	return f.name | ||||
| } | ||||
|  | ||||
| // Base returns the base offset of file f as registered with AddFile. | ||||
| func (f *File) Base() int { | ||||
| 	return f.base | ||||
| } | ||||
|  | ||||
| // Size returns the size of file f as registered with AddFile. | ||||
| func (f *File) Size() int { | ||||
| 	return f.size | ||||
| } | ||||
|  | ||||
| // LineCount returns the number of lines in file f. | ||||
| func (f *File) LineCount() int { | ||||
| 	f.set.mutex.RLock() | ||||
| 	n := len(f.lines) | ||||
| 	f.set.mutex.RUnlock() | ||||
| 	return n | ||||
| } | ||||
|  | ||||
| // AddLine adds the line offset for a new line. | ||||
| // The line offset must be larger than the offset for the previous line | ||||
| // and smaller than the file size; otherwise the line offset is ignored. | ||||
| // | ||||
| func (f *File) AddLine(offset int) { | ||||
| 	f.set.mutex.Lock() | ||||
| 	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { | ||||
| 		f.lines = append(f.lines, offset) | ||||
| 	} | ||||
| 	f.set.mutex.Unlock() | ||||
| } | ||||
|  | ||||
| // SetLines sets the line offsets for a file and returns true if successful. | ||||
| // The line offsets are the offsets of the first character of each line; | ||||
| // for instance for the content "ab\nc\n" the line offsets are {0, 3}. | ||||
| // An empty file has an empty line offset table. | ||||
| // Each line offset must be larger than the offset for the previous line | ||||
| // and smaller than the file size; otherwise SetLines fails and returns | ||||
| // false. | ||||
| // | ||||
| func (f *File) SetLines(lines []int) bool { | ||||
| 	// verify validity of lines table | ||||
| 	size := f.size | ||||
| 	for i, offset := range lines { | ||||
| 		if i > 0 && offset <= lines[i-1] || size <= offset { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// set lines table | ||||
| 	f.set.mutex.Lock() | ||||
| 	f.lines = lines | ||||
| 	f.set.mutex.Unlock() | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // SetLinesForContent sets the line offsets for the given file content. | ||||
| func (f *File) SetLinesForContent(content []byte) { | ||||
| 	var lines []int | ||||
| 	line := 0 | ||||
| 	for offset, b := range content { | ||||
| 		if line >= 0 { | ||||
| 			lines = append(lines, line) | ||||
| 		} | ||||
| 		line = -1 | ||||
| 		if b == '\n' { | ||||
| 			line = offset + 1 | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// set lines table | ||||
| 	f.set.mutex.Lock() | ||||
| 	f.lines = lines | ||||
| 	f.set.mutex.Unlock() | ||||
| } | ||||
|  | ||||
| // A lineInfo object describes alternative file and line number | ||||
| // information (such as provided via a //line comment in a .go | ||||
| // file) for a given file offset. | ||||
| type lineInfo struct { | ||||
| 	// fields are exported to make them accessible to gob | ||||
| 	Offset   int | ||||
| 	Filename string | ||||
| 	Line     int | ||||
| } | ||||
|  | ||||
| // AddLineInfo adds alternative file and line number information for | ||||
| // a given file offset. The offset must be larger than the offset for | ||||
| // the previously added alternative line info and smaller than the | ||||
| // file size; otherwise the information is ignored. | ||||
| // | ||||
| // AddLineInfo is typically used to register alternative position | ||||
| // information for //line filename:line comments in source files. | ||||
| // | ||||
| func (f *File) AddLineInfo(offset int, filename string, line int) { | ||||
| 	f.set.mutex.Lock() | ||||
| 	if i := len(f.infos); (i == 0 || f.infos[i-1].Offset < offset) && offset < f.size { | ||||
| 		f.infos = append(f.infos, lineInfo{offset, filename, line}) | ||||
| 	} | ||||
| 	f.set.mutex.Unlock() | ||||
| } | ||||
|  | ||||
| // Pos returns the Pos value for the given file offset; | ||||
| // the offset must be <= f.Size(). | ||||
| // f.Pos(f.Offset(p)) == p. | ||||
| // | ||||
| func (f *File) Pos(offset int) Pos { | ||||
| 	if offset > f.size { | ||||
| 		panic("illegal file offset") | ||||
| 	} | ||||
| 	return Pos(f.base + offset) | ||||
| } | ||||
|  | ||||
| // Offset returns the offset for the given file position p; | ||||
| // p must be a valid Pos value in that file. | ||||
| // f.Offset(f.Pos(offset)) == offset. | ||||
| // | ||||
| func (f *File) Offset(p Pos) int { | ||||
| 	if int(p) < f.base || int(p) > f.base+f.size { | ||||
| 		panic("illegal Pos value") | ||||
| 	} | ||||
| 	return int(p) - f.base | ||||
| } | ||||
|  | ||||
| // Line returns the line number for the given file position p; | ||||
| // p must be a Pos value in that file or NoPos. | ||||
| // | ||||
| func (f *File) Line(p Pos) int { | ||||
| 	// TODO(gri) this can be implemented much more efficiently | ||||
| 	return f.Position(p).Line | ||||
| } | ||||
|  | ||||
| func searchLineInfos(a []lineInfo, x int) int { | ||||
| 	return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 | ||||
| } | ||||
|  | ||||
| // info returns the file name, line, and column number for a file offset. | ||||
| func (f *File) info(offset int) (filename string, line, column int) { | ||||
| 	filename = f.name | ||||
| 	if i := searchInts(f.lines, offset); i >= 0 { | ||||
| 		line, column = i+1, offset-f.lines[i]+1 | ||||
| 	} | ||||
| 	if len(f.infos) > 0 { | ||||
| 		// almost no files have extra line infos | ||||
| 		if i := searchLineInfos(f.infos, offset); i >= 0 { | ||||
| 			alt := &f.infos[i] | ||||
| 			filename = alt.Filename | ||||
| 			if i := searchInts(f.lines, alt.Offset); i >= 0 { | ||||
| 				line += alt.Line - i - 1 | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (f *File) position(p Pos) (pos Position) { | ||||
| 	offset := int(p) - f.base | ||||
| 	pos.Offset = offset | ||||
| 	pos.Filename, pos.Line, pos.Column = f.info(offset) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Position returns the Position value for the given file position p; | ||||
| // p must be a Pos value in that file or NoPos. | ||||
| // | ||||
| func (f *File) Position(p Pos) (pos Position) { | ||||
| 	if p != NoPos { | ||||
| 		if int(p) < f.base || int(p) > f.base+f.size { | ||||
| 			panic("illegal Pos value") | ||||
| 		} | ||||
| 		pos = f.position(p) | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // ----------------------------------------------------------------------------- | ||||
| // FileSet | ||||
|  | ||||
| // A FileSet represents a set of source files. | ||||
| // Methods of file sets are synchronized; multiple goroutines | ||||
| // may invoke them concurrently. | ||||
| // | ||||
| type FileSet struct { | ||||
| 	mutex sync.RWMutex // protects the file set | ||||
| 	base  int          // base offset for the next file | ||||
| 	files []*File      // list of files in the order added to the set | ||||
| 	last  *File        // cache of last file looked up | ||||
| } | ||||
|  | ||||
| // NewFileSet creates a new file set. | ||||
| func NewFileSet() *FileSet { | ||||
| 	s := new(FileSet) | ||||
| 	s.base = 1 // 0 == NoPos | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // Base returns the minimum base offset that must be provided to | ||||
| // AddFile when adding the next file. | ||||
| // | ||||
| func (s *FileSet) Base() int { | ||||
| 	s.mutex.RLock() | ||||
| 	b := s.base | ||||
| 	s.mutex.RUnlock() | ||||
| 	return b | ||||
| } | ||||
|  | ||||
| // AddFile adds a new file with a given filename, base offset, and file size | ||||
| // to the file set s and returns the file. Multiple files may have the same | ||||
| // name. The base offset must not be smaller than the FileSet's Base(), and | ||||
| // size must not be negative. | ||||
| // | ||||
| // Adding the file will set the file set's Base() value to base + size + 1 | ||||
| // as the minimum base value for the next file. The following relationship | ||||
| // exists between a Pos value p for a given file offset offs: | ||||
| // | ||||
| //	int(p) = base + offs | ||||
| // | ||||
| // with offs in the range [0, size] and thus p in the range [base, base+size]. | ||||
| // For convenience, File.Pos may be used to create file-specific position | ||||
| // values from a file offset. | ||||
| // | ||||
| func (s *FileSet) AddFile(filename string, base, size int) *File { | ||||
| 	s.mutex.Lock() | ||||
| 	defer s.mutex.Unlock() | ||||
| 	if base < s.base || size < 0 { | ||||
| 		panic("illegal base or size") | ||||
| 	} | ||||
| 	// base >= s.base && size >= 0 | ||||
| 	f := &File{s, filename, base, size, []int{0}, nil} | ||||
| 	base += size + 1 // +1 because EOF also has a position | ||||
| 	if base < 0 { | ||||
| 		panic("token.Pos offset overflow (> 2G of source code in file set)") | ||||
| 	} | ||||
| 	// add the file to the file set | ||||
| 	s.base = base | ||||
| 	s.files = append(s.files, f) | ||||
| 	s.last = f | ||||
| 	return f | ||||
| } | ||||
|  | ||||
| // Iterate calls f for the files in the file set in the order they were added | ||||
| // until f returns false. | ||||
| // | ||||
| func (s *FileSet) Iterate(f func(*File) bool) { | ||||
| 	for i := 0; ; i++ { | ||||
| 		var file *File | ||||
| 		s.mutex.RLock() | ||||
| 		if i < len(s.files) { | ||||
| 			file = s.files[i] | ||||
| 		} | ||||
| 		s.mutex.RUnlock() | ||||
| 		if file == nil || !f(file) { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func searchFiles(a []*File, x int) int { | ||||
| 	return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 | ||||
| } | ||||
|  | ||||
| func (s *FileSet) file(p Pos) *File { | ||||
| 	// common case: p is in last file | ||||
| 	if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { | ||||
| 		return f | ||||
| 	} | ||||
| 	// p is not in last file - search all files | ||||
| 	if i := searchFiles(s.files, int(p)); i >= 0 { | ||||
| 		f := s.files[i] | ||||
| 		// f.base <= int(p) by definition of searchFiles | ||||
| 		if int(p) <= f.base+f.size { | ||||
| 			s.last = f | ||||
| 			return f | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // File returns the file that contains the position p. | ||||
| // If no such file is found (for instance for p == NoPos), | ||||
| // the result is nil. | ||||
| // | ||||
| func (s *FileSet) File(p Pos) (f *File) { | ||||
| 	if p != NoPos { | ||||
| 		s.mutex.RLock() | ||||
| 		f = s.file(p) | ||||
| 		s.mutex.RUnlock() | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Position converts a Pos in the fileset into a general Position. | ||||
| func (s *FileSet) Position(p Pos) (pos Position) { | ||||
| 	if p != NoPos { | ||||
| 		s.mutex.RLock() | ||||
| 		if f := s.file(p); f != nil { | ||||
| 			pos = f.position(p) | ||||
| 		} | ||||
| 		s.mutex.RUnlock() | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // ----------------------------------------------------------------------------- | ||||
| // Helper functions | ||||
|  | ||||
| func searchInts(a []int, x int) int { | ||||
| 	// This function body is a manually inlined version of: | ||||
| 	// | ||||
| 	//   return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 | ||||
| 	// | ||||
| 	// With better compiler optimizations, this may not be needed in the | ||||
| 	// future, but at the moment this change improves the go/printer | ||||
| 	// benchmark performance by ~30%. This has a direct impact on the | ||||
| 	// speed of gofmt and thus seems worthwhile (2011-04-29). | ||||
| 	// TODO(gri): Remove this when compilers have caught up. | ||||
| 	i, j := 0, len(a) | ||||
| 	for i < j { | ||||
| 		h := i + (j-i)/2 // avoid overflow when computing h | ||||
| 		// i ≤ h < j | ||||
| 		if a[h] <= x { | ||||
| 			i = h + 1 | ||||
| 		} else { | ||||
| 			j = h | ||||
| 		} | ||||
| 	} | ||||
| 	return i - 1 | ||||
| } | ||||
							
								
								
									
vendor/github.com/go-git/gcfg/token/serialize.go (generated, vendored, new file)
| @@ -0,0 +1,56 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package token | ||||
|  | ||||
| type serializedFile struct { | ||||
| 	// fields correspond 1:1 to fields with same (lower-case) name in File | ||||
| 	Name  string | ||||
| 	Base  int | ||||
| 	Size  int | ||||
| 	Lines []int | ||||
| 	Infos []lineInfo | ||||
| } | ||||
|  | ||||
| type serializedFileSet struct { | ||||
| 	Base  int | ||||
| 	Files []serializedFile | ||||
| } | ||||
|  | ||||
| // Read calls decode to deserialize a file set into s; s must not be nil. | ||||
| func (s *FileSet) Read(decode func(interface{}) error) error { | ||||
| 	var ss serializedFileSet | ||||
| 	if err := decode(&ss); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	s.mutex.Lock() | ||||
| 	s.base = ss.Base | ||||
| 	files := make([]*File, len(ss.Files)) | ||||
| 	for i := 0; i < len(ss.Files); i++ { | ||||
| 		f := &ss.Files[i] | ||||
| 		files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} | ||||
| 	} | ||||
| 	s.files = files | ||||
| 	s.last = nil | ||||
| 	s.mutex.Unlock() | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Write calls encode to serialize the file set s. | ||||
| func (s *FileSet) Write(encode func(interface{}) error) error { | ||||
| 	var ss serializedFileSet | ||||
|  | ||||
| 	s.mutex.Lock() | ||||
| 	ss.Base = s.base | ||||
| 	files := make([]serializedFile, len(s.files)) | ||||
| 	for i, f := range s.files { | ||||
| 		files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} | ||||
| 	} | ||||
| 	ss.Files = files | ||||
| 	s.mutex.Unlock() | ||||
|  | ||||
| 	return encode(ss) | ||||
| } | ||||
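Read and Write accept any codec whose methods match `func(interface{}) error`; as a sketch, `encoding/gob` fits that shape (this pairing is an assumption, not something the package mandates):

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/go-git/gcfg/token"
)

func main() {
	fset := token.NewFileSet()
	fset.AddFile("a.gcfg", fset.Base(), 10)

	// Serialize: gob.Encoder.Encode has the required signature.
	var buf bytes.Buffer
	if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
		panic(err)
	}

	// Deserialize into a fresh set.
	restored := token.NewFileSet()
	if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
		panic(err)
	}
	fmt.Println(restored.Base()) // matches the original set's base
}
```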
							
								
								
									
vendor/github.com/go-git/gcfg/token/token.go (generated, vendored, new file)
| @@ -0,0 +1,83 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package token defines constants representing the lexical tokens of the gcfg | ||||
| // configuration syntax and basic operations on tokens (printing, predicates). | ||||
| // | ||||
| // Note that the API for the token package may change to accommodate new | ||||
| // features or implementation changes in gcfg. | ||||
| // | ||||
| package token | ||||
|  | ||||
| import "strconv" | ||||
|  | ||||
| // Token is the set of lexical tokens of the gcfg configuration syntax. | ||||
| type Token int | ||||
|  | ||||
| // The list of tokens. | ||||
| const ( | ||||
| 	// Special tokens | ||||
| 	ILLEGAL Token = iota | ||||
| 	EOF | ||||
| 	COMMENT | ||||
|  | ||||
| 	literal_beg | ||||
| 	// Identifiers and basic type literals | ||||
| 	// (these tokens stand for classes of literals) | ||||
| 	IDENT  // section-name, variable-name | ||||
| 	STRING // "subsection-name", variable value | ||||
| 	literal_end | ||||
|  | ||||
| 	operator_beg | ||||
| 	// Operators and delimiters | ||||
| 	ASSIGN // = | ||||
| 	LBRACK // [ | ||||
| 	RBRACK // ] | ||||
| 	EOL    // \n | ||||
| 	operator_end | ||||
| ) | ||||
|  | ||||
| var tokens = [...]string{ | ||||
| 	ILLEGAL: "ILLEGAL", | ||||
|  | ||||
| 	EOF:     "EOF", | ||||
| 	COMMENT: "COMMENT", | ||||
|  | ||||
| 	IDENT:  "IDENT", | ||||
| 	STRING: "STRING", | ||||
|  | ||||
| 	ASSIGN: "=", | ||||
| 	LBRACK: "[", | ||||
| 	RBRACK: "]", | ||||
| 	EOL:    "\n", | ||||
| } | ||||
|  | ||||
| // String returns the string corresponding to the token tok. | ||||
| // For operators and delimiters, the string is the actual token character | ||||
| // sequence (e.g., for the token ASSIGN, the string is "="). For all other | ||||
| // tokens the string corresponds to the token constant name (e.g. for the | ||||
| // token IDENT, the string is "IDENT"). | ||||
| // | ||||
| func (tok Token) String() string { | ||||
| 	s := "" | ||||
| 	if 0 <= tok && tok < Token(len(tokens)) { | ||||
| 		s = tokens[tok] | ||||
| 	} | ||||
| 	if s == "" { | ||||
| 		s = "token(" + strconv.Itoa(int(tok)) + ")" | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // Predicates | ||||
|  | ||||
| // IsLiteral returns true for tokens corresponding to identifiers | ||||
| // and basic type literals; it returns false otherwise. | ||||
| // | ||||
| func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } | ||||
|  | ||||
| // IsOperator returns true for tokens corresponding to operators and | ||||
| // delimiters; it returns false otherwise. | ||||
| // | ||||
| func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } | ||||
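For illustration, the `String` method and the predicates behave like this (a tiny sketch using this vendored import path):

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/token"
)

func main() {
	fmt.Println(token.ASSIGN)            // "=" (operators print their literal character)
	fmt.Println(token.IDENT)             // "IDENT" (other tokens print their constant name)
	fmt.Println(token.IDENT.IsLiteral()) // true
	fmt.Println(token.EOL.IsOperator())  // true
}
```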
							
								
								
									
vendor/github.com/go-git/gcfg/types/bool.go (generated, vendored, new file)
| @@ -0,0 +1,23 @@ | ||||
| package types | ||||
|  | ||||
| // BoolValues defines the name and value mappings for ParseBool. | ||||
| var BoolValues = map[string]interface{}{ | ||||
| 	"true": true, "yes": true, "on": true, "1": true, | ||||
| 	"false": false, "no": false, "off": false, "0": false, | ||||
| } | ||||
|  | ||||
| var boolParser = func() *EnumParser { | ||||
| 	ep := &EnumParser{} | ||||
| 	ep.AddVals(BoolValues) | ||||
| 	return ep | ||||
| }() | ||||
|  | ||||
| // ParseBool parses bool values according to the definitions in BoolValues. | ||||
| // Parsing is case-insensitive. | ||||
| func ParseBool(s string) (bool, error) { | ||||
| 	v, err := boolParser.Parse(s) | ||||
| 	if err != nil { | ||||
| 		return false, err | ||||
| 	} | ||||
| 	return v.(bool), nil | ||||
| } | ||||
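A short usage sketch; since matching is case-insensitive, any casing of the names in `BoolValues` is accepted:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	v, err := types.ParseBool("YES")
	fmt.Println(v, err) // true <nil>

	_, err = types.ParseBool("maybe")
	fmt.Println(err) // failed to parse bool `maybe`
}
```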
							
								
								
									
vendor/github.com/go-git/gcfg/types/doc.go (generated, vendored, new file)
| @@ -0,0 +1,4 @@ | ||||
| // Package types defines helpers for type conversions. | ||||
| // | ||||
| // The API for this package is not finalized yet. | ||||
| package types | ||||
							
								
								
									
vendor/github.com/go-git/gcfg/types/enum.go (generated, vendored, new file)
| @@ -0,0 +1,44 @@ | ||||
| package types | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // EnumParser parses "enum" values, i.e. it maps a predefined set of strings | ||||
| // to predefined values. | ||||
| type EnumParser struct { | ||||
| 	Type      string // type name; if not set, use type of first value added | ||||
| 	CaseMatch bool   // if true, matching of strings is case-sensitive | ||||
| 	// PrefixMatch bool | ||||
| 	vals map[string]interface{} | ||||
| } | ||||
|  | ||||
| // AddVals adds strings and values to an EnumParser. | ||||
| func (ep *EnumParser) AddVals(vals map[string]interface{}) { | ||||
| 	if ep.vals == nil { | ||||
| 		ep.vals = make(map[string]interface{}) | ||||
| 	} | ||||
| 	for k, v := range vals { | ||||
| 		if ep.Type == "" { | ||||
| 			ep.Type = reflect.TypeOf(v).Name() | ||||
| 		} | ||||
| 		if !ep.CaseMatch { | ||||
| 			k = strings.ToLower(k) | ||||
| 		} | ||||
| 		ep.vals[k] = v | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Parse parses the string and returns the value or an error. | ||||
| func (ep EnumParser) Parse(s string) (interface{}, error) { | ||||
| 	if !ep.CaseMatch { | ||||
| 		s = strings.ToLower(s) | ||||
| 	} | ||||
| 	v, ok := ep.vals[s] | ||||
| 	if !ok { | ||||
| 		return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s) | ||||
| 	} | ||||
| 	return v, nil | ||||
| } | ||||
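A sketch of building a custom parser; the "color" enum and its values are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	ep := &types.EnumParser{Type: "color"} // hypothetical enum
	ep.AddVals(map[string]interface{}{
		"red":   0xff0000,
		"green": 0x00ff00,
	})

	v, err := ep.Parse("Red") // case-insensitive unless CaseMatch is set
	fmt.Println(v, err)       // 16711680 <nil>
}
```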
							
								
								
									
vendor/github.com/go-git/gcfg/types/int.go (generated, vendored, new file)
| @@ -0,0 +1,86 @@ | ||||
| package types | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // An IntMode is a mode for parsing integer values, representing a set of | ||||
| // accepted bases. | ||||
| type IntMode uint8 | ||||
|  | ||||
| // IntMode values for ParseInt; can be combined using bitwise OR. | ||||
| const ( | ||||
| 	Dec IntMode = 1 << iota | ||||
| 	Hex | ||||
| 	Oct | ||||
| ) | ||||
|  | ||||
| // String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`. | ||||
| func (m IntMode) String() string { | ||||
| 	var modes []string | ||||
| 	if m&Dec != 0 { | ||||
| 		modes = append(modes, "Dec") | ||||
| 	} | ||||
| 	if m&Hex != 0 { | ||||
| 		modes = append(modes, "Hex") | ||||
| 	} | ||||
| 	if m&Oct != 0 { | ||||
| 		modes = append(modes, "Oct") | ||||
| 	} | ||||
| 	return "IntMode(" + strings.Join(modes, "|") + ")" | ||||
| } | ||||
|  | ||||
| var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix") | ||||
|  | ||||
| func prefix0(val string) bool { | ||||
| 	return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0") | ||||
| } | ||||
|  | ||||
| func prefix0x(val string) bool { | ||||
| 	return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x") | ||||
| } | ||||
|  | ||||
| // ParseInt parses val using mode into intptr, which must be a pointer to an | ||||
| // integer kind type. Non-decimal values require a `0` or `0x` prefix in cases | ||||
| // where mode permits ambiguity of base; otherwise the prefix can be omitted. | ||||
| func ParseInt(intptr interface{}, val string, mode IntMode) error { | ||||
| 	val = strings.TrimSpace(val) | ||||
| 	verb := byte(0) | ||||
| 	switch mode { | ||||
| 	case Dec: | ||||
| 		verb = 'd' | ||||
| 	case Dec + Hex: | ||||
| 		if prefix0x(val) { | ||||
| 			verb = 'v' | ||||
| 		} else { | ||||
| 			verb = 'd' | ||||
| 		} | ||||
| 	case Dec + Oct: | ||||
| 		if prefix0(val) && !prefix0x(val) { | ||||
| 			verb = 'v' | ||||
| 		} else { | ||||
| 			verb = 'd' | ||||
| 		} | ||||
| 	case Dec + Hex + Oct: | ||||
| 		verb = 'v' | ||||
| 	case Hex: | ||||
| 		if prefix0x(val) { | ||||
| 			verb = 'v' | ||||
| 		} else { | ||||
| 			verb = 'x' | ||||
| 		} | ||||
| 	case Oct: | ||||
| 		verb = 'o' | ||||
| 	case Hex + Oct: | ||||
| 		if prefix0(val) { | ||||
| 			verb = 'v' | ||||
| 		} else { | ||||
| 			return errIntAmbig | ||||
| 		} | ||||
| 	} | ||||
| 	if verb == 0 { | ||||
| 		panic("unsupported mode") | ||||
| 	} | ||||
| 	return ScanFully(intptr, val, verb) | ||||
| } | ||||
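A sketch of how the mode interacts with prefixes: with `Dec|Hex`, hex needs its `0x` prefix to disambiguate, while an unprefixed value reads as decimal:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	var n int

	if err := types.ParseInt(&n, "0x1f", types.Dec|types.Hex); err == nil {
		fmt.Println(n) // 31 (parsed as hex thanks to the 0x prefix)
	}
	if err := types.ParseInt(&n, "31", types.Dec|types.Hex); err == nil {
		fmt.Println(n) // 31 (no prefix, parsed as decimal)
	}
}
```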
							
								
								
									
vendor/github.com/go-git/gcfg/types/scan.go (generated, vendored, new file)
| @@ -0,0 +1,23 @@ | ||||
| package types | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"reflect" | ||||
| ) | ||||
|  | ||||
| // ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. | ||||
| func ScanFully(ptr interface{}, val string, verb byte) error { | ||||
| 	t := reflect.ValueOf(ptr).Elem().Type() | ||||
| 	// attempt to read extra bytes to make sure the value is consumed | ||||
| 	var b []byte | ||||
| 	n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) | ||||
| 	switch { | ||||
| 	case n < 1 || n == 1 && err != io.EOF: | ||||
| 		return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) | ||||
| 	case n > 1: | ||||
| 		return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) | ||||
| 	} | ||||
| 	// n == 1 && err == io.EOF | ||||
| 	return nil | ||||
| } | ||||
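The trailing `%s` in the format string is what makes the scan "full": any unconsumed characters turn into an error, as this sketch shows:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	var n int
	fmt.Println(types.ScanFully(&n, "42", 'd'), n) // <nil> 42

	err := types.ScanFully(&n, "42abc", 'd')
	fmt.Println(err) // failed to parse "42abc" as int: extra characters "abc"
}
```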
							
								
								
									
vendor/github.com/go-git/go-billy/v5/.gitignore (generated, vendored, new file)
| @@ -0,0 +1,4 @@ | ||||
| /coverage.txt | ||||
| /vendor | ||||
| Gopkg.lock | ||||
| Gopkg.toml | ||||
							
								
								
									
vendor/github.com/go-git/go-billy/v5/LICENSE (generated, vendored, new file)
| @@ -0,0 +1,201 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright 2017 Sourced Technologies S.L. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
vendor/github.com/go-git/go-billy/v5/README.md (generated, vendored, new file)
| @@ -0,0 +1,73 @@ | ||||
| # go-billy [](https://pkg.go.dev/github.com/go-git/go-billy) [](https://github.com/go-git/go-billy/actions?query=workflow%3ATest) | ||||
|  | ||||
| The missing interface filesystem abstraction for Go. | ||||
| Billy implements an interface based on the `os` standard library, allowing you to develop applications without depending on the underlying storage. This makes it virtually free to implement mocks and to test filesystem operations. | ||||
|  | ||||
| Billy was born as part of [go-git/go-git](https://github.com/go-git/go-git) project. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| ```go | ||||
| import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH) | ||||
| import "github.com/go-git/go-billy" // with go modules disabled | ||||
| ``` | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| Billy exposes filesystems using the | ||||
| [`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem). | ||||
| Each filesystem implementation provides a `New` function that returns a new | ||||
| `Filesystem`; its arguments depend on the implementation itself. | ||||
|  | ||||
| The following example caches in memory all readable files in a directory from any | ||||
| billy filesystem implementation. | ||||
|  | ||||
| ```go | ||||
| func LoadToMemory(origin billy.Filesystem, path string) (*memory.Memory, error) { | ||||
| 	memory := memory.New() | ||||
|  | ||||
| 	files, err := origin.ReadDir(path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	for _, file := range files { | ||||
| 		if file.IsDir() { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		src, err := origin.Open(origin.Join(path, file.Name())) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		dst, err := memory.Create(file.Name()) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if _, err = io.Copy(dst, src); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if err := dst.Close(); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if err := src.Close(); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return memory, nil | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ## Why billy? | ||||
|  | ||||
| The billy library deals with storage systems, and Billy is the name of a well-known | ||||
| IKEA bookcase. That's it. | ||||
|  | ||||
| ## License | ||||
|  | ||||
| Apache License Version 2.0, see [LICENSE](LICENSE) | ||||
							
								
								
									
vendor/github.com/go-git/go-billy/v5/fs.go (generated, vendored, new file)
| @@ -0,0 +1,202 @@ | ||||
| package billy | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	ErrReadOnly        = errors.New("read-only filesystem") | ||||
| 	ErrNotSupported    = errors.New("feature not supported") | ||||
| 	ErrCrossedBoundary = errors.New("chroot boundary crossed") | ||||
| ) | ||||
|  | ||||
| // Capability holds the supported features of a billy filesystem. This does | ||||
| // not mean that the capability has to be supported by the underlying storage. | ||||
| // For example, a billy filesystem may support WriteCapability while the | ||||
| // storage is mounted in read-only mode. | ||||
| type Capability uint64 | ||||
|  | ||||
| const ( | ||||
| 	// WriteCapability means that the fs is writable. | ||||
| 	WriteCapability Capability = 1 << iota | ||||
| 	// ReadCapability means that the fs is readable. | ||||
| 	ReadCapability | ||||
| 	// ReadAndWriteCapability is the ability to open a file in read and write mode. | ||||
| 	ReadAndWriteCapability | ||||
| 	// SeekCapability means it is able to move position inside the file. | ||||
| 	SeekCapability | ||||
| 	// TruncateCapability means that a file can be truncated. | ||||
| 	TruncateCapability | ||||
| 	// LockCapability is the ability to lock a file. | ||||
| 	LockCapability | ||||
|  | ||||
| 	// DefaultCapabilities lists all capable features supported by filesystems | ||||
| 	// without Capability interface. This list should not be changed until a | ||||
| 	// major version is released. | ||||
| 	DefaultCapabilities Capability = WriteCapability | ReadCapability | | ||||
| 		ReadAndWriteCapability | SeekCapability | TruncateCapability | | ||||
| 		LockCapability | ||||
|  | ||||
| 	// AllCapabilities lists all capable features. | ||||
| 	AllCapabilities Capability = WriteCapability | ReadCapability | | ||||
| 		ReadAndWriteCapability | SeekCapability | TruncateCapability | | ||||
| 		LockCapability | ||||
| ) | ||||
|  | ||||
| // Filesystem abstracts the operations in a storage-agnostic interface. | ||||
| // Each method implementation mimics the behavior of the equivalent functions | ||||
| // in the os package of the standard library. | ||||
| type Filesystem interface { | ||||
| 	Basic | ||||
| 	TempFile | ||||
| 	Dir | ||||
| 	Symlink | ||||
| 	Chroot | ||||
| } | ||||
|  | ||||
| // Basic abstracts the basic operations in a storage-agnostic interface. | ||||
| type Basic interface { | ||||
| 	// Create creates the named file with mode 0666 (before umask), truncating | ||||
| 	// it if it already exists. If successful, methods on the returned File can | ||||
| 	// be used for I/O; the associated file descriptor has mode O_RDWR. | ||||
| 	Create(filename string) (File, error) | ||||
| 	// Open opens the named file for reading. If successful, methods on the | ||||
| 	// returned file can be used for reading; the associated file descriptor has | ||||
| 	// mode O_RDONLY. | ||||
| 	Open(filename string) (File, error) | ||||
| 	// OpenFile is the generalized open call; most users will use Open or Create | ||||
| 	// instead. It opens the named file with specified flag (O_RDONLY etc.) and | ||||
| 	// perm (0666 etc.), if applicable. If successful, methods on the returned | ||||
| 	// File can be used for I/O. | ||||
| 	OpenFile(filename string, flag int, perm os.FileMode) (File, error) | ||||
| 	// Stat returns a FileInfo describing the named file. | ||||
| 	Stat(filename string) (os.FileInfo, error) | ||||
| 	// Rename renames (moves) oldpath to newpath. If newpath already exists and | ||||
| 	// is not a directory, Rename replaces it. OS-specific restrictions may | ||||
| 	// apply when oldpath and newpath are in different directories. | ||||
| 	Rename(oldpath, newpath string) error | ||||
| 	// Remove removes the named file or directory. | ||||
| 	Remove(filename string) error | ||||
| 	// Join joins any number of path elements into a single path, adding a | ||||
| 	// Separator if necessary. Join calls filepath.Clean on the result; in | ||||
| 	// particular, all empty strings are ignored. On Windows, the result is a | ||||
| 	// UNC path if and only if the first path element is a UNC path. | ||||
| 	Join(elem ...string) string | ||||
| } | ||||
|  | ||||
| type TempFile interface { | ||||
| 	// TempFile creates a new temporary file in the directory dir with a name | ||||
| 	// beginning with prefix, opens the file for reading and writing, and | ||||
| 	// returns the resulting File. If dir is the empty string, TempFile | ||||
| 	// uses the default directory for temporary files (see os.TempDir). | ||||
| 	// Multiple programs calling TempFile simultaneously will not choose the | ||||
| 	// same file. The caller can use f.Name() to find the pathname of the file. | ||||
| 	// It is the caller's responsibility to remove the file when no longer | ||||
| 	// needed. | ||||
| 	TempFile(dir, prefix string) (File, error) | ||||
| } | ||||
|  | ||||
| // Dir abstracts the directory-related operations in a storage-agnostic | ||||
| // interface as an extension to the Basic interface. | ||||
| type Dir interface { | ||||
| 	// ReadDir reads the directory named by dirname and returns a list of | ||||
| 	// directory entries sorted by filename. | ||||
| 	ReadDir(path string) ([]os.FileInfo, error) | ||||
| 	// MkdirAll creates a directory named path, along with any necessary | ||||
| 	// parents, and returns nil, or else returns an error. The permission bits | ||||
| 	// perm are used for all directories that MkdirAll creates. If path is | ||||
| 	// already a directory, MkdirAll does nothing and returns nil. | ||||
| 	MkdirAll(filename string, perm os.FileMode) error | ||||
| } | ||||
|  | ||||
| // Symlink abstracts the symlink-related operations in a storage-agnostic | ||||
| // interface as an extension to the Basic interface. | ||||
| type Symlink interface { | ||||
| 	// Lstat returns a FileInfo describing the named file. If the file is a | ||||
| 	// symbolic link, the returned FileInfo describes the symbolic link. Lstat | ||||
| 	// makes no attempt to follow the link. | ||||
| 	Lstat(filename string) (os.FileInfo, error) | ||||
| 	// Symlink creates a symbolic-link from link to target. target may be an | ||||
| 	// absolute or relative path, and need not refer to an existing node. | ||||
| 	// Parent directories of link are created as necessary. | ||||
| 	Symlink(target, link string) error | ||||
| 	// Readlink returns the target path of link. | ||||
| 	Readlink(link string) (string, error) | ||||
| } | ||||
|  | ||||
| // Change abstracts the FileInfo-change-related operations in a | ||||
| // storage-agnostic interface as an extension to the Basic interface. | ||||
| type Change interface { | ||||
| 	// Chmod changes the mode of the named file to mode. If the file is a | ||||
| 	// symbolic link, it changes the mode of the link's target. | ||||
| 	Chmod(name string, mode os.FileMode) error | ||||
| 	// Lchown changes the numeric uid and gid of the named file. If the file is | ||||
| 	// a symbolic link, it changes the uid and gid of the link itself. | ||||
| 	Lchown(name string, uid, gid int) error | ||||
| 	// Chown changes the numeric uid and gid of the named file. If the file is a | ||||
| 	// symbolic link, it changes the uid and gid of the link's target. | ||||
| 	Chown(name string, uid, gid int) error | ||||
| 	// Chtimes changes the access and modification times of the named file, | ||||
| 	// similar to the Unix utime() or utimes() functions. | ||||
| 	// | ||||
| 	// The underlying filesystem may truncate or round the values to a less | ||||
| 	// precise time unit. | ||||
| 	Chtimes(name string, atime time.Time, mtime time.Time) error | ||||
| } | ||||
|  | ||||
| // Chroot abstracts the chroot-related operations in a storage-agnostic | ||||
| // interface as an extension to the Basic interface. | ||||
| type Chroot interface { | ||||
| 	// Chroot returns a new filesystem of the same type where the new root is | ||||
| 	// the given path. Files outside of the designated directory tree cannot be | ||||
| 	// accessed. | ||||
| 	Chroot(path string) (Filesystem, error) | ||||
| 	// Root returns the root path of the filesystem. | ||||
| 	Root() string | ||||
| } | ||||
|  | ||||
| // File represents a file; it is a subset of os.File. | ||||
| type File interface { | ||||
| 	// Name returns the name of the file as presented to Open. | ||||
| 	Name() string | ||||
| 	io.Writer | ||||
| 	io.Reader | ||||
| 	io.ReaderAt | ||||
| 	io.Seeker | ||||
| 	io.Closer | ||||
| 	// Lock locks the file (like flock, for example). It protects against access from | ||||
| 	// other processes. | ||||
| 	Lock() error | ||||
| 	// Unlock unlocks the file. | ||||
| 	Unlock() error | ||||
| 	// Truncate changes the size of the file. | ||||
| 	Truncate(size int64) error | ||||
| } | ||||
|  | ||||
| // The Capable interface reports the available features of a filesystem. | ||||
| type Capable interface { | ||||
| 	// Capabilities returns the capabilities of a filesystem in bit flags. | ||||
| 	Capabilities() Capability | ||||
| } | ||||
|  | ||||
| // Capabilities returns the features supported by a filesystem. If the FS | ||||
| // does not implement the Capable interface, it returns DefaultCapabilities. | ||||
| func Capabilities(fs Basic) Capability { | ||||
| 	capable, ok := fs.(Capable) | ||||
| 	if !ok { | ||||
| 		return DefaultCapabilities | ||||
| 	} | ||||
|  | ||||
| 	return capable.Capabilities() | ||||
| } | ||||
|  | ||||
| // CapabilityCheck tests the filesystem for the provided capabilities and | ||||
| // returns true in case it supports all of them. | ||||
| func CapabilityCheck(fs Basic, capabilities Capability) bool { | ||||
| 	fsCaps := Capabilities(fs) | ||||
| 	return fsCaps&capabilities == capabilities | ||||
| } | ||||
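A usage sketch: probe capabilities before depending on an optional feature such as locking (memfs, the in-memory implementation shipped with go-billy, is assumed here):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	fs := memfs.New()

	// Filesystems without the Capable interface fall back to DefaultCapabilities.
	if billy.CapabilityCheck(fs, billy.LockCapability) {
		fmt.Println("locking supported")
	} else {
		fmt.Println("locking not supported; use a lock-free fallback")
	}
}
```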
							
								
								
									
vendor/github.com/go-git/go-billy/v5/go.mod (generated, vendored, new file)
| @@ -0,0 +1,10 @@ | ||||
| module github.com/go-git/go-billy/v5 | ||||
|  | ||||
| require ( | ||||
| 	github.com/kr/text v0.2.0 // indirect | ||||
| 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect | ||||
| 	golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 | ||||
| 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f | ||||
| ) | ||||
|  | ||||
| go 1.13 | ||||
14 vendor/github.com/go-git/go-billy/v5/go.sum generated vendored Normal file
| @@ -0,0 +1,14 @@ | ||||
| github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= | ||||
| github.com/go-git/go-billy v1.0.0 h1:bXR6Zu3opPSg0R4dDxqaLglY4rxw7ja7wS16qSpOKL4= | ||||
| github.com/go-git/go-billy v3.1.0+incompatible h1:dwrJ8G2Jt1srYgIJs+lRjA36qBY68O2Lg5idKG8ef5M= | ||||
| github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= | ||||
| github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= | ||||
| github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= | ||||
| github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= | ||||
| github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= | ||||
| github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= | ||||
| github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= | ||||
| golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= | ||||
| golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= | ||||
| gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
242 vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go generated vendored Normal file
| @@ -0,0 +1,242 @@ | ||||
| package chroot | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/go-git/go-billy/v5" | ||||
| 	"github.com/go-git/go-billy/v5/helper/polyfill" | ||||
| ) | ||||
|  | ||||
| // ChrootHelper is a helper to implement billy.Chroot. | ||||
| type ChrootHelper struct { | ||||
| 	underlying billy.Filesystem | ||||
| 	base       string | ||||
| } | ||||
|  | ||||
| // New creates a new filesystem wrapping up the given 'fs'. | ||||
| // The created filesystem has its base in the given directory of the | ||||
| // underlying filesystem. | ||||
| func New(fs billy.Basic, base string) billy.Filesystem { | ||||
| 	return &ChrootHelper{ | ||||
| 		underlying: polyfill.New(fs), | ||||
| 		base:       base, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) underlyingPath(filename string) (string, error) { | ||||
| 	if isCrossBoundaries(filename) { | ||||
| 		return "", billy.ErrCrossedBoundary | ||||
| 	} | ||||
|  | ||||
| 	return fs.Join(fs.Root(), filename), nil | ||||
| } | ||||
|  | ||||
| func isCrossBoundaries(path string) bool { | ||||
| 	path = filepath.ToSlash(path) | ||||
| 	path = filepath.Clean(path) | ||||
|  | ||||
| 	return strings.HasPrefix(path, ".."+string(filepath.Separator)) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Create(filename string) (billy.File, error) { | ||||
| 	fullpath, err := fs.underlyingPath(filename) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	f, err := fs.underlying.Create(fullpath) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return newFile(fs, f, filename), nil | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Open(filename string) (billy.File, error) { | ||||
| 	fullpath, err := fs.underlyingPath(filename) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	f, err := fs.underlying.Open(fullpath) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return newFile(fs, f, filename), nil | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) { | ||||
| 	fullpath, err := fs.underlyingPath(filename) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	f, err := fs.underlying.OpenFile(fullpath, flag, mode) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return newFile(fs, f, filename), nil | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) { | ||||
| 	fullpath, err := fs.underlyingPath(filename) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return fs.underlying.Stat(fullpath) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Rename(from, to string) error { | ||||
| 	var err error | ||||
| 	from, err = fs.underlyingPath(from) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	to, err = fs.underlyingPath(to) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return fs.underlying.Rename(from, to) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Remove(path string) error { | ||||
| 	fullpath, err := fs.underlyingPath(path) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return fs.underlying.Remove(fullpath) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Join(elem ...string) string { | ||||
| 	return fs.underlying.Join(elem...) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) { | ||||
| 	fullpath, err := fs.underlyingPath(dir) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) { | ||||
| 	fullpath, err := fs.underlyingPath(path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return fs.underlying.(billy.Dir).ReadDir(fullpath) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error { | ||||
| 	fullpath, err := fs.underlyingPath(filename) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) { | ||||
| 	fullpath, err := fs.underlyingPath(filename) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return fs.underlying.(billy.Symlink).Lstat(fullpath) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Symlink(target, link string) error { | ||||
| 	target = filepath.FromSlash(target) | ||||
|  | ||||
| 	// only rewrite target if it's already absolute | ||||
| 	if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) { | ||||
| 		target = fs.Join(fs.Root(), target) | ||||
| 		target = filepath.Clean(filepath.FromSlash(target)) | ||||
| 	} | ||||
|  | ||||
| 	link, err := fs.underlyingPath(link) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return fs.underlying.(billy.Symlink).Symlink(target, link) | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Readlink(link string) (string, error) { | ||||
| 	fullpath, err := fs.underlyingPath(link) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	target, err := fs.underlying.(billy.Symlink).Readlink(fullpath) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) { | ||||
| 		return target, nil | ||||
| 	} | ||||
|  | ||||
| 	target, err = filepath.Rel(fs.base, target) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	return string(os.PathSeparator) + target, nil | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) { | ||||
| 	fullpath, err := fs.underlyingPath(path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return New(fs.underlying, fullpath), nil | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Root() string { | ||||
| 	return fs.base | ||||
| } | ||||
|  | ||||
| func (fs *ChrootHelper) Underlying() billy.Basic { | ||||
| 	return fs.underlying | ||||
| } | ||||
|  | ||||
| // Capabilities implements the Capable interface. | ||||
| func (fs *ChrootHelper) Capabilities() billy.Capability { | ||||
| 	return billy.Capabilities(fs.underlying) | ||||
| } | ||||
|  | ||||
| type file struct { | ||||
| 	billy.File | ||||
| 	name string | ||||
| } | ||||
|  | ||||
| func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File { | ||||
| 	filename = fs.Join(fs.Root(), filename) | ||||
| 	filename, _ = filepath.Rel(fs.Root(), filename) | ||||
|  | ||||
| 	return &file{ | ||||
| 		File: f, | ||||
| 		name: filename, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (f *file) Name() string { | ||||
| 	return f.name | ||||
| } | ||||
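A short usage sketch for the chroot helper above, again assuming go-billy's memfs as the underlying filesystem:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/helper/chroot"
	"github.com/go-git/go-billy/v5/memfs" // assumed in-memory implementation
)

func main() {
	// Scope every operation to the /repo subtree of the underlying fs.
	scoped := chroot.New(memfs.New(), "/repo")

	f, err := scoped.Create("config") // lands in /repo/config underneath
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name()) // "config": names stay relative to the new root

	// Escaping the root is rejected by underlyingPath.
	if _, err := scoped.Open("../secret"); err == billy.ErrCrossedBoundary {
		fmt.Println("cross-boundary access blocked")
	}
}
```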
105 vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go generated vendored Normal file
| @@ -0,0 +1,105 @@ | ||||
| package polyfill | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
|  | ||||
| 	"github.com/go-git/go-billy/v5" | ||||
| ) | ||||
|  | ||||
| // Polyfill is a helper that implements all missing methods of billy.Filesystem. | ||||
| type Polyfill struct { | ||||
| 	billy.Basic | ||||
| 	c capabilities | ||||
| } | ||||
|  | ||||
| type capabilities struct{ tempfile, dir, symlink, chroot bool } | ||||
|  | ||||
| // New creates a new filesystem wrapping up 'fs'. It intercepts all the calls | ||||
| // made and returns billy.ErrNotSupported if 'fs' doesn't implement the | ||||
| // corresponding optional billy interface. | ||||
| func New(fs billy.Basic) billy.Filesystem { | ||||
| 	if original, ok := fs.(billy.Filesystem); ok { | ||||
| 		return original | ||||
| 	} | ||||
|  | ||||
| 	h := &Polyfill{Basic: fs} | ||||
|  | ||||
| 	_, h.c.tempfile = h.Basic.(billy.TempFile) | ||||
| 	_, h.c.dir = h.Basic.(billy.Dir) | ||||
| 	_, h.c.symlink = h.Basic.(billy.Symlink) | ||||
| 	_, h.c.chroot = h.Basic.(billy.Chroot) | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) { | ||||
| 	if !h.c.tempfile { | ||||
| 		return nil, billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.TempFile).TempFile(dir, prefix) | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) { | ||||
| 	if !h.c.dir { | ||||
| 		return nil, billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.Dir).ReadDir(path) | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error { | ||||
| 	if !h.c.dir { | ||||
| 		return billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.Dir).MkdirAll(filename, perm) | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) Symlink(target, link string) error { | ||||
| 	if !h.c.symlink { | ||||
| 		return billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.Symlink).Symlink(target, link) | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) Readlink(link string) (string, error) { | ||||
| 	if !h.c.symlink { | ||||
| 		return "", billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.Symlink).Readlink(link) | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) Lstat(path string) (os.FileInfo, error) { | ||||
| 	if !h.c.symlink { | ||||
| 		return nil, billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.Symlink).Lstat(path) | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) { | ||||
| 	if !h.c.chroot { | ||||
| 		return nil, billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.Chroot).Chroot(path) | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) Root() string { | ||||
| 	if !h.c.chroot { | ||||
| 		return string(filepath.Separator) | ||||
| 	} | ||||
|  | ||||
| 	return h.Basic.(billy.Chroot).Root() | ||||
| } | ||||
|  | ||||
| func (h *Polyfill) Underlying() billy.Basic { | ||||
| 	return h.Basic | ||||
| } | ||||
|  | ||||
| // Capabilities implements the Capable interface. | ||||
| func (h *Polyfill) Capabilities() billy.Capability { | ||||
| 	return billy.Capabilities(h.Basic) | ||||
| } | ||||
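A brief sketch of the polyfill's behavior. memfs (assumed, not part of this diff) already implements the full billy.Filesystem, so New returns it unchanged; the ErrNotSupported path for Basic-only implementations is described in the comments:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/helper/polyfill"
	"github.com/go-git/go-billy/v5/memfs" // assumed in-memory implementation
)

func main() {
	// memfs satisfies billy.Filesystem, so New hands it back as-is.
	fs := polyfill.New(memfs.New())

	// Had the wrapped value implemented only billy.Basic, optional
	// methods such as MkdirAll or TempFile would return
	// billy.ErrNotSupported instead of doing any work.
	if err := fs.MkdirAll("a/b/c", 0755); err != nil {
		panic(err)
	}
	infos, _ := fs.ReadDir("a/b")
	fmt.Println(len(infos), "entry") // the directory "c"
}
```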
139 vendor/github.com/go-git/go-billy/v5/osfs/os.go generated vendored Normal file
| @@ -0,0 +1,139 @@ | ||||
| // Package osfs provides a billy filesystem for the OS. | ||||
| package osfs // import "github.com/go-git/go-billy/v5/osfs" | ||||
|  | ||||
| import ( | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/go-git/go-billy/v5" | ||||
| 	"github.com/go-git/go-billy/v5/helper/chroot" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	defaultDirectoryMode = 0755 | ||||
| 	defaultCreateMode    = 0666 | ||||
| ) | ||||
|  | ||||
| // OS is a filesystem backed by the operating system's filesystem (package os). | ||||
| type OS struct{} | ||||
|  | ||||
| // New returns a new OS filesystem. | ||||
| func New(baseDir string) billy.Filesystem { | ||||
| 	return chroot.New(&OS{}, baseDir) | ||||
| } | ||||
|  | ||||
| func (fs *OS) Create(filename string) (billy.File, error) { | ||||
| 	return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) | ||||
| } | ||||
|  | ||||
| func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { | ||||
| 	if flag&os.O_CREATE != 0 { | ||||
| 		if err := fs.createDir(filename); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	f, err := os.OpenFile(filename, flag, perm) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &file{File: f}, err | ||||
| } | ||||
|  | ||||
| func (fs *OS) createDir(fullpath string) error { | ||||
| 	dir := filepath.Dir(fullpath) | ||||
| 	if dir != "." { | ||||
| 		if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) { | ||||
| 	l, err := ioutil.ReadDir(path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	var s = make([]os.FileInfo, len(l)) | ||||
| 	for i, f := range l { | ||||
| 		s[i] = f | ||||
| 	} | ||||
|  | ||||
| 	return s, nil | ||||
| } | ||||
|  | ||||
| func (fs *OS) Rename(from, to string) error { | ||||
| 	if err := fs.createDir(to); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return rename(from, to) | ||||
| } | ||||
|  | ||||
| func (fs *OS) MkdirAll(path string, perm os.FileMode) error { | ||||
| 	return os.MkdirAll(path, defaultDirectoryMode) | ||||
| } | ||||
|  | ||||
| func (fs *OS) Open(filename string) (billy.File, error) { | ||||
| 	return fs.OpenFile(filename, os.O_RDONLY, 0) | ||||
| } | ||||
|  | ||||
| func (fs *OS) Stat(filename string) (os.FileInfo, error) { | ||||
| 	return os.Stat(filename) | ||||
| } | ||||
|  | ||||
| func (fs *OS) Remove(filename string) error { | ||||
| 	return os.Remove(filename) | ||||
| } | ||||
|  | ||||
| func (fs *OS) TempFile(dir, prefix string) (billy.File, error) { | ||||
| 	if err := fs.createDir(dir + string(os.PathSeparator)); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	f, err := ioutil.TempFile(dir, prefix) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &file{File: f}, nil | ||||
| } | ||||
|  | ||||
| func (fs *OS) Join(elem ...string) string { | ||||
| 	return filepath.Join(elem...) | ||||
| } | ||||
|  | ||||
| func (fs *OS) RemoveAll(path string) error { | ||||
| 	return os.RemoveAll(filepath.Clean(path)) | ||||
| } | ||||
|  | ||||
| func (fs *OS) Lstat(filename string) (os.FileInfo, error) { | ||||
| 	return os.Lstat(filepath.Clean(filename)) | ||||
| } | ||||
|  | ||||
| func (fs *OS) Symlink(target, link string) error { | ||||
| 	if err := fs.createDir(link); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return os.Symlink(target, link) | ||||
| } | ||||
|  | ||||
| func (fs *OS) Readlink(link string) (string, error) { | ||||
| 	return os.Readlink(link) | ||||
| } | ||||
|  | ||||
| // Capabilities implements the Capable interface. | ||||
| func (fs *OS) Capabilities() billy.Capability { | ||||
| 	return billy.DefaultCapabilities | ||||
| } | ||||
|  | ||||
| // file is a wrapper for an os.File which adds support for file locking. | ||||
| type file struct { | ||||
| 	*os.File | ||||
| 	m sync.Mutex | ||||
| } | ||||
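A usage sketch for osfs; the base directory is an arbitrary placeholder:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	// New returns a chroot-wrapped filesystem: all paths below are
	// resolved relative to this (placeholder) base directory.
	fs := osfs.New("/tmp/osfs-example")

	// createDir makes the missing parents before OpenFile runs.
	f, err := fs.Create("nested/dir/file.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Exclusive advisory lock: flock on POSIX, LockFileEx on Windows,
	// and a documented no-op on Plan 9 (see the platform files below).
	if err := f.Lock(); err != nil {
		panic(err)
	}
	defer f.Unlock()

	fmt.Fprintln(f, "hello")
}
```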
83 vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go generated vendored Normal file
| @@ -0,0 +1,83 @@ | ||||
| package osfs | ||||
|  | ||||
| import ( | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"syscall" | ||||
| ) | ||||
|  | ||||
| func (f *file) Lock() error { | ||||
| 	// Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. | ||||
| 	// | ||||
| 	// Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open | ||||
| 	// for I/O by only one fid at a time across all clients of the server. If a | ||||
| 	// second open is attempted, it draws an error.” | ||||
| 	// | ||||
| 	// There is no obvious way to implement this function using the exclusive use bit. | ||||
| 	// See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go | ||||
| 	// for how file locking is done by the go tool on Plan 9. | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (f *file) Unlock() error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func rename(from, to string) error { | ||||
| 	// If from and to are in different directories, copy the file | ||||
| 	// since Plan 9 does not support cross-directory rename. | ||||
| 	if filepath.Dir(from) != filepath.Dir(to) { | ||||
| 		fi, err := os.Stat(from) | ||||
| 		if err != nil { | ||||
| 			return &os.LinkError{"rename", from, to, err} | ||||
| 		} | ||||
| 		if fi.Mode().IsDir() { | ||||
| 			return &os.LinkError{"rename", from, to, syscall.EISDIR} | ||||
| 		} | ||||
| 		fromFile, err := os.Open(from) | ||||
| 		if err != nil { | ||||
| 			return &os.LinkError{"rename", from, to, err} | ||||
| 		} | ||||
| 		toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode()) | ||||
| 		if err != nil { | ||||
| 			return &os.LinkError{"rename", from, to, err} | ||||
| 		} | ||||
| 		_, err = io.Copy(toFile, fromFile) | ||||
| 		if err != nil { | ||||
| 			return &os.LinkError{"rename", from, to, err} | ||||
| 		} | ||||
|  | ||||
| 		// Copy mtime and mode from original file. | ||||
| 		// We need only one syscall if we avoid os.Chmod and os.Chtimes. | ||||
| 		dir := fi.Sys().(*syscall.Dir) | ||||
| 		var d syscall.Dir | ||||
| 		d.Null() | ||||
| 		d.Mtime = dir.Mtime | ||||
| 		d.Mode = dir.Mode | ||||
| 		if err = dirwstat(to, &d); err != nil { | ||||
| 			return &os.LinkError{"rename", from, to, err} | ||||
| 		} | ||||
|  | ||||
| 		// Remove original file. | ||||
| 		err = os.Remove(from) | ||||
| 		if err != nil { | ||||
| 			return &os.LinkError{"rename", from, to, err} | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	return os.Rename(from, to) | ||||
| } | ||||
|  | ||||
| func dirwstat(name string, d *syscall.Dir) error { | ||||
| 	var buf [syscall.STATFIXLEN]byte | ||||
|  | ||||
| 	n, err := d.Marshal(buf[:]) | ||||
| 	if err != nil { | ||||
| 		return &os.PathError{"dirwstat", name, err} | ||||
| 	} | ||||
| 	if err = syscall.Wstat(name, buf[:n]); err != nil { | ||||
| 		return &os.PathError{"dirwstat", name, err} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
27 vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go generated vendored Normal file
| @@ -0,0 +1,27 @@ | ||||
| // +build !plan9,!windows | ||||
|  | ||||
| package osfs | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
|  | ||||
| 	"golang.org/x/sys/unix" | ||||
| ) | ||||
|  | ||||
| func (f *file) Lock() error { | ||||
| 	f.m.Lock() | ||||
| 	defer f.m.Unlock() | ||||
|  | ||||
| 	return unix.Flock(int(f.File.Fd()), unix.LOCK_EX) | ||||
| } | ||||
|  | ||||
| func (f *file) Unlock() error { | ||||
| 	f.m.Lock() | ||||
| 	defer f.m.Unlock() | ||||
|  | ||||
| 	return unix.Flock(int(f.File.Fd()), unix.LOCK_UN) | ||||
| } | ||||
|  | ||||
| func rename(from, to string) error { | ||||
| 	return os.Rename(from, to) | ||||
| } | ||||
61 vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go generated vendored Normal file
| @@ -0,0 +1,61 @@ | ||||
| // +build windows | ||||
|  | ||||
| package osfs | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"runtime" | ||||
| 	"unsafe" | ||||
|  | ||||
| 	"golang.org/x/sys/windows" | ||||
| ) | ||||
|  | ||||
| type fileInfo struct { | ||||
| 	os.FileInfo | ||||
| 	name string | ||||
| } | ||||
|  | ||||
| func (fi *fileInfo) Name() string { | ||||
| 	return fi.name | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	kernel32DLL    = windows.NewLazySystemDLL("kernel32.dll") | ||||
| 	lockFileExProc = kernel32DLL.NewProc("LockFileEx") | ||||
| 	unlockFileProc = kernel32DLL.NewProc("UnlockFile") | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	lockfileExclusiveLock = 0x2 | ||||
| ) | ||||
|  | ||||
| func (f *file) Lock() error { | ||||
| 	f.m.Lock() | ||||
| 	defer f.m.Unlock() | ||||
|  | ||||
| 	var overlapped windows.Overlapped | ||||
| 	// err is always non-nil as per sys/windows semantics. | ||||
| 	ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0, | ||||
| 		uintptr(unsafe.Pointer(&overlapped))) | ||||
| 	runtime.KeepAlive(&overlapped) | ||||
| 	if ret == 0 { | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (f *file) Unlock() error { | ||||
| 	f.m.Lock() | ||||
| 	defer f.m.Unlock() | ||||
|  | ||||
| 	// err is always non-nil as per sys/windows semantics. | ||||
| 	ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0) | ||||
| 	if ret == 0 { | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func rename(from, to string) error { | ||||
| 	return os.Rename(from, to) | ||||
| } | ||||
111 vendor/github.com/go-git/go-billy/v5/util/glob.go generated vendored Normal file
| @@ -0,0 +1,111 @@ | ||||
| package util | ||||
|  | ||||
| import ( | ||||
| 	"path/filepath" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/go-git/go-billy/v5" | ||||
| ) | ||||
|  | ||||
| // Glob returns the names of all files matching pattern or nil | ||||
| // if there is no matching file. The syntax of patterns is the same | ||||
| // as in Match. The pattern may describe hierarchical names such as | ||||
| // /usr/*/bin/ed (assuming the Separator is '/'). | ||||
| // | ||||
| // Glob ignores file system errors such as I/O errors reading directories. | ||||
| // The only possible returned error is ErrBadPattern, when pattern | ||||
| // is malformed. | ||||
| // | ||||
| // Function originally from https://golang.org/src/path/filepath/match.go | ||||
| func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) { | ||||
| 	if !hasMeta(pattern) { | ||||
| 		if _, err = fs.Lstat(pattern); err != nil { | ||||
| 			return nil, nil | ||||
| 		} | ||||
| 		return []string{pattern}, nil | ||||
| 	} | ||||
|  | ||||
| 	dir, file := filepath.Split(pattern) | ||||
| 	// Prevent infinite recursion. See issue 15879. | ||||
| 	if dir == pattern { | ||||
| 		return nil, filepath.ErrBadPattern | ||||
| 	} | ||||
|  | ||||
| 	var m []string | ||||
| 	m, err = Glob(fs, cleanGlobPath(dir)) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	for _, d := range m { | ||||
| 		matches, err = glob(fs, d, file, matches) | ||||
| 		if err != nil { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // cleanGlobPath prepares path for glob matching. | ||||
| func cleanGlobPath(path string) string { | ||||
| 	switch path { | ||||
| 	case "": | ||||
| 		return "." | ||||
| 	case string(filepath.Separator): | ||||
| 		// do nothing to the path | ||||
| 		return path | ||||
| 	default: | ||||
| 		return path[0 : len(path)-1] // chop off trailing separator | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // glob searches for files matching pattern in the directory dir | ||||
| // and appends them to matches. If the directory cannot be | ||||
| // opened, it returns the existing matches. New matches are | ||||
| // added in lexicographical order. | ||||
| func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) { | ||||
| 	m = matches | ||||
| 	fi, err := fs.Stat(dir) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if !fi.IsDir() { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	names, _ := readdirnames(fs, dir) | ||||
| 	sort.Strings(names) | ||||
|  | ||||
| 	for _, n := range names { | ||||
| 		matched, err := filepath.Match(pattern, n) | ||||
| 		if err != nil { | ||||
| 			return m, err | ||||
| 		} | ||||
| 		if matched { | ||||
| 			m = append(m, filepath.Join(dir, n)) | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // hasMeta reports whether path contains any of the magic characters | ||||
| // recognized by Match. | ||||
| func hasMeta(path string) bool { | ||||
| 	// TODO(niemeyer): Should other magic characters be added here? | ||||
| 	return strings.ContainsAny(path, "*?[") | ||||
| } | ||||
|  | ||||
| func readdirnames(fs billy.Filesystem, dir string) ([]string, error) { | ||||
| 	files, err := fs.ReadDir(dir) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	var names []string | ||||
| 	for _, file := range files { | ||||
| 		names = append(names, file.Name()) | ||||
| 	} | ||||
|  | ||||
| 	return names, nil | ||||
| } | ||||
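A minimal example of Glob over an in-memory filesystem (memfs assumed; util.WriteFile is defined in util.go later in this diff):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs" // assumed in-memory implementation
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	for _, name := range []string{"src/a.go", "src/b.go", "src/c.txt"} {
		if err := util.WriteFile(fs, name, nil, 0644); err != nil {
			panic(err)
		}
	}

	// Same pattern syntax as filepath.Match; I/O errors while reading
	// directories are ignored, only ErrBadPattern is reported.
	matches, err := util.Glob(fs, "src/*.go")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches) // [src/a.go src/b.go] (with a "/" separator)
}
```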
224 vendor/github.com/go-git/go-billy/v5/util/util.go generated vendored Normal file
| @@ -0,0 +1,224 @@ | ||||
| package util | ||||
|  | ||||
| import ( | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strconv" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-git/go-billy/v5" | ||||
| ) | ||||
|  | ||||
| // RemoveAll removes path and any children it contains. It removes everything it | ||||
| // can but returns the first error it encounters. If the path does not exist, | ||||
| // RemoveAll returns nil (no error). | ||||
| func RemoveAll(fs billy.Basic, path string) error { | ||||
| 	fs, path = getUnderlyingAndPath(fs, path) | ||||
|  | ||||
| 	if r, ok := fs.(removerAll); ok { | ||||
| 		return r.RemoveAll(path) | ||||
| 	} | ||||
|  | ||||
| 	return removeAll(fs, path) | ||||
| } | ||||
|  | ||||
| type removerAll interface { | ||||
| 	RemoveAll(string) error | ||||
| } | ||||
|  | ||||
| func removeAll(fs billy.Basic, path string) error { | ||||
| 	// This implementation is adapted from os.RemoveAll. | ||||
|  | ||||
| 	// Simple case: if Remove works, we're done. | ||||
| 	err := fs.Remove(path) | ||||
| 	if err == nil || os.IsNotExist(err) { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	// Otherwise, is this a directory we need to recurse into? | ||||
| 	dir, serr := fs.Stat(path) | ||||
| 	if serr != nil { | ||||
| 		if os.IsNotExist(serr) { | ||||
| 			return nil | ||||
| 		} | ||||
|  | ||||
| 		return serr | ||||
| 	} | ||||
|  | ||||
| 	if !dir.IsDir() { | ||||
| 		// Not a directory; return the error from Remove. | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	dirfs, ok := fs.(billy.Dir) | ||||
| 	if !ok { | ||||
| 		return billy.ErrNotSupported | ||||
| 	} | ||||
|  | ||||
| 	// Directory. | ||||
| 	fis, err := dirfs.ReadDir(path) | ||||
| 	if err != nil { | ||||
| 		if os.IsNotExist(err) { | ||||
| 			// Race. It was deleted between the Lstat and Open. | ||||
| 			// Return nil per RemoveAll's docs. | ||||
| 			return nil | ||||
| 		} | ||||
|  | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// Remove contents & return first error. | ||||
| 	err = nil | ||||
| 	for _, fi := range fis { | ||||
| 		cpath := fs.Join(path, fi.Name()) | ||||
| 		err1 := removeAll(fs, cpath) | ||||
| 		if err == nil { | ||||
| 			err = err1 | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Remove directory. | ||||
| 	err1 := fs.Remove(path) | ||||
| 	if err1 == nil || os.IsNotExist(err1) { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	if err == nil { | ||||
| 		err = err1 | ||||
| 	} | ||||
|  | ||||
| 	return err | ||||
|  | ||||
| } | ||||
|  | ||||
| // WriteFile writes data to a file named by filename in the given filesystem. | ||||
| // If the file does not exist, WriteFile creates it with permissions perm; | ||||
| // otherwise WriteFile truncates it before writing. | ||||
| func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error { | ||||
| 	f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	n, err := f.Write(data) | ||||
| 	if err == nil && n < len(data) { | ||||
| 		err = io.ErrShortWrite | ||||
| 	} | ||||
|  | ||||
| 	if err1 := f.Close(); err == nil { | ||||
| 		err = err1 | ||||
| 	} | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // Random number state. | ||||
| // We generate random temporary file names so that there's a good | ||||
| // chance the file doesn't exist yet - keeps the number of tries in | ||||
| // TempFile to a minimum. | ||||
| var rand uint32 | ||||
| var randmu sync.Mutex | ||||
|  | ||||
| func reseed() uint32 { | ||||
| 	return uint32(time.Now().UnixNano() + int64(os.Getpid())) | ||||
| } | ||||
|  | ||||
| func nextSuffix() string { | ||||
| 	randmu.Lock() | ||||
| 	r := rand | ||||
| 	if r == 0 { | ||||
| 		r = reseed() | ||||
| 	} | ||||
| 	r = r*1664525 + 1013904223 // constants from Numerical Recipes | ||||
| 	rand = r | ||||
| 	randmu.Unlock() | ||||
| 	return strconv.Itoa(int(1e9 + r%1e9))[1:] | ||||
| } | ||||
|  | ||||
| // TempFile creates a new temporary file in the directory dir with a name | ||||
| // beginning with prefix, opens the file for reading and writing, and returns | ||||
| // the resulting billy.File. If dir is the empty string, TempFile uses the default | ||||
| // directory for temporary files (see os.TempDir). Multiple programs calling | ||||
| // TempFile simultaneously will not choose the same file. The caller can use | ||||
| // f.Name() to find the pathname of the file. It is the caller's responsibility | ||||
| // to remove the file when no longer needed. | ||||
| func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) { | ||||
| 	// This implementation is based on stdlib ioutil.TempFile. | ||||
|  | ||||
| 	if dir == "" { | ||||
| 		dir = os.TempDir() | ||||
| 	} | ||||
|  | ||||
| 	nconflict := 0 | ||||
| 	for i := 0; i < 10000; i++ { | ||||
| 		name := filepath.Join(dir, prefix+nextSuffix()) | ||||
| 		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) | ||||
| 		if os.IsExist(err) { | ||||
| 			if nconflict++; nconflict > 10 { | ||||
| 				randmu.Lock() | ||||
| 				rand = reseed() | ||||
| 				randmu.Unlock() | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
| 		break | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // TempDir creates a new temporary directory in the directory dir | ||||
| // with a name beginning with prefix and returns the path of the | ||||
| // new directory. If dir is the empty string, TempDir uses the | ||||
| // default directory for temporary files (see os.TempDir). | ||||
| // Multiple programs calling TempDir simultaneously | ||||
| // will not choose the same directory. It is the caller's responsibility | ||||
| // to remove the directory when no longer needed. | ||||
| func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { | ||||
| 	// This implementation is based on stdlib ioutil.TempDir | ||||
|  | ||||
| 	if dir == "" { | ||||
| 		dir = os.TempDir() | ||||
| 	} | ||||
|  | ||||
| 	nconflict := 0 | ||||
| 	for i := 0; i < 10000; i++ { | ||||
| 		try := filepath.Join(dir, prefix+nextSuffix()) | ||||
| 		err = fs.MkdirAll(try, 0700) | ||||
| 		if os.IsExist(err) { | ||||
| 			if nconflict++; nconflict > 10 { | ||||
| 				randmu.Lock() | ||||
| 				rand = reseed() | ||||
| 				randmu.Unlock() | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
| 		if os.IsNotExist(err) { | ||||
| 			if _, err := os.Stat(dir); os.IsNotExist(err) { | ||||
| 				return "", err | ||||
| 			} | ||||
| 		} | ||||
| 		if err == nil { | ||||
| 			name = try | ||||
| 		} | ||||
| 		break | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| type underlying interface { | ||||
| 	Underlying() billy.Basic | ||||
| } | ||||
|  | ||||
| func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) { | ||||
| 	u, ok := fs.(underlying) | ||||
| 	if !ok { | ||||
| 		return fs, path | ||||
| 	} | ||||
| 	if ch, ok := fs.(billy.Chroot); ok { | ||||
| 		path = fs.Join(ch.Root(), path) | ||||
| 	} | ||||
|  | ||||
| 	return u.Underlying(), path | ||||
| } | ||||
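A short sketch exercising the helpers above, once more on the assumed memfs implementation:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs" // assumed in-memory implementation
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()

	// WriteFile mirrors ioutil.WriteFile on top of any billy.Basic.
	if err := util.WriteFile(fs, "dir/data.txt", []byte("payload"), 0644); err != nil {
		panic(err)
	}

	// TempFile retries random suffixes until O_CREATE|O_EXCL succeeds.
	tmp, err := util.TempFile(fs, "dir", "scratch-")
	if err != nil {
		panic(err)
	}
	fmt.Println("temp file:", tmp.Name())
	tmp.Close()

	// RemoveAll recurses like os.RemoveAll and treats a missing path
	// as success.
	if err := util.RemoveAll(fs, "dir"); err != nil {
		panic(err)
	}
}
```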
4 vendor/github.com/go-git/go-git/v5/.gitignore generated vendored Normal file
| @@ -0,0 +1,4 @@ | ||||
| coverage.out | ||||
| *~ | ||||
| coverage.txt | ||||
| profile.out | ||||
74 vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md generated vendored Normal file
| @@ -0,0 +1,74 @@ | ||||
| # Contributor Covenant Code of Conduct | ||||
|  | ||||
| ## Our Pledge | ||||
|  | ||||
| In the interest of fostering an open and welcoming environment, we as | ||||
| contributors and maintainers pledge to making participation in our project and | ||||
| our community a harassment-free experience for everyone, regardless of age, body | ||||
| size, disability, ethnicity, gender identity and expression, level of experience, | ||||
| education, socio-economic status, nationality, personal appearance, race, | ||||
| religion, or sexual identity and orientation. | ||||
|  | ||||
| ## Our Standards | ||||
|  | ||||
| Examples of behavior that contributes to creating a positive environment | ||||
| include: | ||||
|  | ||||
| * Using welcoming and inclusive language | ||||
| * Being respectful of differing viewpoints and experiences | ||||
| * Gracefully accepting constructive criticism | ||||
| * Focusing on what is best for the community | ||||
| * Showing empathy towards other community members | ||||
|  | ||||
| Examples of unacceptable behavior by participants include: | ||||
|  | ||||
| * The use of sexualized language or imagery and unwelcome sexual attention or | ||||
|   advances | ||||
| * Trolling, insulting/derogatory comments, and personal or political attacks | ||||
| * Public or private harassment | ||||
| * Publishing others' private information, such as a physical or electronic | ||||
|   address, without explicit permission | ||||
| * Other conduct which could reasonably be considered inappropriate in a | ||||
|   professional setting | ||||
|  | ||||
| ## Our Responsibilities | ||||
|  | ||||
| Project maintainers are responsible for clarifying the standards of acceptable | ||||
| behavior and are expected to take appropriate and fair corrective action in | ||||
| response to any instances of unacceptable behavior. | ||||
|  | ||||
| Project maintainers have the right and responsibility to remove, edit, or | ||||
| reject comments, commits, code, wiki edits, issues, and other contributions | ||||
| that are not aligned to this Code of Conduct, or to ban temporarily or | ||||
| permanently any contributor for other behaviors that they deem inappropriate, | ||||
| threatening, offensive, or harmful. | ||||
|  | ||||
| ## Scope | ||||
|  | ||||
| This Code of Conduct applies both within project spaces and in public spaces | ||||
| when an individual is representing the project or its community. Examples of | ||||
| representing a project or community include using an official project e-mail | ||||
| address, posting via an official social media account, or acting as an appointed | ||||
| representative at an online or offline event. Representation of a project may be | ||||
| further defined and clarified by project maintainers. | ||||
|  | ||||
| ## Enforcement | ||||
|  | ||||
| Instances of abusive, harassing, or otherwise unacceptable behavior may be | ||||
| reported by contacting the project team at conduct@sourced.tech. All | ||||
| complaints will be reviewed and investigated and will result in a response that | ||||
| is deemed necessary and appropriate to the circumstances. The project team is | ||||
| obligated to maintain confidentiality with regard to the reporter of an incident. | ||||
| Further details of specific enforcement policies may be posted separately. | ||||
|  | ||||
| Project maintainers who do not follow or enforce the Code of Conduct in good | ||||
| faith may face temporary or permanent repercussions as determined by other | ||||
| members of the project's leadership. | ||||
|  | ||||
| ## Attribution | ||||
|  | ||||
| This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, | ||||
| available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html | ||||
|  | ||||
| [homepage]: https://www.contributor-covenant.org | ||||
|  | ||||
111 vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md generated vendored Normal file
| @@ -0,0 +1,111 @@ | ||||
| Supported Capabilities | ||||
| ====================== | ||||
|  | ||||
| Here is a non-comprehensive table of git commands and features, indicating | ||||
| whether their go-git equivalents are supported. | ||||
|  | ||||
| | Feature                               | Status | Notes | | ||||
| |---------------------------------------|--------|-------| | ||||
| | **config**                            | | ||||
| | config                                | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | | ||||
| | **getting and creating repositories** | | ||||
| | init                                  | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | | ||||
| | clone                                 | ✔ | Plain clone and equivalents to `--progress`,  `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | | ||||
| | **basic snapshotting** | | ||||
| add                                   | ✔ | Plain add is supported; no other flags are supported. | | ||||
| | status                                | ✔ | | ||||
| | commit                                | ✔ | | ||||
| | reset                                 | ✔ | | ||||
| | rm                                    | ✔ | | ||||
| | mv                                    | ✔ | | ||||
| | **branching and merging** | | ||||
| | branch                                | ✔ | | ||||
| | checkout                              | ✔ | Basic usages of checkout are supported. | | ||||
| | merge                                 | ✖ | | ||||
| | mergetool                             | ✖ | | ||||
| | stash                                 | ✖ | | ||||
| | tag                                   | ✔ | | ||||
| | **sharing and updating projects** | | ||||
| | fetch                                 | ✔ | | ||||
| | pull                                  | ✔ | Only supports merges where the merge can be resolved as a fast-forward. | | ||||
| | push                                  | ✔ | | ||||
| | remote                                | ✔ | | ||||
| | submodule                             | ✔ | | ||||
| | **inspection and comparison** | | ||||
| | show                                  | ✔ | | ||||
| | log                                   | ✔ | | ||||
| | shortlog                              | (see log) | | ||||
| | describe                              | | | ||||
| | **patching** | | ||||
| | apply                                 | ✖ | | ||||
| | cherry-pick                           | ✖ | | ||||
| | diff                                  | ✔ | Patch object with UnifiedDiff output representation | | ||||
| | rebase                                | ✖ | | ||||
| | revert                                | ✖ | | ||||
| | **debugging** | | ||||
| | bisect                                | ✖ | | ||||
| | blame                                 | ✔ | | ||||
| | grep                                  | ✔ | | ||||
| | **email** || | ||||
| | am                                    | ✖ | | ||||
| | apply                                 | ✖ | | ||||
| | format-patch                          | ✖ | | ||||
| | send-email                            | ✖ | | ||||
| | request-pull                          | ✖ | | ||||
| | **external systems** | | ||||
| | svn                                   | ✖ | | ||||
| | fast-import                           | ✖ | | ||||
| | **administration** | | ||||
| | clean                                 | ✔ | | ||||
| | gc                                    | ✖ | | ||||
| | fsck                                  | ✖ | | ||||
| | reflog                                | ✖ | | ||||
| | filter-branch                         | ✖ | | ||||
| | instaweb                              | ✖ | | ||||
| | archive                               | ✖ | | ||||
| | bundle                                | ✖ | | ||||
| | prune                                 | ✖ | | ||||
| | repack                                | ✖ | | ||||
| | **server admin** | | ||||
| | daemon                                | | | ||||
| | update-server-info                    | | | ||||
| | **advanced** | | ||||
| | notes                                 | ✖ | | ||||
| | replace                               | ✖ | | ||||
| | worktree                              | ✖ | | ||||
| | annotate                              | (see blame) | | ||||
| | **gpg** | | ||||
| | git-verify-commit                     | ✔ | | ||||
| | git-verify-tag                        | ✔ | | ||||
| | **plumbing commands** | | ||||
| | cat-file                              | ✔ | | ||||
| | check-ignore                          | | | ||||
| | commit-tree                           | | | ||||
| | count-objects                         | | | ||||
| | diff-index                            | | | ||||
| | for-each-ref                          | ✔ | | ||||
| | hash-object                           | ✔ | | ||||
| | ls-files                              | ✔ | | ||||
| | merge-base                            | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. | | ||||
| | read-tree                             | | | ||||
| | rev-list                              | ✔ | | ||||
| | rev-parse                             | | | ||||
| | show-ref                              | ✔ | | ||||
| | symbolic-ref                          | ✔ | | ||||
| | update-index                          | | | ||||
| | update-ref                            | | | ||||
| | verify-pack                           | | | ||||
| | write-tree                            | | | ||||
| | **protocols** | | ||||
| | http(s):// (dumb)                     | ✖ | | ||||
| | http(s):// (smart)                    | ✔ | | ||||
| | git://                                | ✔ | | ||||
| | ssh://                                | ✔ | | ||||
| | file://                               | ✔ | | ||||
| | custom                                | ✔ | | ||||
| | **other features** | | ||||
| | gitignore                             | ✔ | | ||||
| | gitattributes                         | ✖ | | ||||
| | index version                         | | | ||||
| | packfile version                      | | | ||||
| | push-certs                            | ✖ | | ||||
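To make the clone row above concrete, a minimal sketch of the supported flag equivalents through the go-git v5 API (destination path and URL are placeholders):

```go
package main

import (
	"os"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Roughly `git clone --depth 1 --single-branch --progress <url>`.
	_, err := git.PlainClone("/tmp/tea", false, &git.CloneOptions{
		URL:          "https://gitea.com/gitea/tea.git",
		Depth:        1,
		SingleBranch: true,
		Progress:     os.Stdout,
	})
	if err != nil {
		panic(err)
	}
}
```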
46 vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md generated vendored Normal file
| @@ -0,0 +1,46 @@ | ||||
| # Contributing Guidelines | ||||
|  | ||||
| The source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts | ||||
| contributions via GitHub pull requests.  This document outlines some of the | ||||
| conventions on development workflow, commit message formatting, contact points, | ||||
| and other resources to make it easier to get your contribution accepted. | ||||
|  | ||||
| ## Support Channels | ||||
|  | ||||
| The official support channels, for both users and contributors, are: | ||||
|  | ||||
| - [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions. | ||||
| - GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests. | ||||
|  | ||||
| *Before opening a new issue or submitting a new pull request, it's helpful to | ||||
| search the project - it's likely that another user has already reported the | ||||
| issue you're facing, or it's a known issue that we're already aware of. | ||||
|  | ||||
|  | ||||
| ## How to Contribute | ||||
|  | ||||
| Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project. | ||||
| In order for a PR to be accepted it needs to pass a list of requirements: | ||||
|  | ||||
| - You should be able to perform the same operation using `git`. We don't accept features that are not implemented in the official git implementation. | ||||
| - The expected behavior must match the [official git implementation](https://github.com/git/git). | ||||
| - The actual behavior must be correctly explained with natural language and providing a minimum working example in Go that reproduces it. | ||||
| - All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/). | ||||
| - They should in general include tests, and those shall pass. | ||||
| - If the PR is a bug fix, it has to include a suite of unit tests exercising the fixed behavior. | ||||
| - If the PR is a new feature, it has to come with a suite of unit tests, that tests the new functionality. | ||||
| - In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git. | ||||
|  | ||||
| ### Format of the commit message | ||||
|  | ||||
| Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to: | ||||
|  | ||||
| ``` | ||||
| plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623 | ||||
| ``` | ||||
|  | ||||
| The format can be described more formally as follows: | ||||
|  | ||||
| ``` | ||||
| <package>: <subpackage>, <what changed>. [Fixes #<issue-number>] | ||||
| ``` | ||||
201 vendor/github.com/go-git/go-git/v5/LICENSE generated vendored Normal file
| @@ -0,0 +1,201 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright 2018 Sourced Technologies, S.L. | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
										38
									
								
								vendor/github.com/go-git/go-git/v5/Makefile
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,38 @@ | ||||
| # General | ||||
| WORKDIR = $(PWD) | ||||
|  | ||||
| # Go parameters | ||||
| GOCMD = go | ||||
| GOTEST = $(GOCMD) test  | ||||
|  | ||||
| # Git config | ||||
| GIT_VERSION ?= | ||||
| GIT_DIST_PATH ?= $(PWD)/.git-dist | ||||
| GIT_REPOSITORY = https://github.com/git/git.git | ||||
|  | ||||
| # Coverage | ||||
| COVERAGE_REPORT = coverage.out | ||||
| COVERAGE_MODE = count | ||||
|  | ||||
| build-git: | ||||
| 	@if [ -f $(GIT_DIST_PATH)/git ]; then \ | ||||
| 		echo "nothing to do, using cache $(GIT_DIST_PATH)"; \ | ||||
| 	else \ | ||||
| 		git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \ | ||||
| 		cd $(GIT_DIST_PATH); \ | ||||
| 		make configure; \ | ||||
| 		./configure; \ | ||||
| 		make all; \ | ||||
| 	fi | ||||
|  | ||||
| test: | ||||
| 	@echo "running against `git version`"; \ | ||||
| 	$(GOTEST) ./... | ||||
|  | ||||
| test-coverage: | ||||
| 	@echo "running against `git version`"; \ | ||||
| 	echo "" > $(COVERAGE_REPORT); \ | ||||
| 	$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./... | ||||
|  | ||||
| clean: | ||||
| 	rm -rf $(GIT_DIST_PATH) | ||||
							
								
								
									
										131
									
								
								vendor/github.com/go-git/go-git/v5/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,131 @@ | ||||
|  | ||||
|  [](https://godoc.org/github.com/go-git/go-git/v5) [](https://github.com/go-git/go-git/actions) [](https://goreportcard.com/report/github.com/go-git/go-git/v5) | ||||
|  | ||||
| *go-git* is a highly extensible git implementation library written in **pure Go**. | ||||
|  | ||||
| It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://godoc.org/github.com/go-git/go-git/v5/plumbing/storer) interface. | ||||
|  | ||||
| It has been actively developed since 2015 and is used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/), and [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), as well as by many other libraries and tools. | ||||
|  | ||||
| Project Status | ||||
| -------------- | ||||
|  | ||||
| After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of updates for four months, and the need for a hard fork, the project is **now back to normality**. | ||||
|  | ||||
| The project is currently actively maintained by individual contributors, including several of the original authors, but is also backed by a new company, `gitsight`, where `go-git` is a critical component used at scale. | ||||
|  | ||||
|  | ||||
| Comparison with git | ||||
| ------------------- | ||||
|  | ||||
| *go-git* aims to be fully compatible with [git](https://github.com/git/git): all the *porcelain* operations are implemented to work exactly as *git* does. | ||||
|  | ||||
| *git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md). | ||||
|  | ||||
|  | ||||
| Installation | ||||
| ------------ | ||||
|  | ||||
| The recommended way to install *go-git* is: | ||||
|  | ||||
| ```go | ||||
| import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH) | ||||
| import "github.com/go-git/go-git" // with go modules disabled | ||||
| ``` | ||||
|  | ||||
|  | ||||
| Examples | ||||
| -------- | ||||
|  | ||||
| > Please note that the `CheckIfError` and `Info` functions used in the examples are defined in the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) purely as helpers for the examples. | ||||
|  | ||||
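| A minimal sketch of what these helpers could look like (hypothetical; shown | ||||
| here only so the snippets below are self-contained): | ||||
|  | ||||
| ```go | ||||
| package examples | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| ) | ||||
|  | ||||
| // CheckIfError aborts the program if an unexpected error occurred. | ||||
| func CheckIfError(err error) { | ||||
| 	if err == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	fmt.Fprintf(os.Stderr, "error: %s\n", err) | ||||
| 	os.Exit(1) | ||||
| } | ||||
|  | ||||
| // Info prints the equivalent git command being demonstrated. | ||||
| func Info(format string, args ...interface{}) { | ||||
| 	fmt.Printf(format+"\n", args...) | ||||
| } | ||||
| ``` | ||||
|  | ||||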
|  | ||||
| ### Basic example | ||||
|  | ||||
| A basic example that mimics the standard `git clone` command: | ||||
|  | ||||
| ```go | ||||
| // Clone the given repository to the given directory | ||||
| Info("git clone https://github.com/go-git/go-git") | ||||
|  | ||||
| _, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{ | ||||
|     URL:      "https://github.com/go-git/go-git", | ||||
|     Progress: os.Stdout, | ||||
| }) | ||||
|  | ||||
| CheckIfError(err) | ||||
| ``` | ||||
|  | ||||
| Outputs: | ||||
| ``` | ||||
| Counting objects: 4924, done. | ||||
| Compressing objects: 100% (1333/1333), done. | ||||
| Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533 | ||||
| ``` | ||||
|  | ||||
| ### In-memory example | ||||
|  | ||||
| Cloning a repository into memory and printing the history of HEAD, just like `git log` does: | ||||
|  | ||||
|  | ||||
| ```go | ||||
| // Clones the given repository in memory, creating the remote, the local | ||||
| // branches and fetching the objects, exactly as: | ||||
| Info("git clone https://github.com/go-git/go-billy") | ||||
|  | ||||
| r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{ | ||||
|     URL: "https://github.com/go-git/go-billy", | ||||
| }) | ||||
|  | ||||
| CheckIfError(err) | ||||
|  | ||||
| // Gets the commit history from HEAD, just like this command: | ||||
| Info("git log") | ||||
|  | ||||
| // ... retrieves the branch pointed to by HEAD | ||||
| ref, err := r.Head() | ||||
| CheckIfError(err) | ||||
|  | ||||
|  | ||||
| // ... retrieves the commit history | ||||
| cIter, err := r.Log(&git.LogOptions{From: ref.Hash()}) | ||||
| CheckIfError(err) | ||||
|  | ||||
| // ... just iterates over the commits, printing each one | ||||
| err = cIter.ForEach(func(c *object.Commit) error { | ||||
| 	fmt.Println(c) | ||||
| 	return nil | ||||
| }) | ||||
| CheckIfError(err) | ||||
| ``` | ||||
|  | ||||
| Outputs: | ||||
| ``` | ||||
| commit ded8054fd0c3994453e9c8aacaf48d118d42991e | ||||
| Author: Santiago M. Mola <santi@mola.io> | ||||
| Date:   Sat Nov 12 21:18:41 2016 +0100 | ||||
|  | ||||
|     index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9) | ||||
|  | ||||
| commit df707095626f384ce2dc1a83b30f9a21d69b9dfc | ||||
| Author: Santiago M. Mola <santi@mola.io> | ||||
| Date:   Fri Nov 11 13:23:22 2016 +0100 | ||||
|  | ||||
|     readwriter: fix bug when writing index. (#10) | ||||
|  | ||||
|     When using ReadWriter on an existing siva file, absolute offset for | ||||
|     index entries was not being calculated correctly. | ||||
| ... | ||||
| ``` | ||||
|  | ||||
| You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder. | ||||
|  | ||||
| Contribute | ||||
| ---------- | ||||
|  | ||||
| [Contributions](https://github.com/go-git/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome; if you are interested, please take a look at | ||||
| our [Contributing Guidelines](CONTRIBUTING.md). | ||||
|  | ||||
| License | ||||
| ------- | ||||
| Apache License Version 2.0, see [LICENSE](LICENSE) | ||||
							
								
								
									
										302
									
								
								vendor/github.com/go-git/go-git/v5/blame.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,302 @@ | ||||
| package git | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/object" | ||||
| 	"github.com/go-git/go-git/v5/utils/diff" | ||||
| ) | ||||
|  | ||||
| // BlameResult represents the result of a Blame operation. | ||||
| type BlameResult struct { | ||||
| 	// Path is the path of the File that we're blaming. | ||||
| 	Path string | ||||
| 	// Rev (Revision) is the hash of the specified Commit used to generate this result. | ||||
| 	Rev plumbing.Hash | ||||
| 	// Lines contains every line with its authorship. | ||||
| 	Lines []*Line | ||||
| } | ||||
|  | ||||
| // Blame returns a BlameResult with the information about the last author of | ||||
| // each line from file `path` at commit `c`. | ||||
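| // | ||||
| // A typical call looks like this (a sketch; it assumes an already-opened | ||||
| // Repository r and ignores error handling): | ||||
| // | ||||
| //	ref, _ := r.Head() | ||||
| //	commit, _ := r.CommitObject(ref.Hash()) | ||||
| //	result, _ := Blame(commit, "README.md") | ||||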
| func Blame(c *object.Commit, path string) (*BlameResult, error) { | ||||
| 	// The file to blame is identified by the input arguments: | ||||
| 	// commit and path. commit is a Commit object obtained from a Repository. Path | ||||
| 	// represents a path to a specific file contained into the repository. | ||||
| 	// | ||||
| 	// Blaming a file is a two step process: | ||||
| 	// | ||||
| 	// 1. Create a linear history of the commits affecting a file. We use | ||||
| 	// revlist.New for that. | ||||
| 	// | ||||
| 	// 2. Then build a graph with a node for every line in every file in | ||||
| 	// the history of the file. | ||||
| 	// | ||||
| 	// Each node is assigned a commit: Start by the nodes in the first | ||||
| 	// commit. Assign that commit as the creator of all its lines. | ||||
| 	// | ||||
| 	// Then jump to the nodes in the next commit, and calculate the diff | ||||
| 	// between the two files. Newly created lines get | ||||
| 	// assigned the new commit as their origin. Modified lines also get | ||||
| 	// this new commit. Untouched lines retain the old commit. | ||||
| 	// | ||||
| 	// All this work is done in the assignOrigin function which holds all | ||||
| 	// the internal relevant data in a "blame" struct, that is not | ||||
| 	// exported. | ||||
| 	// | ||||
| 	// TODO: ways to improve the efficiency of this function: | ||||
| 	// 1. Improve revlist | ||||
| 	// 2. Improve how to traverse the history (example a backward traversal will | ||||
| 	// be much more efficient) | ||||
| 	// | ||||
| 	// TODO: ways to improve the function in general: | ||||
| 	// 1. Add memoization between revlist and assign. | ||||
| 	// 2. It is using much more memory than needed, see the TODOs below. | ||||
|  | ||||
| 	b := new(blame) | ||||
| 	b.fRev = c | ||||
| 	b.path = path | ||||
|  | ||||
| 	// get all the file revisions | ||||
| 	if err := b.fillRevs(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// calculate the line tracking graph and fill in | ||||
| 	// file contents in data. | ||||
| 	if err := b.fillGraphAndData(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	file, err := b.fRev.File(b.path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	finalLines, err := file.Lines() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// Each node (line) holds the commit where it was introduced or | ||||
| 	// last modified. To achieve that we use the FORWARD algorithm | ||||
| 	// described in Zimmermann, et al. "Mining Version Archives for | ||||
| 	// Co-changed Lines", in proceedings of the Mining Software | ||||
| 	// Repositories workshop, Shanghai, May 22-23, 2006. | ||||
| 	lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &BlameResult{ | ||||
| 		Path:  path, | ||||
| 		Rev:   c.Hash, | ||||
| 		Lines: lines, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // Line values represent the contents and author of a line in BlameResult values. | ||||
| type Line struct { | ||||
| 	// Author is the email address of the last author that modified the line. | ||||
| 	Author string | ||||
| 	// Text is the original text of the line. | ||||
| 	Text string | ||||
| 	// Date is when the original text of the line was introduced | ||||
| 	Date time.Time | ||||
| 	// Hash is the commit hash that introduced the original line | ||||
| 	Hash plumbing.Hash | ||||
| } | ||||
|  | ||||
| func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line { | ||||
| 	return &Line{ | ||||
| 		Author: author, | ||||
| 		Text:   text, | ||||
| 		Hash:   hash, | ||||
| 		Date:   date, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { | ||||
| 	lcontents := len(contents) | ||||
| 	lcommits := len(commits) | ||||
|  | ||||
| 	if lcontents != lcommits { | ||||
| 		if lcontents == lcommits-1 && contents[lcontents-1] != "\n" { | ||||
| 			contents = append(contents, "\n") | ||||
| 		} else { | ||||
| 			return nil, errors.New("contents and commits have different length") | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	result := make([]*Line, 0, lcontents) | ||||
| 	for i := range contents { | ||||
| 		result = append(result, newLine( | ||||
| 			commits[i].Author.Email, contents[i], | ||||
| 			commits[i].Author.When, commits[i].Hash, | ||||
| 		)) | ||||
| 	} | ||||
|  | ||||
| 	return result, nil | ||||
| } | ||||
|  | ||||
| // this struct is internally used by the blame function to hold its | ||||
| // inputs, outputs and state. | ||||
| type blame struct { | ||||
| 	// the path of the file to blame | ||||
| 	path string | ||||
| 	// the commit of the final revision of the file to blame | ||||
| 	fRev *object.Commit | ||||
| 	// the chain of revisions affecting the file to blame | ||||
| 	revs []*object.Commit | ||||
| 	// the contents of the file across all its revisions | ||||
| 	data []string | ||||
| 	// the graph of the lines in the file across all the revisions | ||||
| 	graph [][]*object.Commit | ||||
| } | ||||
|  | ||||
| // calculate the history of a file "path", starting from commit "from", sorted by commit date. | ||||
| func (b *blame) fillRevs() error { | ||||
| 	var err error | ||||
|  | ||||
| 	b.revs, err = references(b.fRev, b.path) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // build graph of a file from its revision history | ||||
| func (b *blame) fillGraphAndData() error { | ||||
| 	//TODO: not all commits are needed, only the current rev and the prev | ||||
| 	b.graph = make([][]*object.Commit, len(b.revs)) | ||||
| 	b.data = make([]string, len(b.revs)) // file contents in all the revisions | ||||
| 	// for every revision of the file, starting with the first | ||||
| 	// one... | ||||
| 	for i, rev := range b.revs { | ||||
| 		// get the contents of the file | ||||
| 		file, err := rev.File(b.path) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		b.data[i], err = file.Contents() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		nLines := countLines(b.data[i]) | ||||
| 		// create a node for each line | ||||
| 		b.graph[i] = make([]*object.Commit, nLines) | ||||
| 		// assign a commit to each node | ||||
| 		// if this is the first revision, then the node is assigned to | ||||
| 		// this first commit. | ||||
| 		if i == 0 { | ||||
| 			for j := 0; j < nLines; j++ { | ||||
| 				b.graph[i][j] = b.revs[i] | ||||
| 			} | ||||
| 		} else { | ||||
| 			// if this is not the first commit, then assign to the old | ||||
| 			// commit or to the new one, depending on what the diff | ||||
| 			// says. | ||||
| 			b.assignOrigin(i, i-1) | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // sliceGraph returns a slice of commits (one per line) for a particular | ||||
| // revision of a file (0=first revision). | ||||
| func (b *blame) sliceGraph(i int) []*object.Commit { | ||||
| 	fVs := b.graph[i] | ||||
| 	result := make([]*object.Commit, 0, len(fVs)) | ||||
| 	for _, v := range fVs { | ||||
| 		c := *v | ||||
| 		result = append(result, &c) | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // assignOrigin assigns an origin commit to the vertices of the current (c) | ||||
| // revision, based on the diff between it and its previous (p) revision. | ||||
| func (b *blame) assignOrigin(c, p int) { | ||||
| 	// assign origin based on diff info | ||||
| 	hunks := diff.Do(b.data[p], b.data[c]) | ||||
| 	sl := -1 // source line | ||||
| 	dl := -1 // destination line | ||||
| 	for h := range hunks { | ||||
| 		hLines := countLines(hunks[h].Text) | ||||
| 		for hl := 0; hl < hLines; hl++ { | ||||
| 			switch { | ||||
| 			case hunks[h].Type == 0: | ||||
| 				sl++ | ||||
| 				dl++ | ||||
| 				b.graph[c][dl] = b.graph[p][sl] | ||||
| 			case hunks[h].Type == 1: | ||||
| 				dl++ | ||||
| 				b.graph[c][dl] = b.revs[c] | ||||
| 			case hunks[h].Type == -1: | ||||
| 				sl++ | ||||
| 			default: | ||||
| 				panic("unreachable") | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // GoString returns the results of a Blame formatted in git-blame's style. | ||||
| func (b *blame) GoString() string { | ||||
| 	var buf bytes.Buffer | ||||
|  | ||||
| 	file, err := b.fRev.File(b.path) | ||||
| 	if err != nil { | ||||
| 		panic("GoString: internal error in repo.Data") | ||||
| 	} | ||||
| 	contents, err := file.Contents() | ||||
| 	if err != nil { | ||||
| 		panic("GoString: internal error in repo.Data") | ||||
| 	} | ||||
|  | ||||
| 	lines := strings.Split(contents, "\n") | ||||
| 	// max line number length | ||||
| 	mlnl := len(strconv.Itoa(len(lines))) | ||||
| 	// max author length | ||||
| 	mal := b.maxAuthorLength() | ||||
| 	format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", | ||||
| 		mal, mlnl) | ||||
|  | ||||
| 	fVs := b.graph[len(b.graph)-1] | ||||
| 	for ln, v := range fVs { | ||||
| 		fmt.Fprintf(&buf, format, v.Hash.String()[:8], | ||||
| 			prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) | ||||
| 	} | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| // utility function to pretty print the author. | ||||
| func prettyPrintAuthor(c *object.Commit) string { | ||||
| 	return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02")) | ||||
| } | ||||
|  | ||||
| // utility function to calculate the number of runes needed | ||||
| // to print the longest author name in the blame of a file. | ||||
| func (b *blame) maxAuthorLength() int { | ||||
| 	memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1) | ||||
| 	fVs := b.graph[len(b.graph)-1] | ||||
| 	m := 0 | ||||
| 	for ln := range fVs { | ||||
| 		if _, ok := memo[fVs[ln].Hash]; ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		memo[fVs[ln].Hash] = struct{}{} | ||||
| 		m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln]))) | ||||
| 	} | ||||
| 	return m | ||||
| } | ||||
|  | ||||
| func max(a, b int) int { | ||||
| 	if a > b { | ||||
| 		return a | ||||
| 	} | ||||
| 	return b | ||||
| } | ||||
							
								
								
									
										22
									
								
								vendor/github.com/go-git/go-git/v5/common.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,22 @@ | ||||
| package git | ||||
|  | ||||
| import "strings" | ||||
|  | ||||
| const defaultDotGitPath = ".git" | ||||
|  | ||||
| // countLines returns the number of lines in a string à la git. The | ||||
| // newline character is assumed to be '\n'. The empty string contains | ||||
| // 0 lines. If the last line of the string doesn't end with a newline, | ||||
| // it is still counted as a line. | ||||
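| // For example: countLines("") == 0, countLines("a\n") == 1, and | ||||
| // countLines("a\nb") == 2. | ||||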
| func countLines(s string) int { | ||||
| 	if s == "" { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	nEOL := strings.Count(s, "\n") | ||||
| 	if strings.HasSuffix(s, "\n") { | ||||
| 		return nEOL | ||||
| 	} | ||||
|  | ||||
| 	return nEOL + 1 | ||||
| } | ||||
							
								
								
									
										90
									
								
								vendor/github.com/go-git/go-git/v5/config/branch.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,90 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	format "github.com/go-git/go-git/v5/plumbing/format/config" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	errBranchEmptyName     = errors.New("branch config: empty name") | ||||
| 	errBranchInvalidMerge  = errors.New("branch config: invalid merge") | ||||
| 	errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'") | ||||
| ) | ||||
|  | ||||
| // Branch contains information on a local branch | ||||
| // and which remote it tracks. | ||||
| type Branch struct { | ||||
| 	// Name of branch | ||||
| 	Name string | ||||
| 	// Remote name of remote to track | ||||
| 	Remote string | ||||
| 	// Merge is the local refspec for the branch | ||||
| 	Merge plumbing.ReferenceName | ||||
| 	// Rebase instead of merge when pulling. Valid values are | ||||
| 	// "true" and "interactive".  "false" is undocumented and | ||||
| 	// typically represented by the non-existence of this field | ||||
| 	Rebase string | ||||
|  | ||||
| 	raw *format.Subsection | ||||
| } | ||||
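|  | ||||
| // In a git config file, Branch models a subsection of this shape (the values | ||||
| // here are illustrative): | ||||
| // | ||||
| //	[branch "master"] | ||||
| //		remote = origin | ||||
| //		merge = refs/heads/master | ||||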
|  | ||||
| // Validate validates fields of branch | ||||
| func (b *Branch) Validate() error { | ||||
| 	if b.Name == "" { | ||||
| 		return errBranchEmptyName | ||||
| 	} | ||||
|  | ||||
| 	if b.Merge != "" && !b.Merge.IsBranch() { | ||||
| 		return errBranchInvalidMerge | ||||
| 	} | ||||
|  | ||||
| 	if b.Rebase != "" && | ||||
| 		b.Rebase != "true" && | ||||
| 		b.Rebase != "interactive" && | ||||
| 		b.Rebase != "false" { | ||||
| 		return errBranchInvalidRebase | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (b *Branch) marshal() *format.Subsection { | ||||
| 	if b.raw == nil { | ||||
| 		b.raw = &format.Subsection{} | ||||
| 	} | ||||
|  | ||||
| 	b.raw.Name = b.Name | ||||
|  | ||||
| 	if b.Remote == "" { | ||||
| 		b.raw.RemoveOption(remoteSection) | ||||
| 	} else { | ||||
| 		b.raw.SetOption(remoteSection, b.Remote) | ||||
| 	} | ||||
|  | ||||
| 	if b.Merge == "" { | ||||
| 		b.raw.RemoveOption(mergeKey) | ||||
| 	} else { | ||||
| 		b.raw.SetOption(mergeKey, string(b.Merge)) | ||||
| 	} | ||||
|  | ||||
| 	if b.Rebase == "" { | ||||
| 		b.raw.RemoveOption(rebaseKey) | ||||
| 	} else { | ||||
| 		b.raw.SetOption(rebaseKey, b.Rebase) | ||||
| 	} | ||||
|  | ||||
| 	return b.raw | ||||
| } | ||||
|  | ||||
| func (b *Branch) unmarshal(s *format.Subsection) error { | ||||
| 	b.raw = s | ||||
|  | ||||
| 	b.Name = b.raw.Name | ||||
| 	b.Remote = b.raw.Options.Get(remoteSection) | ||||
| 	b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey)) | ||||
| 	b.Rebase = b.raw.Options.Get(rebaseKey) | ||||
|  | ||||
| 	return b.Validate() | ||||
| } | ||||
							
								
								
									
										407
									
								
								vendor/github.com/go-git/go-git/v5/config/config.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,407 @@ | ||||
| // Package config contains the abstraction of multiple config files | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/internal/url" | ||||
| 	format "github.com/go-git/go-git/v5/plumbing/format/config" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// DefaultFetchRefSpec is the default refspec used for fetch. | ||||
| 	DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*" | ||||
| 	// DefaultPushRefSpec is the default refspec used for push. | ||||
| 	DefaultPushRefSpec = "refs/heads/*:refs/heads/*" | ||||
| ) | ||||
|  | ||||
| // ConfigStorer is a generic storer of Config objects. | ||||
| type ConfigStorer interface { | ||||
| 	Config() (*Config, error) | ||||
| 	SetConfig(*Config) error | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	ErrInvalid               = errors.New("config invalid key in remote or branch") | ||||
| 	ErrRemoteConfigNotFound  = errors.New("remote config not found") | ||||
| 	ErrRemoteConfigEmptyURL  = errors.New("remote config: empty URL") | ||||
| 	ErrRemoteConfigEmptyName = errors.New("remote config: empty name") | ||||
| ) | ||||
|  | ||||
| // Config contains the repository configuration | ||||
| // https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES | ||||
| type Config struct { | ||||
| 	Core struct { | ||||
| 		// IsBare if true this repository is assumed to be bare and has no | ||||
| 		// working directory associated with it. | ||||
| 		IsBare bool | ||||
| 		// Worktree is the path to the root of the working tree. | ||||
| 		Worktree string | ||||
| 		// CommentChar is the character indicating the start of a | ||||
| 		// comment for commands like commit and tag | ||||
| 		CommentChar string | ||||
| 	} | ||||
|  | ||||
| 	Pack struct { | ||||
| 		// Window controls the size of the sliding window for delta | ||||
| 		// compression.  The default is 10.  A value of 0 turns off | ||||
| 		// delta compression entirely. | ||||
| 		Window uint | ||||
| 	} | ||||
|  | ||||
| 	// Remotes is the list of repository remotes; the key of the map is the | ||||
| 	// name of the remote and should equal RemoteConfig.Name. | ||||
| 	Remotes map[string]*RemoteConfig | ||||
| 	// Submodules is the list of repository submodules; the key of the map is | ||||
| 	// the name of the submodule and should equal Submodule.Name. | ||||
| 	Submodules map[string]*Submodule | ||||
| 	// Branches is the list of branches; the key is the branch name and should | ||||
| 	// equal Branch.Name. | ||||
| 	Branches map[string]*Branch | ||||
| 	// Raw contains the raw information of a config file. The main goal is to | ||||
| 	// preserve the parsed information from the original format, to avoid | ||||
| 	// dropping unsupported fields. | ||||
| 	Raw *format.Config | ||||
| } | ||||
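|  | ||||
| // As a rough illustration, a config file covering the sections above looks | ||||
| // like this (the values are placeholders): | ||||
| // | ||||
| //	[core] | ||||
| //		bare = false | ||||
| //	[remote "origin"] | ||||
| //		url = https://github.com/go-git/go-git | ||||
| //		fetch = +refs/heads/*:refs/remotes/origin/* | ||||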
|  | ||||
| // NewConfig returns a new empty Config. | ||||
| func NewConfig() *Config { | ||||
| 	config := &Config{ | ||||
| 		Remotes:    make(map[string]*RemoteConfig), | ||||
| 		Submodules: make(map[string]*Submodule), | ||||
| 		Branches:   make(map[string]*Branch), | ||||
| 		Raw:        format.New(), | ||||
| 	} | ||||
|  | ||||
| 	config.Pack.Window = DefaultPackWindow | ||||
|  | ||||
| 	return config | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (c *Config) Validate() error { | ||||
| 	for name, r := range c.Remotes { | ||||
| 		if r.Name != name { | ||||
| 			return ErrInvalid | ||||
| 		} | ||||
|  | ||||
| 		if err := r.Validate(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for name, b := range c.Branches { | ||||
| 		if b.Name != name { | ||||
| 			return ErrInvalid | ||||
| 		} | ||||
|  | ||||
| 		if err := b.Validate(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	remoteSection    = "remote" | ||||
| 	submoduleSection = "submodule" | ||||
| 	branchSection    = "branch" | ||||
| 	coreSection      = "core" | ||||
| 	packSection      = "pack" | ||||
| 	fetchKey         = "fetch" | ||||
| 	urlKey           = "url" | ||||
| 	bareKey          = "bare" | ||||
| 	worktreeKey      = "worktree" | ||||
| 	commentCharKey   = "commentChar" | ||||
| 	windowKey        = "window" | ||||
| 	mergeKey         = "merge" | ||||
| 	rebaseKey        = "rebase" | ||||
|  | ||||
| 	// DefaultPackWindow holds the number of previous objects used to | ||||
| 	// generate deltas. The value 10 is the same used by git command. | ||||
| 	DefaultPackWindow = uint(10) | ||||
| ) | ||||
|  | ||||
| // Unmarshal parses a git-config file and stores it. | ||||
| func (c *Config) Unmarshal(b []byte) error { | ||||
| 	r := bytes.NewBuffer(b) | ||||
| 	d := format.NewDecoder(r) | ||||
|  | ||||
| 	c.Raw = format.New() | ||||
| 	if err := d.Decode(c.Raw); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	c.unmarshalCore() | ||||
| 	if err := c.unmarshalPack(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	unmarshalSubmodules(c.Raw, c.Submodules) | ||||
|  | ||||
| 	if err := c.unmarshalBranches(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return c.unmarshalRemotes() | ||||
| } | ||||
|  | ||||
| func (c *Config) unmarshalCore() { | ||||
| 	s := c.Raw.Section(coreSection) | ||||
| 	if s.Options.Get(bareKey) == "true" { | ||||
| 		c.Core.IsBare = true | ||||
| 	} | ||||
|  | ||||
| 	c.Core.Worktree = s.Options.Get(worktreeKey) | ||||
| 	c.Core.CommentChar = s.Options.Get(commentCharKey) | ||||
| } | ||||
|  | ||||
| func (c *Config) unmarshalPack() error { | ||||
| 	s := c.Raw.Section(packSection) | ||||
| 	window := s.Options.Get(windowKey) | ||||
| 	if window == "" { | ||||
| 		c.Pack.Window = DefaultPackWindow | ||||
| 	} else { | ||||
| 		winUint, err := strconv.ParseUint(window, 10, 32) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		c.Pack.Window = uint(winUint) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (c *Config) unmarshalRemotes() error { | ||||
| 	s := c.Raw.Section(remoteSection) | ||||
| 	for _, sub := range s.Subsections { | ||||
| 		r := &RemoteConfig{} | ||||
| 		if err := r.unmarshal(sub); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		c.Remotes[r.Name] = r | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) { | ||||
| 	s := fc.Section(submoduleSection) | ||||
| 	for _, sub := range s.Subsections { | ||||
| 		m := &Submodule{} | ||||
| 		m.unmarshal(sub) | ||||
|  | ||||
| 		if m.Validate() == ErrModuleBadPath { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		submodules[m.Name] = m | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *Config) unmarshalBranches() error { | ||||
| 	bs := c.Raw.Section(branchSection) | ||||
| 	for _, sub := range bs.Subsections { | ||||
| 		b := &Branch{} | ||||
|  | ||||
| 		if err := b.unmarshal(sub); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		c.Branches[b.Name] = b | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Marshal returns Config encoded as a git-config file. | ||||
| func (c *Config) Marshal() ([]byte, error) { | ||||
| 	c.marshalCore() | ||||
| 	c.marshalPack() | ||||
| 	c.marshalRemotes() | ||||
| 	c.marshalSubmodules() | ||||
| 	c.marshalBranches() | ||||
|  | ||||
| 	buf := bytes.NewBuffer(nil) | ||||
| 	if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return buf.Bytes(), nil | ||||
| } | ||||
|  | ||||
| func (c *Config) marshalCore() { | ||||
| 	s := c.Raw.Section(coreSection) | ||||
| 	s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) | ||||
|  | ||||
| 	if c.Core.Worktree != "" { | ||||
| 		s.SetOption(worktreeKey, c.Core.Worktree) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *Config) marshalPack() { | ||||
| 	s := c.Raw.Section(packSection) | ||||
| 	if c.Pack.Window != DefaultPackWindow { | ||||
| 		s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *Config) marshalRemotes() { | ||||
| 	s := c.Raw.Section(remoteSection) | ||||
| 	newSubsections := make(format.Subsections, 0, len(c.Remotes)) | ||||
| 	added := make(map[string]bool) | ||||
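| 	// First keep the remotes that already have a subsection in the raw | ||||
| 	// config, preserving their original order; remotes without one are | ||||
| 	// appended below in sorted order. | ||||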
| 	for _, subsection := range s.Subsections { | ||||
| 		if remote, ok := c.Remotes[subsection.Name]; ok { | ||||
| 			newSubsections = append(newSubsections, remote.marshal()) | ||||
| 			added[subsection.Name] = true | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	remoteNames := make([]string, 0, len(c.Remotes)) | ||||
| 	for name := range c.Remotes { | ||||
| 		remoteNames = append(remoteNames, name) | ||||
| 	} | ||||
|  | ||||
| 	sort.Strings(remoteNames) | ||||
|  | ||||
| 	for _, name := range remoteNames { | ||||
| 		if !added[name] { | ||||
| 			newSubsections = append(newSubsections, c.Remotes[name].marshal()) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	s.Subsections = newSubsections | ||||
| } | ||||
|  | ||||
| func (c *Config) marshalSubmodules() { | ||||
| 	s := c.Raw.Section(submoduleSection) | ||||
| 	s.Subsections = make(format.Subsections, len(c.Submodules)) | ||||
|  | ||||
| 	var i int | ||||
| 	for _, r := range c.Submodules { | ||||
| 		section := r.marshal() | ||||
| 		// The submodule section in the config file is a subset of the | ||||
| 		// .gitmodules file, so we remove the options that are not valid there. | ||||
| 		section.RemoveOption(pathKey) | ||||
| 		s.Subsections[i] = section | ||||
| 		i++ | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *Config) marshalBranches() { | ||||
| 	s := c.Raw.Section(branchSection) | ||||
| 	newSubsections := make(format.Subsections, 0, len(c.Branches)) | ||||
| 	added := make(map[string]bool) | ||||
| 	for _, subsection := range s.Subsections { | ||||
| 		if branch, ok := c.Branches[subsection.Name]; ok { | ||||
| 			newSubsections = append(newSubsections, branch.marshal()) | ||||
| 			added[subsection.Name] = true | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	branchNames := make([]string, 0, len(c.Branches)) | ||||
| 	for name := range c.Branches { | ||||
| 		branchNames = append(branchNames, name) | ||||
| 	} | ||||
|  | ||||
| 	sort.Strings(branchNames) | ||||
|  | ||||
| 	for _, name := range branchNames { | ||||
| 		if !added[name] { | ||||
| 			newSubsections = append(newSubsections, c.Branches[name].marshal()) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	s.Subsections = newSubsections | ||||
| } | ||||
|  | ||||
| // RemoteConfig contains the configuration for a given remote repository. | ||||
| type RemoteConfig struct { | ||||
| 	// Name of the remote | ||||
| 	Name string | ||||
| 	// URLs the URLs of a remote repository. It must be non-empty. Fetch will | ||||
| 	// always use the first URL, while push will use all of them. | ||||
| 	URLs []string | ||||
| 	// Fetch the default set of "refspec" for fetch operation | ||||
| 	Fetch []RefSpec | ||||
|  | ||||
| 	// raw representation of the subsection, filled in when marshal or | ||||
| 	// unmarshal are called | ||||
| 	raw *format.Subsection | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (c *RemoteConfig) Validate() error { | ||||
| 	if c.Name == "" { | ||||
| 		return ErrRemoteConfigEmptyName | ||||
| 	} | ||||
|  | ||||
| 	if len(c.URLs) == 0 { | ||||
| 		return ErrRemoteConfigEmptyURL | ||||
| 	} | ||||
|  | ||||
| 	for _, r := range c.Fetch { | ||||
| 		if err := r.Validate(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(c.Fetch) == 0 { | ||||
| 		c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (c *RemoteConfig) unmarshal(s *format.Subsection) error { | ||||
| 	c.raw = s | ||||
|  | ||||
| 	fetch := []RefSpec{} | ||||
| 	for _, f := range c.raw.Options.GetAll(fetchKey) { | ||||
| 		rs := RefSpec(f) | ||||
| 		if err := rs.Validate(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		fetch = append(fetch, rs) | ||||
| 	} | ||||
|  | ||||
| 	c.Name = c.raw.Name | ||||
| 	c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...) | ||||
| 	c.Fetch = fetch | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (c *RemoteConfig) marshal() *format.Subsection { | ||||
| 	if c.raw == nil { | ||||
| 		c.raw = &format.Subsection{} | ||||
| 	} | ||||
|  | ||||
| 	c.raw.Name = c.Name | ||||
| 	if len(c.URLs) == 0 { | ||||
| 		c.raw.RemoveOption(urlKey) | ||||
| 	} else { | ||||
| 		c.raw.SetOption(urlKey, c.URLs...) | ||||
| 	} | ||||
|  | ||||
| 	if len(c.Fetch) == 0 { | ||||
| 		c.raw.RemoveOption(fetchKey) | ||||
| 	} else { | ||||
| 		var values []string | ||||
| 		for _, rs := range c.Fetch { | ||||
| 			values = append(values, rs.String()) | ||||
| 		} | ||||
|  | ||||
| 		c.raw.SetOption(fetchKey, values...) | ||||
| 	} | ||||
|  | ||||
| 	return c.raw | ||||
| } | ||||
|  | ||||
| // IsFirstURLLocal reports whether the first URL of the remote points to a | ||||
| // local endpoint. | ||||
| func (c *RemoteConfig) IsFirstURLLocal() bool { | ||||
| 	return url.IsLocalEndpoint(c.URLs[0]) | ||||
| } | ||||
							
								
								
									
										139
									
								
								vendor/github.com/go-git/go-git/v5/config/modules.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,139 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"regexp" | ||||
|  | ||||
| 	format "github.com/go-git/go-git/v5/plumbing/format/config" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	ErrModuleEmptyURL  = errors.New("module config: empty URL") | ||||
| 	ErrModuleEmptyPath = errors.New("module config: empty path") | ||||
| 	ErrModuleBadPath   = errors.New("submodule has an invalid path") | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// Matches module paths with dotdot ".." components. | ||||
| 	dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`) | ||||
| ) | ||||
|  | ||||
| // Modules defines the submodule properties; it represents a .gitmodules file | ||||
| // https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html | ||||
| type Modules struct { | ||||
| 	// Submodules is a map of submodules, keyed by the name of the submodule. | ||||
| 	Submodules map[string]*Submodule | ||||
|  | ||||
| 	raw *format.Config | ||||
| } | ||||
|  | ||||
| // NewModules returns a new empty Modules | ||||
| func NewModules() *Modules { | ||||
| 	return &Modules{ | ||||
| 		Submodules: make(map[string]*Submodule), | ||||
| 		raw:        format.New(), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	pathKey   = "path" | ||||
| 	branchKey = "branch" | ||||
| ) | ||||
|  | ||||
| // Unmarshal parses a git-config file and stores it. | ||||
| func (m *Modules) Unmarshal(b []byte) error { | ||||
| 	r := bytes.NewBuffer(b) | ||||
| 	d := format.NewDecoder(r) | ||||
|  | ||||
| 	m.raw = format.New() | ||||
| 	if err := d.Decode(m.raw); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	unmarshalSubmodules(m.raw, m.Submodules) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Marshal returns Modules encoded as a git-config file. | ||||
| func (m *Modules) Marshal() ([]byte, error) { | ||||
| 	s := m.raw.Section(submoduleSection) | ||||
| 	s.Subsections = make(format.Subsections, len(m.Submodules)) | ||||
|  | ||||
| 	var i int | ||||
| 	for _, r := range m.Submodules { | ||||
| 		s.Subsections[i] = r.marshal() | ||||
| 		i++ | ||||
| 	} | ||||
|  | ||||
| 	buf := bytes.NewBuffer(nil) | ||||
| 	if err := format.NewEncoder(buf).Encode(m.raw); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return buf.Bytes(), nil | ||||
| } | ||||
|  | ||||
| // Submodule defines a submodule. | ||||
| type Submodule struct { | ||||
| 	// Name module name | ||||
| 	Name string | ||||
| 	// Path defines the path, relative to the top-level directory of the Git | ||||
| 	// working tree. | ||||
| 	Path string | ||||
| 	// URL defines a URL from which the submodule repository can be cloned. | ||||
| 	URL string | ||||
| 	// Branch is a remote branch name for tracking updates in the upstream | ||||
| 	// submodule. Optional value. | ||||
| 	Branch string | ||||
|  | ||||
| 	// raw representation of the subsection, filled in when marshal or | ||||
| 	// unmarshal are called. | ||||
| 	raw *format.Subsection | ||||
| } | ||||
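|  | ||||
| // A .gitmodules entry of this shape is what Submodule models (the name, path | ||||
| // and URL are placeholders): | ||||
| // | ||||
| //	[submodule "lib"] | ||||
| //		path = lib | ||||
| //		url = https://example.com/lib.git | ||||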
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (m *Submodule) Validate() error { | ||||
| 	if m.Path == "" { | ||||
| 		return ErrModuleEmptyPath | ||||
| 	} | ||||
|  | ||||
| 	if m.URL == "" { | ||||
| 		return ErrModuleEmptyURL | ||||
| 	} | ||||
|  | ||||
| 	if dotdotPath.MatchString(m.Path) { | ||||
| 		return ErrModuleBadPath | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *Submodule) unmarshal(s *format.Subsection) { | ||||
| 	m.raw = s | ||||
|  | ||||
| 	m.Name = m.raw.Name | ||||
| 	m.Path = m.raw.Option(pathKey) | ||||
| 	m.URL = m.raw.Option(urlKey) | ||||
| 	m.Branch = m.raw.Option(branchKey) | ||||
| } | ||||
|  | ||||
| func (m *Submodule) marshal() *format.Subsection { | ||||
| 	if m.raw == nil { | ||||
| 		m.raw = &format.Subsection{} | ||||
| 	} | ||||
|  | ||||
| 	m.raw.Name = m.Name | ||||
| 	if m.raw.Name == "" { | ||||
| 		m.raw.Name = m.Path | ||||
| 	} | ||||
|  | ||||
| 	m.raw.SetOption(pathKey, m.Path) | ||||
| 	m.raw.SetOption(urlKey, m.URL) | ||||
|  | ||||
| 	if m.Branch != "" { | ||||
| 		m.raw.SetOption(branchKey, m.Branch) | ||||
| 	} | ||||
|  | ||||
| 	return m.raw | ||||
| } | ||||
							
								
								
									
										150
									
								
								vendor/github.com/go-git/go-git/v5/config/refspec.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
							| @@ -0,0 +1,150 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	refSpecWildcard  = "*" | ||||
| 	refSpecForce     = "+" | ||||
| 	refSpecSeparator = ":" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong") | ||||
| 	ErrRefSpecMalformedWildcard  = errors.New("malformed refspec, mismatched number of wildcards") | ||||
| ) | ||||
|  | ||||
| // RefSpec is a mapping from local branches to remote references. | ||||
| // The format of the refspec is an optional +, followed by <src>:<dst>, where | ||||
| // <src> is the pattern for references on the remote side and <dst> is where | ||||
| // those references will be written locally. The + tells Git to update the | ||||
| // reference even if it isn’t a fast-forward. | ||||
| // e.g.: "+refs/heads/*:refs/remotes/origin/*" | ||||
| // | ||||
| // https://git-scm.com/book/es/v2/Git-Internals-The-Refspec | ||||
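| // | ||||
| // For example, given RefSpec("+refs/heads/*:refs/remotes/origin/*"), the | ||||
| // methods below behave as follows (a worked illustration of this type's | ||||
| // existing API): | ||||
| // | ||||
| //	Src()                      == "refs/heads/*" | ||||
| //	Dst("refs/heads/master")   == "refs/remotes/origin/master" | ||||
| //	Match("refs/heads/master") == true | ||||
| //	IsForceUpdate()            == true | ||||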
| type RefSpec string | ||||
|  | ||||
| // Validate validates the RefSpec | ||||
| func (s RefSpec) Validate() error { | ||||
| 	spec := string(s) | ||||
| 	if strings.Count(spec, refSpecSeparator) != 1 { | ||||
| 		return ErrRefSpecMalformedSeparator | ||||
| 	} | ||||
|  | ||||
| 	sep := strings.Index(spec, refSpecSeparator) | ||||
| 	if sep == len(spec)-1 { | ||||
| 		return ErrRefSpecMalformedSeparator | ||||
| 	} | ||||
|  | ||||
| 	ws := strings.Count(spec[0:sep], refSpecWildcard) | ||||
| 	wd := strings.Count(spec[sep+1:], refSpecWildcard) | ||||
| 	if ws == wd && ws < 2 && wd < 2 { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	return ErrRefSpecMalformedWildcard | ||||
| } | ||||
|  | ||||
| // IsForceUpdate returns true if the update is allowed even when it is not a fast-forward. | ||||
| func (s RefSpec) IsForceUpdate() bool { | ||||
| 	return s[0] == refSpecForce[0] | ||||
| } | ||||
|  | ||||
| // IsDelete returns true if the refspec indicates a delete (empty src). | ||||
| func (s RefSpec) IsDelete() bool { | ||||
| 	return s[0] == refSpecSeparator[0] | ||||
| } | ||||
|  | ||||
| // Src returns the src side. | ||||
| func (s RefSpec) Src() string { | ||||
| 	spec := string(s) | ||||
|  | ||||
| 	var start int | ||||
| 	if s.IsForceUpdate() { | ||||
| 		start = 1 | ||||
| 	} else { | ||||
| 		start = 0 | ||||
| 	} | ||||
| 	end := strings.Index(spec, refSpecSeparator) | ||||
|  | ||||
| 	return spec[start:end] | ||||
| } | ||||
|  | ||||
| // Match matches the given plumbing.ReferenceName against the source. | ||||
| func (s RefSpec) Match(n plumbing.ReferenceName) bool { | ||||
| 	if !s.IsWildcard() { | ||||
| 		return s.matchExact(n) | ||||
| 	} | ||||
|  | ||||
| 	return s.matchGlob(n) | ||||
| } | ||||
|  | ||||
| // IsWildcard returns true if the RefSpec contains a wildcard. | ||||
| func (s RefSpec) IsWildcard() bool { | ||||
| 	return strings.Contains(string(s), refSpecWildcard) | ||||
| } | ||||
|  | ||||
| func (s RefSpec) matchExact(n plumbing.ReferenceName) bool { | ||||
| 	return s.Src() == n.String() | ||||
| } | ||||
|  | ||||
| func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool { | ||||
| 	src := s.Src() | ||||
| 	name := n.String() | ||||
| 	wildcard := strings.Index(src, refSpecWildcard) | ||||
|  | ||||
| 	var prefix, suffix string | ||||
| 	prefix = src[0:wildcard] | ||||
| 	if len(src) > wildcard+1 { | ||||
| 		suffix = src[wildcard+1:] | ||||
| 	} | ||||
|  | ||||
| 	return len(name) >= len(prefix)+len(suffix) && | ||||
| 		strings.HasPrefix(name, prefix) && | ||||
| 		strings.HasSuffix(name, suffix) | ||||
| } | ||||
|  | ||||
| // Dst returns the destination for the given remote reference. | ||||
| func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName { | ||||
| 	spec := string(s) | ||||
| 	start := strings.Index(spec, refSpecSeparator) + 1 | ||||
| 	dst := spec[start:] | ||||
| 	src := s.Src() | ||||
|  | ||||
| 	if !s.IsWildcard() { | ||||
| 		return plumbing.ReferenceName(dst) | ||||
| 	} | ||||
|  | ||||
| 	name := n.String() | ||||
| 	ws := strings.Index(src, refSpecWildcard) | ||||
| 	wd := strings.Index(dst, refSpecWildcard) | ||||
| 	match := name[ws : len(name)-(len(src)-(ws+1))] | ||||
|  | ||||
| 	return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:]) | ||||
| } | ||||
|  | ||||
| func (s RefSpec) Reverse() RefSpec { | ||||
| 	spec := string(s) | ||||
| 	separator := strings.Index(spec, refSpecSeparator) | ||||
|  | ||||
| 	return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator]) | ||||
| } | ||||
|  | ||||
| func (s RefSpec) String() string { | ||||
| 	return string(s) | ||||
| } | ||||
|  | ||||
| // MatchAny returns true if any of the RefSpecs matches the given ReferenceName. | ||||
| func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool { | ||||
| 	for _, r := range l { | ||||
| 		if r.Match(n) { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
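
A minimal sketch of how these helpers combine, using the conventional fetch refspec (the reference name is illustrative):

    package main

    import (
        "fmt"

        "github.com/go-git/go-git/v5/config"
        "github.com/go-git/go-git/v5/plumbing"
    )

    func main() {
        spec := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")
        if err := spec.Validate(); err != nil { // one ":" and matching wildcard counts: valid
            panic(err)
        }
        name := plumbing.ReferenceName("refs/heads/master")
        fmt.Println(spec.IsForceUpdate()) // true: the leading "+" permits non-fast-forward updates
        fmt.Println(spec.Match(name))     // true: the name matches the "refs/heads/*" glob
        fmt.Println(spec.Dst(name))       // refs/remotes/origin/master
    }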
							
								
								
									
10 vendor/github.com/go-git/go-git/v5/doc.go generated vendored Normal file
							| @@ -0,0 +1,10 @@ | ||||
| // A highly extensible git implementation in pure Go. | ||||
| // | ||||
| // go-git aims to reach the completeness of libgit2 or jgit; nowadays it covers | ||||
| // the majority of the plumbing read operations and some of the main write | ||||
| // operations, but lacks the main porcelain operations such as merges. | ||||
| // | ||||
| // It is highly extensible; we have been following the open/closed principle in | ||||
| // its design to facilitate extensions, mainly focusing the efforts on the | ||||
| // persistence of the objects. | ||||
| package git | ||||
							
								
								
									
27 vendor/github.com/go-git/go-git/v5/go.mod generated vendored Normal file
							| @@ -0,0 +1,27 @@ | ||||
| module github.com/go-git/go-git/v5 | ||||
|  | ||||
| require ( | ||||
| 	github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect | ||||
| 	github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 | ||||
| 	github.com/emirpasic/gods v1.12.0 | ||||
| 	github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect | ||||
| 	github.com/gliderlabs/ssh v0.2.2 | ||||
| 	github.com/go-git/gcfg v1.5.0 | ||||
| 	github.com/go-git/go-billy/v5 v5.0.0 | ||||
| 	github.com/go-git/go-git-fixtures/v4 v4.0.1 | ||||
| 	github.com/google/go-cmp v0.3.0 | ||||
| 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 | ||||
| 	github.com/jessevdk/go-flags v1.4.0 | ||||
| 	github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd | ||||
| 	github.com/mitchellh/go-homedir v1.1.0 | ||||
| 	github.com/pkg/errors v0.8.1 // indirect | ||||
| 	github.com/sergi/go-diff v1.1.0 | ||||
| 	github.com/xanzy/ssh-agent v0.2.1 | ||||
| 	golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 | ||||
| 	golang.org/x/net v0.0.0-20200301022130-244492dfa37a | ||||
| 	golang.org/x/text v0.3.2 | ||||
| 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f | ||||
| 	gopkg.in/warnings.v0 v0.1.2 // indirect | ||||
| ) | ||||
|  | ||||
| go 1.13 | ||||
							
								
								
									
78 vendor/github.com/go-git/go-git/v5/go.sum generated vendored Normal file
							| @@ -0,0 +1,78 @@ | ||||
| github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= | ||||
| github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= | ||||
| github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= | ||||
| github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= | ||||
| github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= | ||||
| github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= | ||||
| github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= | ||||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= | ||||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= | ||||
| github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= | ||||
| github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= | ||||
| github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= | ||||
| github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= | ||||
| github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= | ||||
| github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= | ||||
| github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= | ||||
| github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= | ||||
| github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= | ||||
| github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc= | ||||
| github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= | ||||
| github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= | ||||
| github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | ||||
| github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= | ||||
| github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= | ||||
| github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= | ||||
| github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= | ||||
| github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= | ||||
| github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= | ||||
| github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= | ||||
| github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= | ||||
| github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= | ||||
| github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= | ||||
| github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= | ||||
| github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= | ||||
| github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= | ||||
| github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= | ||||
| github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= | ||||
| github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= | ||||
| github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= | ||||
| github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= | ||||
| github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= | ||||
| github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | ||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= | ||||
| github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= | ||||
| github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | ||||
| github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= | ||||
| github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= | ||||
| github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= | ||||
| github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= | ||||
| golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||
| golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= | ||||
| golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||
| golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= | ||||
| golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= | ||||
| golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= | ||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= | ||||
| golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= | ||||
| gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= | ||||
| gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= | ||||
| gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= | ||||
| gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
							
								
								
									
622 vendor/github.com/go-git/go-git/v5/internal/revision/parser.go generated vendored Normal file
							| @@ -0,0 +1,622 @@ | ||||
| // Package revision extracts a git revision from a string. | ||||
| // More information about revisions: https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html | ||||
| package revision | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| // ErrInvalidRevision is emitted if the string doesn't match a valid revision. | ||||
| type ErrInvalidRevision struct { | ||||
| 	s string | ||||
| } | ||||
|  | ||||
| func (e *ErrInvalidRevision) Error() string { | ||||
| 	return "Revision invalid : " + e.s | ||||
| } | ||||
|  | ||||
| // Revisioner represents a revision component. | ||||
| // A revision is made of multiple revision components | ||||
| // obtained after parsing a revision string, | ||||
| // for instance the revision "master~" will be converted into | ||||
| // two revision components, Ref and TildePath. | ||||
| type Revisioner interface { | ||||
| } | ||||
|  | ||||
| // Ref represents a reference name: HEAD, master | ||||
| type Ref string | ||||
|  | ||||
| // TildePath represents ~, ~{n} | ||||
| type TildePath struct { | ||||
| 	Depth int | ||||
| } | ||||
|  | ||||
| // CaretPath represents ^, ^{n} | ||||
| type CaretPath struct { | ||||
| 	Depth int | ||||
| } | ||||
|  | ||||
| // CaretReg represents ^{/foo bar} | ||||
| type CaretReg struct { | ||||
| 	Regexp *regexp.Regexp | ||||
| 	Negate bool | ||||
| } | ||||
|  | ||||
| // CaretType represents ^{commit} | ||||
| type CaretType struct { | ||||
| 	ObjectType string | ||||
| } | ||||
|  | ||||
| // AtReflog represents @{n} | ||||
| type AtReflog struct { | ||||
| 	Depth int | ||||
| } | ||||
|  | ||||
| // AtCheckout represents @{-n} | ||||
| type AtCheckout struct { | ||||
| 	Depth int | ||||
| } | ||||
|  | ||||
| // AtUpstream represents @{upstream}, @{u} | ||||
| type AtUpstream struct { | ||||
| 	BranchName string | ||||
| } | ||||
|  | ||||
| // AtPush represents @{push} | ||||
| type AtPush struct { | ||||
| 	BranchName string | ||||
| } | ||||
|  | ||||
| // AtDate represents @{"2006-01-02T15:04:05Z"} | ||||
| type AtDate struct { | ||||
| 	Date time.Time | ||||
| } | ||||
|  | ||||
| // ColonReg represents :/foo bar | ||||
| type ColonReg struct { | ||||
| 	Regexp *regexp.Regexp | ||||
| 	Negate bool | ||||
| } | ||||
|  | ||||
| // ColonPath represents :./<path> :<path> | ||||
| type ColonPath struct { | ||||
| 	Path string | ||||
| } | ||||
|  | ||||
| // ColonStagePath represents :<n>:/<path> | ||||
| type ColonStagePath struct { | ||||
| 	Path  string | ||||
| 	Stage int | ||||
| } | ||||
|  | ||||
| // Parser represents a parser used to tokenize a given string | ||||
| // and transform it into revisioner chunks. | ||||
| type Parser struct { | ||||
| 	s                 *scanner | ||||
| 	currentParsedChar struct { | ||||
| 		tok token | ||||
| 		lit string | ||||
| 	} | ||||
| 	unreadLastChar bool | ||||
| } | ||||
|  | ||||
| // NewParserFromString returns a new instance of parser from a string. | ||||
| func NewParserFromString(s string) *Parser { | ||||
| 	return NewParser(bytes.NewBufferString(s)) | ||||
| } | ||||
|  | ||||
| // NewParser returns a new instance of parser. | ||||
| func NewParser(r io.Reader) *Parser { | ||||
| 	return &Parser{s: newScanner(r)} | ||||
| } | ||||
|  | ||||
| // scan returns the next token from the underlying scanner | ||||
| // or the last scanned token if an unscan was requested | ||||
| func (p *Parser) scan() (token, string, error) { | ||||
| 	if p.unreadLastChar { | ||||
| 		p.unreadLastChar = false | ||||
| 		return p.currentParsedChar.tok, p.currentParsedChar.lit, nil | ||||
| 	} | ||||
|  | ||||
| 	tok, lit, err := p.s.scan() | ||||
|  | ||||
| 	p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit | ||||
|  | ||||
| 	return tok, lit, err | ||||
| } | ||||
|  | ||||
| // unscan pushes the previously read token back onto the buffer. | ||||
| func (p *Parser) unscan() { p.unreadLastChar = true } | ||||
|  | ||||
| // Parse explodes a revision string into revisioner chunks. | ||||
| func (p *Parser) Parse() ([]Revisioner, error) { | ||||
| 	var rev Revisioner | ||||
| 	var revs []Revisioner | ||||
| 	var tok token | ||||
| 	var err error | ||||
|  | ||||
| 	for { | ||||
| 		tok, _, err = p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		switch tok { | ||||
| 		case at: | ||||
| 			rev, err = p.parseAt() | ||||
| 		case tilde: | ||||
| 			rev, err = p.parseTilde() | ||||
| 		case caret: | ||||
| 			rev, err = p.parseCaret() | ||||
| 		case colon: | ||||
| 			rev, err = p.parseColon() | ||||
| 		case eof: | ||||
| 			err = p.validateFullRevision(&revs) | ||||
|  | ||||
| 			if err != nil { | ||||
| 				return []Revisioner{}, err | ||||
| 			} | ||||
|  | ||||
| 			return revs, nil | ||||
| 		default: | ||||
| 			p.unscan() | ||||
| 			rev, err = p.parseRef() | ||||
| 		} | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return []Revisioner{}, err | ||||
| 		} | ||||
|  | ||||
| 		revs = append(revs, rev) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // validateFullRevision ensures all revisioner chunks make a valid revision | ||||
| func (p *Parser) validateFullRevision(chunks *[]Revisioner) error { | ||||
| 	var hasReference bool | ||||
|  | ||||
| 	for i, chunk := range *chunks { | ||||
| 		switch chunk.(type) { | ||||
| 		case Ref: | ||||
| 			if i == 0 { | ||||
| 				hasReference = true | ||||
| 			} else { | ||||
| 				return &ErrInvalidRevision{`reference must be defined once at the beginning`} | ||||
| 			} | ||||
| 		case AtDate: | ||||
| 			if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<ISO-8601 date>}, @{<ISO-8601 date>}`} | ||||
| 		case AtReflog: | ||||
| 			if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<n>}, @{<n>}`} | ||||
| 		case AtCheckout: | ||||
| 			if len(*chunks) == 1 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-<n>}`} | ||||
| 		case AtUpstream: | ||||
| 			if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{upstream}, @{upstream}, <refname>@{u}, @{u}`} | ||||
| 		case AtPush: | ||||
| 			if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{push}, @{push}`} | ||||
| 		case TildePath, CaretPath, CaretReg: | ||||
| 			if !hasReference { | ||||
| 				return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`} | ||||
| 			} | ||||
| 		case ColonReg: | ||||
| 			if len(*chunks) == 1 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`":" statement is not valid, could be : :/<regexp>`} | ||||
| 		case ColonPath: | ||||
| 			if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`":" statement is not valid, could be : <revision>:<path>`} | ||||
| 		case ColonStagePath: | ||||
| 			if len(*chunks) == 1 { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return &ErrInvalidRevision{`":" statement is not valid, could be : :<n>:<path>`} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // parseAt extracts @ statements. | ||||
| func (p *Parser) parseAt() (Revisioner, error) { | ||||
| 	var tok, nextTok token | ||||
| 	var lit, nextLit string | ||||
| 	var err error | ||||
|  | ||||
| 	tok, _, err = p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if tok != obrace { | ||||
| 		p.unscan() | ||||
|  | ||||
| 		return Ref("HEAD"), nil | ||||
| 	} | ||||
|  | ||||
| 	tok, lit, err = p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	nextTok, nextLit, err = p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	switch { | ||||
| 	case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace: | ||||
| 		return AtUpstream{}, nil | ||||
| 	case tok == word && lit == "push" && nextTok == cbrace: | ||||
| 		return AtPush{}, nil | ||||
| 	case tok == number && nextTok == cbrace: | ||||
| 		n, _ := strconv.Atoi(lit) | ||||
|  | ||||
| 		return AtReflog{n}, nil | ||||
| 	case tok == minus && nextTok == number: | ||||
| 		n, _ := strconv.Atoi(nextLit) | ||||
|  | ||||
| 		t, _, err := p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if t != cbrace { | ||||
| 			return nil, &ErrInvalidRevision{fmt.Sprintf(`missing "}" in @{-n} structure`)} | ||||
| 		} | ||||
|  | ||||
| 		return AtCheckout{n}, nil | ||||
| 	default: | ||||
| 		p.unscan() | ||||
|  | ||||
| 		date := lit | ||||
|  | ||||
| 		for { | ||||
| 			tok, lit, err = p.scan() | ||||
|  | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
|  | ||||
| 			switch { | ||||
| 			case tok == cbrace: | ||||
| 				t, err := time.Parse("2006-01-02T15:04:05Z", date) | ||||
|  | ||||
| 				if err != nil { | ||||
| 					return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)} | ||||
| 				} | ||||
|  | ||||
| 				return AtDate{t}, nil | ||||
| 			default: | ||||
| 				date += lit | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // parseTilde extracts ~ statements. | ||||
| func (p *Parser) parseTilde() (Revisioner, error) { | ||||
| 	var tok token | ||||
| 	var lit string | ||||
| 	var err error | ||||
|  | ||||
| 	tok, lit, err = p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	switch { | ||||
| 	case tok == number: | ||||
| 		n, _ := strconv.Atoi(lit) | ||||
|  | ||||
| 		return TildePath{n}, nil | ||||
| 	default: | ||||
| 		p.unscan() | ||||
| 		return TildePath{1}, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // parseCaret extracts ^ statements. | ||||
| func (p *Parser) parseCaret() (Revisioner, error) { | ||||
| 	var tok token | ||||
| 	var lit string | ||||
| 	var err error | ||||
|  | ||||
| 	tok, lit, err = p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	switch { | ||||
| 	case tok == obrace: | ||||
| 		r, err := p.parseCaretBraces() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		return r, nil | ||||
| 	case tok == number: | ||||
| 		n, _ := strconv.Atoi(lit) | ||||
|  | ||||
| 		if n > 2 { | ||||
| 			return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)} | ||||
| 		} | ||||
|  | ||||
| 		return CaretPath{n}, nil | ||||
| 	default: | ||||
| 		p.unscan() | ||||
| 		return CaretPath{1}, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // parseCaretBraces extracts ^{<data>} statements. | ||||
| func (p *Parser) parseCaretBraces() (Revisioner, error) { | ||||
| 	var tok, nextTok token | ||||
| 	var lit, _ string | ||||
| 	start := true | ||||
| 	var re string | ||||
| 	var negate bool | ||||
| 	var err error | ||||
|  | ||||
| 	for { | ||||
| 		tok, lit, err = p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		nextTok, _, err = p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| 		case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"): | ||||
| 			return CaretType{lit}, nil | ||||
| 		case re == "" && tok == cbrace: | ||||
| 			return CaretType{"tag"}, nil | ||||
| 		case re == "" && tok == emark && nextTok == emark: | ||||
| 			re += lit | ||||
| 		case re == "" && tok == emark && nextTok == minus: | ||||
| 			negate = true | ||||
| 		case re == "" && tok == emark: | ||||
| 			return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" others than those defined are reserved`)} | ||||
| 		case re == "" && tok == slash: | ||||
| 			p.unscan() | ||||
| 		case tok != slash && start: | ||||
| 			return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} | ||||
| 		case tok != cbrace: | ||||
| 			p.unscan() | ||||
| 			re += lit | ||||
| 		case tok == cbrace: | ||||
| 			p.unscan() | ||||
|  | ||||
| 			reg, err := regexp.Compile(re) | ||||
|  | ||||
| 			if err != nil { | ||||
| 				return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} | ||||
| 			} | ||||
|  | ||||
| 			return CaretReg{reg, negate}, nil | ||||
| 		} | ||||
|  | ||||
| 		start = false | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // parseColon extracts : statements. | ||||
| func (p *Parser) parseColon() (Revisioner, error) { | ||||
| 	var tok token | ||||
| 	var err error | ||||
|  | ||||
| 	tok, _, err = p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	switch tok { | ||||
| 	case slash: | ||||
| 		return p.parseColonSlash() | ||||
| 	default: | ||||
| 		p.unscan() | ||||
| 		return p.parseColonDefault() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // parseColonSlash extracts :/<data> statements. | ||||
| func (p *Parser) parseColonSlash() (Revisioner, error) { | ||||
| 	var tok, nextTok token | ||||
| 	var lit string | ||||
| 	var re string | ||||
| 	var negate bool | ||||
| 	var err error | ||||
|  | ||||
| 	for { | ||||
| 		tok, lit, err = p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		nextTok, _, err = p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| 		case tok == emark && nextTok == emark: | ||||
| 			re += lit | ||||
| 		case re == "" && tok == emark && nextTok == minus: | ||||
| 			negate = true | ||||
| 		case re == "" && tok == emark: | ||||
| 			return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" others than those defined are reserved`)} | ||||
| 		case tok == eof: | ||||
| 			p.unscan() | ||||
| 			reg, err := regexp.Compile(re) | ||||
|  | ||||
| 			if err != nil { | ||||
| 				return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} | ||||
| 			} | ||||
|  | ||||
| 			return ColonReg{reg, negate}, nil | ||||
| 		default: | ||||
| 			p.unscan() | ||||
| 			re += lit | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // parseColonDefault extracts :<data> statements. | ||||
| func (p *Parser) parseColonDefault() (Revisioner, error) { | ||||
| 	var tok token | ||||
| 	var lit string | ||||
| 	var path string | ||||
| 	var stage int | ||||
| 	var err error | ||||
| 	var n = -1 | ||||
|  | ||||
| 	tok, lit, err = p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	nextTok, _, err := p.scan() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if tok == number && nextTok == colon { | ||||
| 		n, _ = strconv.Atoi(lit) | ||||
| 	} | ||||
|  | ||||
| 	switch n { | ||||
| 	case 0, 1, 2, 3: | ||||
| 		stage = n | ||||
| 	default: | ||||
| 		path += lit | ||||
| 		p.unscan() | ||||
| 	} | ||||
|  | ||||
| 	for { | ||||
| 		tok, lit, err = p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| 		case tok == eof && n == -1: | ||||
| 			return ColonPath{path}, nil | ||||
| 		case tok == eof: | ||||
| 			return ColonStagePath{path, stage}, nil | ||||
| 		default: | ||||
| 			path += lit | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // parseRef extracts a reference name. | ||||
| func (p *Parser) parseRef() (Revisioner, error) { | ||||
| 	var tok, prevTok token | ||||
| 	var lit, buf string | ||||
| 	var endOfRef bool | ||||
| 	var err error | ||||
|  | ||||
| 	for { | ||||
| 		tok, lit, err = p.scan() | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		switch tok { | ||||
| 		case eof, at, colon, tilde, caret: | ||||
| 			endOfRef = true | ||||
| 		} | ||||
|  | ||||
| 		err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef) | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
|  | ||||
| 		if endOfRef { | ||||
| 			p.unscan() | ||||
| 			return Ref(buf), nil | ||||
| 		} | ||||
|  | ||||
| 		buf += lit | ||||
| 		prevTok = tok | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // checkRefFormat ensures the reference name follows the rules defined here: | ||||
| // https://git-scm.com/docs/git-check-ref-format | ||||
| func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error { | ||||
| 	switch token { | ||||
| 	case aslash, space, control, qmark, asterisk, obracket: | ||||
| 		return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)} | ||||
| 	} | ||||
|  | ||||
| 	switch { | ||||
| 	case (token == dot || token == slash) && buffer == "": | ||||
| 		return &ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)} | ||||
| 	case previousToken == slash && endOfRef: | ||||
| 		return &ErrInvalidRevision{`must not end with "/"`} | ||||
| 	case previousToken == dot && endOfRef: | ||||
| 		return &ErrInvalidRevision{`must not end with "."`} | ||||
| 	case token == dot && previousToken == slash: | ||||
| 		return &ErrInvalidRevision{`must not contains "/."`} | ||||
| 	case previousToken == dot && token == dot: | ||||
| 		return &ErrInvalidRevision{`must not contains ".."`} | ||||
| 	case previousToken == slash && token == slash: | ||||
| 		return &ErrInvalidRevision{`must not contains consecutively "/"`} | ||||
| 	case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock": | ||||
| 		return &ErrInvalidRevision{"cannot end with .lock"} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
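
A sketch of what Parse produces for a composite revision (the package is internal to go-git, so this only compiles inside the module; the revision string is illustrative):

    p := revision.NewParserFromString("HEAD~2^{commit}")
    chunks, err := p.Parse() // err == nil for this input
    // chunks holds three components, in order:
    //   Ref("HEAD")                       the leading reference
    //   TildePath{Depth: 2}               from "~2"
    //   CaretType{ObjectType: "commit"}   from "^{commit}"
    _, _ = chunks, err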
							
								
								
									
117 vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go generated vendored Normal file
							| @@ -0,0 +1,117 @@ | ||||
| package revision | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"io" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
| // runeCategoryValidator takes a rune as input and | ||||
| // validates that it belongs to a rune category. | ||||
| type runeCategoryValidator func(r rune) bool | ||||
|  | ||||
| // tokenizeExpression aggregates a series of runes matching the check predicate | ||||
| // into a single string and returns the given tokenType as the token type. | ||||
| func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) { | ||||
| 	var data []rune | ||||
| 	data = append(data, ch) | ||||
|  | ||||
| 	for { | ||||
| 		c, _, err := r.ReadRune() | ||||
|  | ||||
| 		if c == zeroRune { | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return tokenError, "", err | ||||
| 		} | ||||
|  | ||||
| 		if check(c) { | ||||
| 			data = append(data, c) | ||||
| 		} else { | ||||
| 			err := r.UnreadRune() | ||||
|  | ||||
| 			if err != nil { | ||||
| 				return tokenError, "", err | ||||
| 			} | ||||
|  | ||||
| 			return tokenType, string(data), nil | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return tokenType, string(data), nil | ||||
| } | ||||
|  | ||||
| var zeroRune = rune(0) | ||||
|  | ||||
| // scanner represents a lexical scanner. | ||||
| type scanner struct { | ||||
| 	r *bufio.Reader | ||||
| } | ||||
|  | ||||
| // newScanner returns a new instance of scanner. | ||||
| func newScanner(r io.Reader) *scanner { | ||||
| 	return &scanner{r: bufio.NewReader(r)} | ||||
| } | ||||
|  | ||||
| // scan extracts tokens and their string counterparts | ||||
| // from the reader. | ||||
| func (s *scanner) scan() (token, string, error) { | ||||
| 	ch, _, err := s.r.ReadRune() | ||||
|  | ||||
| 	if err != nil && err != io.EOF { | ||||
| 		return tokenError, "", err | ||||
| 	} | ||||
|  | ||||
| 	switch ch { | ||||
| 	case zeroRune: | ||||
| 		return eof, "", nil | ||||
| 	case ':': | ||||
| 		return colon, string(ch), nil | ||||
| 	case '~': | ||||
| 		return tilde, string(ch), nil | ||||
| 	case '^': | ||||
| 		return caret, string(ch), nil | ||||
| 	case '.': | ||||
| 		return dot, string(ch), nil | ||||
| 	case '/': | ||||
| 		return slash, string(ch), nil | ||||
| 	case '{': | ||||
| 		return obrace, string(ch), nil | ||||
| 	case '}': | ||||
| 		return cbrace, string(ch), nil | ||||
| 	case '-': | ||||
| 		return minus, string(ch), nil | ||||
| 	case '@': | ||||
| 		return at, string(ch), nil | ||||
| 	case '\\': | ||||
| 		return aslash, string(ch), nil | ||||
| 	case '?': | ||||
| 		return qmark, string(ch), nil | ||||
| 	case '*': | ||||
| 		return asterisk, string(ch), nil | ||||
| 	case '[': | ||||
| 		return obracket, string(ch), nil | ||||
| 	case '!': | ||||
| 		return emark, string(ch), nil | ||||
| 	} | ||||
|  | ||||
| 	if unicode.IsSpace(ch) { | ||||
| 		return space, string(ch), nil | ||||
| 	} | ||||
|  | ||||
| 	if unicode.IsControl(ch) { | ||||
| 		return control, string(ch), nil | ||||
| 	} | ||||
|  | ||||
| 	if unicode.IsLetter(ch) { | ||||
| 		return tokenizeExpression(ch, word, unicode.IsLetter, s.r) | ||||
| 	} | ||||
|  | ||||
| 	if unicode.IsNumber(ch) { | ||||
| 		return tokenizeExpression(ch, number, unicode.IsNumber, s.r) | ||||
| 	} | ||||
|  | ||||
| 	return tokenError, string(ch), nil | ||||
| } | ||||
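
A sketch of the token stream the scanner emits (scanner and its tokens are unexported, so this belongs in the package's own tests; the input is illustrative):

    s := newScanner(strings.NewReader("master~3"))
    // Successive s.scan() calls yield:
    //   word   "master"   letters aggregated by tokenizeExpression
    //   tilde  "~"
    //   number "3"        digits aggregated the same way
    //   eof    ""         rune(0) at end of input maps to the eof token
    _ = s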
							
								
								
									
28 vendor/github.com/go-git/go-git/v5/internal/revision/token.go generated vendored Normal file
							| @@ -0,0 +1,28 @@ | ||||
| package revision | ||||
|  | ||||
| // token represents an entity extracted during string parsing. | ||||
| type token int | ||||
|  | ||||
| const ( | ||||
| 	eof token = iota | ||||
|  | ||||
| 	aslash | ||||
| 	asterisk | ||||
| 	at | ||||
| 	caret | ||||
| 	cbrace | ||||
| 	colon | ||||
| 	control | ||||
| 	dot | ||||
| 	emark | ||||
| 	minus | ||||
| 	number | ||||
| 	obrace | ||||
| 	obracket | ||||
| 	qmark | ||||
| 	slash | ||||
| 	space | ||||
| 	tilde | ||||
| 	tokenError | ||||
| 	word | ||||
| ) | ||||
							
								
								
									
37 vendor/github.com/go-git/go-git/v5/internal/url/url.go generated vendored Normal file
							| @@ -0,0 +1,37 @@ | ||||
| package url | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	isSchemeRegExp   = regexp.MustCompile(`^[^:]+://`) | ||||
| 	scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`) | ||||
| ) | ||||
|  | ||||
| // MatchesScheme returns true if the given string matches a URL-like | ||||
| // format scheme. | ||||
| func MatchesScheme(url string) bool { | ||||
| 	return isSchemeRegExp.MatchString(url) | ||||
| } | ||||
|  | ||||
| // MatchesScpLike returns true if the given string matches an SCP-like | ||||
| // format scheme. | ||||
| func MatchesScpLike(url string) bool { | ||||
| 	return scpLikeUrlRegExp.MatchString(url) | ||||
| } | ||||
|  | ||||
| // FindScpLikeComponents returns the user, host, port and path of the | ||||
| // given SCP-like URL. | ||||
| func FindScpLikeComponents(url string) (user, host, port, path string) { | ||||
| 	m := scpLikeUrlRegExp.FindStringSubmatch(url) | ||||
| 	return m[1], m[2], m[3], m[4] | ||||
| } | ||||
|  | ||||
| // IsLocalEndpoint returns true if the given URL string specifies a | ||||
| // local file endpoint.  For example, on a Linux machine, | ||||
| // `/home/user/src/go-git` would match as a local endpoint, but | ||||
| // `https://github.com/src-d/go-git` would not. | ||||
| func IsLocalEndpoint(url string) bool { | ||||
| 	return !MatchesScheme(url) && !MatchesScpLike(url) | ||||
| } | ||||
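
A sketch of the three cases these matchers distinguish (the package is internal to go-git; the URLs are illustrative):

    url.MatchesScheme("https://github.com/go-git/go-git")  // true: starts with "<scheme>://"
    url.MatchesScpLike("git@github.com:go-git/go-git.git") // true: "user@host:path" form
    user, host, port, path := url.FindScpLikeComponents("git@github.com:go-git/go-git.git")
    // user == "git", host == "github.com", port == "", path == "go-git/go-git.git"
    _, _, _, _ = user, host, port, path
    url.IsLocalEndpoint("/home/user/src/go-git") // true: neither a scheme nor SCP-like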
							
								
								
									
104 vendor/github.com/go-git/go-git/v5/object_walker.go generated vendored Normal file
							| @@ -0,0 +1,104 @@ | ||||
| package git | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/filemode" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/object" | ||||
| 	"github.com/go-git/go-git/v5/storage" | ||||
| ) | ||||
|  | ||||
| type objectWalker struct { | ||||
| 	Storer storage.Storer | ||||
| 	// seen is the set of objects seen in the repo. | ||||
| 	// seen map can become huge if walking over large | ||||
| 	// repos. Thus using struct{} as the value type. | ||||
| 	seen map[plumbing.Hash]struct{} | ||||
| } | ||||
|  | ||||
| func newObjectWalker(s storage.Storer) *objectWalker { | ||||
| 	return &objectWalker{s, map[plumbing.Hash]struct{}{}} | ||||
| } | ||||
|  | ||||
| // walkAllRefs walks all (hash) references from the repo. | ||||
| func (p *objectWalker) walkAllRefs() error { | ||||
| 	// Walk over all the references in the repo. | ||||
| 	it, err := p.Storer.IterReferences() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	defer it.Close() | ||||
| 	err = it.ForEach(func(ref *plumbing.Reference) error { | ||||
| 		// Exit this iteration early for non-hash references. | ||||
| 		if ref.Type() != plumbing.HashReference { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return p.walkObjectTree(ref.Hash()) | ||||
| 	}) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (p *objectWalker) isSeen(hash plumbing.Hash) bool { | ||||
| 	_, seen := p.seen[hash] | ||||
| 	return seen | ||||
| } | ||||
|  | ||||
| func (p *objectWalker) add(hash plumbing.Hash) { | ||||
| 	p.seen[hash] = struct{}{} | ||||
| } | ||||
|  | ||||
| // walkObjectTree walks over all objects and remembers references | ||||
| // to them in the objectWalker. This is used instead of the revlist | ||||
| // walks because memory usage is tight with huge repos. | ||||
| func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error { | ||||
| 	// Check if we have already seen this object; if not, mark it as seen. | ||||
| 	if p.isSeen(hash) { | ||||
| 		return nil | ||||
| 	} | ||||
| 	p.add(hash) | ||||
| 	// Fetch the object. | ||||
| 	obj, err := object.GetObject(p.Storer, hash) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("Getting object %s failed: %v", hash, err) | ||||
| 	} | ||||
| 	// Walk all children depending on object type. | ||||
| 	switch obj := obj.(type) { | ||||
| 	case *object.Commit: | ||||
| 		err = p.walkObjectTree(obj.TreeHash) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		for _, h := range obj.ParentHashes { | ||||
| 			err = p.walkObjectTree(h) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	case *object.Tree: | ||||
| 		for i := range obj.Entries { | ||||
| 			// Shortcut for blob objects: | ||||
| 			// 'or' the lower bits of a mode and check that | ||||
| 			// it matches a filemode.Executable. The type information | ||||
| 			// is in the higher bits, but this is the cleanest way | ||||
| 			// to handle plain files with different modes. | ||||
| 			// Other non-tree objects are somewhat rare, so they | ||||
| 			// are not special-cased. | ||||
| 			if obj.Entries[i].Mode|0755 == filemode.Executable { | ||||
| 				p.add(obj.Entries[i].Hash) | ||||
| 				continue | ||||
| 			} | ||||
| 			// Normal walk for sub-trees (and symlinks etc). | ||||
| 			err = p.walkObjectTree(obj.Entries[i].Hash) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	case *object.Tag: | ||||
| 		return p.walkObjectTree(obj.Target) | ||||
| 	default: | ||||
| 		// Error out on unhandled object types. | ||||
| 		return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
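
To make the mode shortcut in walkObjectTree concrete, using go-git's filemode constants (octal):

    // filemode.Regular    = 0100644; 0100644 | 0755 == 0100755 == filemode.Executable -> blob, only record the hash
    // filemode.Executable = 0100755; 0100755 | 0755 == 0100755 == filemode.Executable -> blob, only record the hash
    // filemode.Dir        = 0040000; 0040000 | 0755 == 0040755 != filemode.Executable -> recurse into the subtree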
							
								
								
									
508 vendor/github.com/go-git/go-git/v5/options.go generated vendored Normal file
							| @@ -0,0 +1,508 @@ | ||||
| package git | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"golang.org/x/crypto/openpgp" | ||||
| 	"github.com/go-git/go-git/v5/config" | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/object" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/transport" | ||||
| ) | ||||
|  | ||||
| // SubmoduleRescursivity defines how depth will affect any submodule recursive | ||||
| // operation. | ||||
| type SubmoduleRescursivity uint | ||||
|  | ||||
| const ( | ||||
| 	// DefaultRemoteName name of the default Remote, just like git command. | ||||
| 	DefaultRemoteName = "origin" | ||||
|  | ||||
| 	// NoRecurseSubmodules disables the recursion for a submodule operation. | ||||
| 	NoRecurseSubmodules SubmoduleRescursivity = 0 | ||||
| 	// DefaultSubmoduleRecursionDepth allows recursion in a submodule operation. | ||||
| 	DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	ErrMissingURL = errors.New("URL field is required") | ||||
| ) | ||||
|  | ||||
| // CloneOptions describes how a clone should be performed. | ||||
| type CloneOptions struct { | ||||
| 	// The (possibly remote) repository URL to clone from. | ||||
| 	URL string | ||||
| 	// Auth credentials, if required, to use with the remote repository. | ||||
| 	Auth transport.AuthMethod | ||||
| 	// Name of the remote to be added, by default `origin`. | ||||
| 	RemoteName string | ||||
| 	// Remote branch to clone. | ||||
| 	ReferenceName plumbing.ReferenceName | ||||
| 	// Fetch only ReferenceName if true. | ||||
| 	SingleBranch bool | ||||
| 	// No checkout of HEAD after clone if true. | ||||
| 	NoCheckout bool | ||||
| 	// Limit fetching to the specified number of commits. | ||||
| 	Depth int | ||||
| 	// RecurseSubmodules specifies that, after the clone is created, all | ||||
| 	// submodules within should be initialized using their default settings. | ||||
| 	// This option is ignored if the cloned repository does not have a worktree. | ||||
| 	RecurseSubmodules SubmoduleRescursivity | ||||
| 	// Progress is where the human-readable information sent by the server is | ||||
| 	// stored; if nil, nothing is stored and the no-progress capability | ||||
| 	// (if supported) is sent to the server to avoid sending this information. | ||||
| 	Progress sideband.Progress | ||||
| 	// Tags describes how tags will be fetched from the remote repository; | ||||
| 	// the default is AllTags. | ||||
| 	Tags TagMode | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *CloneOptions) Validate() error { | ||||
| 	if o.URL == "" { | ||||
| 		return ErrMissingURL | ||||
| 	} | ||||
|  | ||||
| 	if o.RemoteName == "" { | ||||
| 		o.RemoteName = DefaultRemoteName | ||||
| 	} | ||||
|  | ||||
| 	if o.ReferenceName == "" { | ||||
| 		o.ReferenceName = plumbing.HEAD | ||||
| 	} | ||||
|  | ||||
| 	if o.Tags == InvalidTagMode { | ||||
| 		o.Tags = AllTags | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
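|  | ||||
| // Example (sketch): a minimal shallow clone driven by these options; the | ||||
| // destination path is illustrative: | ||||
| // | ||||
| //	r, err := git.PlainClone("/tmp/repo", false, &git.CloneOptions{ | ||||
| //		URL:   "https://github.com/go-git/go-git", | ||||
| //		Depth: 1, | ||||
| //	}) | ||||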
|  | ||||
| // PullOptions describes how a pull should be performed. | ||||
| type PullOptions struct { | ||||
| 	// Name of the remote to be pulled. If empty, uses the default. | ||||
| 	RemoteName string | ||||
| 	// Remote branch to pull. If empty, uses HEAD. | ||||
| 	ReferenceName plumbing.ReferenceName | ||||
| 	// Fetch only ReferenceName if true. | ||||
| 	SingleBranch bool | ||||
| 	// Limit fetching to the specified number of commits. | ||||
| 	Depth int | ||||
| 	// Auth credentials, if required, to use with the remote repository. | ||||
| 	Auth transport.AuthMethod | ||||
| 	// RecurseSubmodules controls whether new commits of all populated submodules | ||||
| 	// should be fetched too. | ||||
| 	RecurseSubmodules SubmoduleRescursivity | ||||
| 	// Progress is where the human-readable information sent by the server is | ||||
| 	// stored; if nil, nothing is stored and the no-progress capability | ||||
| 	// (if supported) is sent to the server to avoid sending this information. | ||||
| 	Progress sideband.Progress | ||||
| 	// Force allows the pull to update a local branch even when the remote | ||||
| 	// branch does not descend from it. | ||||
| 	Force bool | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *PullOptions) Validate() error { | ||||
| 	if o.RemoteName == "" { | ||||
| 		o.RemoteName = DefaultRemoteName | ||||
| 	} | ||||
|  | ||||
| 	if o.ReferenceName == "" { | ||||
| 		o.ReferenceName = plumbing.HEAD | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
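|  | ||||
| // Example (sketch): pulling into an existing worktree with the defaults filled | ||||
| // in by Validate; r is assumed to be an already opened *git.Repository: | ||||
| // | ||||
| //	w, _ := r.Worktree() | ||||
| //	err := w.Pull(&git.PullOptions{RemoteName: "origin"}) | ||||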
|  | ||||
| type TagMode int | ||||
|  | ||||
| const ( | ||||
| 	InvalidTagMode TagMode = iota | ||||
| 	// TagFollowing fetches any tag that points into the histories being | ||||
| 	// fetched. TagFollowing requires a server with the `include-tag` capability | ||||
| 	// in order to fetch the annotated tag objects. | ||||
| 	TagFollowing | ||||
| 	// AllTags fetch all tags from the remote (i.e., fetch remote tags | ||||
| 	// refs/tags/* into local tags with the same name) | ||||
| 	AllTags | ||||
| 	// NoTags fetches no tags from the remote at all. | ||||
| 	NoTags | ||||
| ) | ||||
|  | ||||
| // FetchOptions describes how a fetch should be performed | ||||
| type FetchOptions struct { | ||||
| 	// Name of the remote to fetch from. Defaults to origin. | ||||
| 	RemoteName string | ||||
| 	RefSpecs   []config.RefSpec | ||||
| 	// Depth limit fetching to the specified number of commits from the tip of | ||||
| 	// each remote branch history. | ||||
| 	Depth int | ||||
| 	// Auth credentials, if required, to use with the remote repository. | ||||
| 	Auth transport.AuthMethod | ||||
| 	// Progress is where the human-readable information sent by the server is | ||||
| 	// stored; if nil, nothing is stored and the no-progress capability | ||||
| 	// (if supported) is sent to the server to avoid sending this information. | ||||
| 	Progress sideband.Progress | ||||
| 	// Tags describes how tags will be fetched from the remote repository; | ||||
| 	// the default is TagFollowing. | ||||
| 	Tags TagMode | ||||
| 	// Force allows the fetch to update a local branch even when the remote | ||||
| 	// branch does not descend from it. | ||||
| 	Force bool | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *FetchOptions) Validate() error { | ||||
| 	if o.RemoteName == "" { | ||||
| 		o.RemoteName = DefaultRemoteName | ||||
| 	} | ||||
|  | ||||
| 	if o.Tags == InvalidTagMode { | ||||
| 		o.Tags = TagFollowing | ||||
| 	} | ||||
|  | ||||
| 	for _, r := range o.RefSpecs { | ||||
| 		if err := r.Validate(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
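|  | ||||
| // Example (sketch): fetching a single branch through an explicit refspec; the | ||||
| // branch name is illustrative: | ||||
| // | ||||
| //	err := r.Fetch(&git.FetchOptions{ | ||||
| //		RefSpecs: []config.RefSpec{"+refs/heads/master:refs/remotes/origin/master"}, | ||||
| //	}) | ||||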
|  | ||||
| // PushOptions describes how a push should be performed. | ||||
| type PushOptions struct { | ||||
| 	// RemoteName is the name of the remote to be pushed to. | ||||
| 	RemoteName string | ||||
| 	// RefSpecs specify what destination ref to update with what source | ||||
| 	// object. A refspec with empty src can be used to delete a reference. | ||||
| 	RefSpecs []config.RefSpec | ||||
| 	// Auth credentials, if required, to use with the remote repository. | ||||
| 	Auth transport.AuthMethod | ||||
| 	// Progress is where the human-readable information sent by the server is | ||||
| 	// stored; if nil, nothing is stored. | ||||
| 	Progress sideband.Progress | ||||
| 	// Prune specifies that remote refs that match the given RefSpecs and that | ||||
| 	// do not exist locally will be removed. | ||||
| 	Prune bool | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *PushOptions) Validate() error { | ||||
| 	if o.RemoteName == "" { | ||||
| 		o.RemoteName = DefaultRemoteName | ||||
| 	} | ||||
|  | ||||
| 	if len(o.RefSpecs) == 0 { | ||||
| 		o.RefSpecs = []config.RefSpec{ | ||||
| 			config.RefSpec(config.DefaultPushRefSpec), | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, r := range o.RefSpecs { | ||||
| 		if err := r.Validate(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
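|  | ||||
| // Example (sketch): the empty-src deletion form mentioned above; pushing the | ||||
| // refspec ":refs/heads/old-branch" asks the remote to delete that reference: | ||||
| // | ||||
| //	err := r.Push(&git.PushOptions{ | ||||
| //		RefSpecs: []config.RefSpec{":refs/heads/old-branch"}, | ||||
| //	}) | ||||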
|  | ||||
| // SubmoduleUpdateOptions describes how a submodule update should be performed. | ||||
| type SubmoduleUpdateOptions struct { | ||||
| 	// Init, if true, initializes the submodules recorded in the index. | ||||
| 	Init bool | ||||
| 	// NoFetch tells the update command not to fetch new objects from the | ||||
| 	// remote site. | ||||
| 	NoFetch bool | ||||
| 	// RecurseSubmodules performs the update not only in the submodules of | ||||
| 	// the current repository but also in any nested submodules inside those | ||||
| 	// submodules (and so on), until the SubmoduleRescursivity is reached. | ||||
| 	RecurseSubmodules SubmoduleRescursivity | ||||
| 	// Auth credentials, if required, to use with the remote repository. | ||||
| 	Auth transport.AuthMethod | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	ErrBranchHashExclusive  = errors.New("Branch and Hash are mutually exclusive") | ||||
| 	ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used") | ||||
| ) | ||||
|  | ||||
| // CheckoutOptions describes how a checkout operation should be performed. | ||||
| type CheckoutOptions struct { | ||||
| 	// Hash is the hash of the commit to be checked out. If used, HEAD will be | ||||
| 	// in detached mode. If Create is not used, Branch and Hash are mutually | ||||
| 	// exclusive. | ||||
| 	Hash plumbing.Hash | ||||
| 	// Branch to be checked out; if Branch and Hash are empty, it is set to `master`. | ||||
| 	Branch plumbing.ReferenceName | ||||
| 	// Create a new branch named Branch and start it at Hash. | ||||
| 	Create bool | ||||
| 	// Force, if true when switching branches, proceeds even if the index or the | ||||
| 	// working tree differs from HEAD. This is used to throw away local changes. | ||||
| 	Force bool | ||||
| 	// Keep, if true when switching branches, local changes (the index or the | ||||
| 	// working tree changes) will be kept so that they can be committed to the | ||||
| 	// target branch. Force and Keep are mutually exclusive and should not | ||||
| 	// both be set to true. | ||||
| 	Keep bool | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *CheckoutOptions) Validate() error { | ||||
| 	if !o.Create && !o.Hash.IsZero() && o.Branch != "" { | ||||
| 		return ErrBranchHashExclusive | ||||
| 	} | ||||
|  | ||||
| 	if o.Create && o.Branch == "" { | ||||
| 		return ErrCreateRequiresBranch | ||||
| 	} | ||||
|  | ||||
| 	if o.Branch == "" { | ||||
| 		o.Branch = plumbing.Master | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
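|  | ||||
| // Example (sketch): creating and switching to a new branch; the branch name is | ||||
| // illustrative: | ||||
| // | ||||
| //	w, _ := r.Worktree() | ||||
| //	err := w.Checkout(&git.CheckoutOptions{ | ||||
| //		Branch: plumbing.NewBranchReferenceName("feature"), | ||||
| //		Create: true, | ||||
| //	}) | ||||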
|  | ||||
| // ResetMode defines the mode of a reset operation. | ||||
| type ResetMode int8 | ||||
|  | ||||
| const ( | ||||
| 	// MixedReset resets the index but not the working tree (i.e., the changed | ||||
| 	// files are preserved but not marked for commit) and reports what has not | ||||
| 	// been updated. This is the default action. | ||||
| 	MixedReset ResetMode = iota | ||||
| 	// HardReset resets the index and working tree. Any changes to tracked files | ||||
| 	// in the working tree are discarded. | ||||
| 	HardReset | ||||
| 	// MergeReset resets the index and updates the files in the working tree | ||||
| 	// that are different between Commit and HEAD, but keeps those which are | ||||
| 	// different between the index and working tree (i.e. which have changes | ||||
| 	// which have not been added). | ||||
| 	// | ||||
| 	// If a file that is different between Commit and the index has unstaged | ||||
| 	// changes, reset is aborted. | ||||
| 	MergeReset | ||||
| 	// SoftReset does not touch the index file or the working tree at all (but | ||||
| 	// resets the head to <commit>, just like all modes do). This leaves all | ||||
| 	// your changed files "Changes to be committed", as git status would put it. | ||||
| 	SoftReset | ||||
| ) | ||||
|  | ||||
| // ResetOptions describes how a reset operation should be performed. | ||||
| type ResetOptions struct { | ||||
| 	// Commit, if present, sets the current branch head (HEAD) to it. | ||||
| 	Commit plumbing.Hash | ||||
| 	// Mode resets the current branch head to Commit and possibly updates the | ||||
| 	// index (resetting it to the tree of Commit) and the working tree, | ||||
| 	// depending on Mode. If empty, MixedReset is used. | ||||
| 	Mode ResetMode | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *ResetOptions) Validate(r *Repository) error { | ||||
| 	if o.Commit == plumbing.ZeroHash { | ||||
| 		ref, err := r.Head() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		o.Commit = ref.Hash() | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
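A sketch of the common "throw away everything" use of these options (helper name hypothetical; `repo` assumed open):

// hardResetToHead discards all local changes: Commit is left zero, so
// Validate resolves it to the current HEAD, and HardReset rewrites both
// the index and the working tree.
func hardResetToHead(repo *git.Repository) error {
	w, err := repo.Worktree()
	if err != nil {
		return err
	}
	return w.Reset(&git.ResetOptions{Mode: git.HardReset})
}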
|  | ||||
| type LogOrder int8 | ||||
|  | ||||
| const ( | ||||
| 	LogOrderDefault LogOrder = iota | ||||
| 	LogOrderDFS | ||||
| 	LogOrderDFSPost | ||||
| 	LogOrderBSF | ||||
| 	LogOrderCommitterTime | ||||
| ) | ||||
|  | ||||
| // LogOptions describes how a log action should be performed. | ||||
| type LogOptions struct { | ||||
| 	// When the From option is set the log will only contain commits | ||||
| 	// reachable from it. If this option is not set, HEAD will be used as | ||||
| 	// the default From. | ||||
| 	From plumbing.Hash | ||||
|  | ||||
| 	// The default traversal algorithm is depth-first search. | ||||
| 	// Set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`); | ||||
| 	// set Order=LogOrderBSF for breadth-first search. | ||||
| 	Order LogOrder | ||||
|  | ||||
| 	// Show only those commits in which the specified file was inserted/updated. | ||||
| 	// It is equivalent to running `git log -- <file-name>`. | ||||
| 	// This field is kept for compatibility; it can be replaced with PathFilter. | ||||
| 	FileName *string | ||||
|  | ||||
| 	// Filter commits based on the paths of the files they update. The function | ||||
| 	// takes a file path as its argument and should return true if the file is | ||||
| 	// desired. It can be used to implement `git log -- <path>`, where <path> is | ||||
| 	// a file path, a directory path, or a regexp of a file/directory path. | ||||
| 	PathFilter func(string) bool | ||||
|  | ||||
| 	// Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>. | ||||
| 	// It is equivalent to running `git log --all`. | ||||
| 	// If set to true, the From option will be ignored. | ||||
| 	All bool | ||||
|  | ||||
| 	// Show commits more recent than a specific date. | ||||
| 	// It is equivalent to running `git log --since <date>` or `git log --after <date>`. | ||||
| 	Since *time.Time | ||||
|  | ||||
| 	// Show commits older than a specific date. | ||||
| 	// It is equivalent to running `git log --until <date>` or `git log --before <date>`. | ||||
| 	Until *time.Time | ||||
| } | ||||
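A minimal sketch of Repository.Log with these options (helper name hypothetical; `repo` assumed open):

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// printLog walks the history reachable from HEAD (From left zero) in
// committer-time order, which is the closest match to `git log`.
func printLog(repo *git.Repository) error {
	iter, err := repo.Log(&git.LogOptions{Order: git.LogOrderCommitterTime})
	if err != nil {
		return err
	}
	return iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Message)
		return nil
	})
}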
|  | ||||
| var ( | ||||
| 	ErrMissingAuthor = errors.New("author field is required") | ||||
| ) | ||||
|  | ||||
| // CommitOptions describes how a commit operation should be performed. | ||||
| type CommitOptions struct { | ||||
| 	// All, if true, automatically stages files that have been modified and | ||||
| 	// deleted; new files you have not told Git about are not affected. | ||||
| 	All bool | ||||
| 	// Author is the author's signature of the commit. | ||||
| 	Author *object.Signature | ||||
| 	// Committer is the committer's signature of the commit. If Committer is | ||||
| 	// nil the Author signature is used. | ||||
| 	Committer *object.Signature | ||||
| 	// Parents are the parent commits for the new commit; by default, when | ||||
| 	// len(Parents) is zero, the hash of the HEAD reference is used. | ||||
| 	Parents []plumbing.Hash | ||||
| 	// SignKey denotes a key to sign the commit with. A nil value here means the | ||||
| 	// commit will not be signed. The private key must be present and already | ||||
| 	// decrypted. | ||||
| 	SignKey *openpgp.Entity | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *CommitOptions) Validate(r *Repository) error { | ||||
| 	if o.Author == nil { | ||||
| 		return ErrMissingAuthor | ||||
| 	} | ||||
|  | ||||
| 	if o.Committer == nil { | ||||
| 		o.Committer = o.Author | ||||
| 	} | ||||
|  | ||||
| 	if len(o.Parents) == 0 { | ||||
| 		head, err := r.Head() | ||||
| 		if err != nil && err != plumbing.ErrReferenceNotFound { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		if head != nil { | ||||
| 			o.Parents = []plumbing.Hash{head.Hash()} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
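A sketch of Worktree.Commit with these options (helper, name, and email are illustrative; Committer and Parents are defaulted by Validate as shown above):

import (
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// commitAll stages modified and deleted files and commits them.
func commitAll(repo *git.Repository, msg string) error {
	w, err := repo.Worktree()
	if err != nil {
		return err
	}
	_, err = w.Commit(msg, &git.CommitOptions{
		All:    true,
		Author: &object.Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()},
	})
	return err
}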
|  | ||||
| var ( | ||||
| 	ErrMissingName    = errors.New("name field is required") | ||||
| 	ErrMissingTagger  = errors.New("tagger field is required") | ||||
| 	ErrMissingMessage = errors.New("message field is required") | ||||
| ) | ||||
|  | ||||
| // CreateTagOptions describes how a tag object should be created. | ||||
| type CreateTagOptions struct { | ||||
| 	// Tagger defines the signature of the tag creator. | ||||
| 	Tagger *object.Signature | ||||
| 	// Message defines the annotation of the tag. It is canonicalized during | ||||
| 	// validation into the format expected by git - no leading whitespace and | ||||
| 	// ending in a newline. | ||||
| 	Message string | ||||
| 	// SignKey denotes a key to sign the tag with. A nil value here means the tag | ||||
| 	// will not be signed. The private key must be present and already decrypted. | ||||
| 	SignKey *openpgp.Entity | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error { | ||||
| 	if o.Tagger == nil { | ||||
| 		return ErrMissingTagger | ||||
| 	} | ||||
|  | ||||
| 	if o.Message == "" { | ||||
| 		return ErrMissingMessage | ||||
| 	} | ||||
|  | ||||
| 	// Canonicalize the message into the expected message format. | ||||
| 	o.Message = strings.TrimSpace(o.Message) + "\n" | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
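A sketch of Repository.CreateTag with these options (helper and signature values are illustrative; Validate canonicalizes the message by trimming whitespace and appending a newline):

// tagHead creates an annotated tag pointing at HEAD.
func tagHead(repo *git.Repository, name string) error {
	head, err := repo.Head()
	if err != nil {
		return err
	}
	_, err = repo.CreateTag(name, head.Hash(), &git.CreateTagOptions{
		Tagger:  &object.Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()},
		Message: "release " + name,
	})
	return err
}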
|  | ||||
| // ListOptions describes how a remote list should be performed. | ||||
| type ListOptions struct { | ||||
| 	// Auth credentials, if required, to use with the remote repository. | ||||
| 	Auth transport.AuthMethod | ||||
| } | ||||
|  | ||||
| // CleanOptions describes how a clean should be performed. | ||||
| type CleanOptions struct { | ||||
| 	Dir bool | ||||
| } | ||||
|  | ||||
| // GrepOptions describes how a grep should be performed. | ||||
| type GrepOptions struct { | ||||
| 	// Patterns are compiled Regexp objects to be matched. | ||||
| 	Patterns []*regexp.Regexp | ||||
| 	// InvertMatch selects non-matching lines. | ||||
| 	InvertMatch bool | ||||
| 	// CommitHash is the hash of the commit from which worktree should be derived. | ||||
| 	CommitHash plumbing.Hash | ||||
| 	// ReferenceName is the branch or tag name from which worktree should be derived. | ||||
| 	ReferenceName plumbing.ReferenceName | ||||
| 	// PathSpecs are compiled Regexp objects of pathspec to use in the matching. | ||||
| 	PathSpecs []*regexp.Regexp | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed") | ||||
| ) | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *GrepOptions) Validate(w *Worktree) error { | ||||
| 	if !o.CommitHash.IsZero() && o.ReferenceName != "" { | ||||
| 		return ErrHashOrReference | ||||
| 	} | ||||
|  | ||||
| 	// If none of CommitHash and ReferenceName are provided, set commit hash of | ||||
| 	// the repository's head. | ||||
| 	if o.CommitHash.IsZero() && o.ReferenceName == "" { | ||||
| 		ref, err := w.r.Head() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		o.CommitHash = ref.Hash() | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
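A sketch of Worktree.Grep with these options (helper name and pattern are illustrative; CommitHash and ReferenceName are left empty, so Validate resolves HEAD as documented above):

import (
	"regexp"

	git "github.com/go-git/go-git/v5"
)

// grepTODOs searches the tree of HEAD for "TODO" markers.
func grepTODOs(repo *git.Repository) ([]git.GrepResult, error) {
	w, err := repo.Worktree()
	if err != nil {
		return nil, err
	}
	return w.Grep(&git.GrepOptions{
		Patterns: []*regexp.Regexp{regexp.MustCompile(`TODO`)},
	})
}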
|  | ||||
| // PlainOpenOptions describes how opening a plain repository should be | ||||
| // performed. | ||||
| type PlainOpenOptions struct { | ||||
| 	// DetectDotGit defines whether parent directories should be | ||||
| 	// walked until a .git directory or file is found. | ||||
| 	DetectDotGit bool | ||||
| } | ||||
|  | ||||
| // Validate validates the fields and sets the default values. | ||||
| func (o *PlainOpenOptions) Validate() error { return nil } | ||||
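A one-line sketch of the DetectDotGit behaviour (helper name hypothetical):

// openFromSubdir opens a repository from any directory inside it, walking
// up parent directories until a .git directory or file is found.
func openFromSubdir(path string) (*git.Repository, error) {
	return git.PlainOpenWithOptions(path, &git.PlainOpenOptions{DetectDotGit: true})
}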
98 vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go generated vendored Normal file
							| @@ -0,0 +1,98 @@ | ||||
| package cache | ||||
|  | ||||
| import ( | ||||
| 	"container/list" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // BufferLRU implements a buffer cache with an LRU eviction policy and a | ||||
| // maximum size (measured in buffer size). | ||||
| type BufferLRU struct { | ||||
| 	MaxSize FileSize | ||||
|  | ||||
| 	actualSize FileSize | ||||
| 	ll         *list.List | ||||
| 	cache      map[int64]*list.Element | ||||
| 	mut        sync.Mutex | ||||
| } | ||||
|  | ||||
| // NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum | ||||
| // size will never be exceeded. | ||||
| func NewBufferLRU(maxSize FileSize) *BufferLRU { | ||||
| 	return &BufferLRU{MaxSize: maxSize} | ||||
| } | ||||
|  | ||||
| // NewBufferLRUDefault creates a new BufferLRU with the default cache size. | ||||
| func NewBufferLRUDefault() *BufferLRU { | ||||
| 	return &BufferLRU{MaxSize: DefaultMaxSize} | ||||
| } | ||||
|  | ||||
| type buffer struct { | ||||
| 	Key   int64 | ||||
| 	Slice []byte | ||||
| } | ||||
|  | ||||
| // Put puts a buffer into the cache. If the buffer is already in the cache, it | ||||
| // will be marked as used. Otherwise, it will be inserted. Buffers might | ||||
| // be evicted to make room for the new one. | ||||
| func (c *BufferLRU) Put(key int64, slice []byte) { | ||||
| 	c.mut.Lock() | ||||
| 	defer c.mut.Unlock() | ||||
|  | ||||
| 	if c.cache == nil { | ||||
| 		c.actualSize = 0 | ||||
| 		c.cache = make(map[int64]*list.Element, 1000) | ||||
| 		c.ll = list.New() | ||||
| 	} | ||||
|  | ||||
| 	bufSize := FileSize(len(slice)) | ||||
| 	if ee, ok := c.cache[key]; ok { | ||||
| 		oldBuf := ee.Value.(buffer) | ||||
| 		// in this case bufSize is a delta: new size - old size | ||||
| 		bufSize -= FileSize(len(oldBuf.Slice)) | ||||
| 		c.ll.MoveToFront(ee) | ||||
| 		ee.Value = buffer{key, slice} | ||||
| 	} else { | ||||
| 		if bufSize > c.MaxSize { | ||||
| 			return | ||||
| 		} | ||||
| 		ee := c.ll.PushFront(buffer{key, slice}) | ||||
| 		c.cache[key] = ee | ||||
| 	} | ||||
|  | ||||
| 	c.actualSize += bufSize | ||||
| 	for c.actualSize > c.MaxSize { | ||||
| 		last := c.ll.Back() | ||||
| 		lastObj := last.Value.(buffer) | ||||
| 		lastSize := FileSize(len(lastObj.Slice)) | ||||
|  | ||||
| 		c.ll.Remove(last) | ||||
| 		delete(c.cache, lastObj.Key) | ||||
| 		c.actualSize -= lastSize | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Get returns a buffer by its key. It marks the buffer as used. If the buffer | ||||
| // is not in the cache, (nil, false) will be returned. | ||||
| func (c *BufferLRU) Get(key int64) ([]byte, bool) { | ||||
| 	c.mut.Lock() | ||||
| 	defer c.mut.Unlock() | ||||
|  | ||||
| 	ee, ok := c.cache[key] | ||||
| 	if !ok { | ||||
| 		return nil, false | ||||
| 	} | ||||
|  | ||||
| 	c.ll.MoveToFront(ee) | ||||
| 	return ee.Value.(buffer).Slice, true | ||||
| } | ||||
|  | ||||
| // Clear the content of this buffer cache. | ||||
| func (c *BufferLRU) Clear() { | ||||
| 	c.mut.Lock() | ||||
| 	defer c.mut.Unlock() | ||||
|  | ||||
| 	c.ll = nil | ||||
| 	c.cache = nil | ||||
| 	c.actualSize = 0 | ||||
| } | ||||
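A minimal sketch of the eviction behaviour described above (function name and sizes are illustrative):

import "github.com/go-git/go-git/v5/plumbing/cache"

// bufferLRUDemo shows that once the total size exceeds MaxSize, the
// least-recently-used buffer is dropped.
func bufferLRUDemo() {
	c := cache.NewBufferLRU(1 * cache.KiByte)
	c.Put(1, make([]byte, 600))
	c.Put(2, make([]byte, 600)) // 1200 B > 1 KiB, so key 1 is evicted
	_, ok := c.Get(1)           // ok == false: key 1 is gone
	_ = ok
}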
39 vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go generated vendored Normal file
							| @@ -0,0 +1,39 @@ | ||||
| package cache | ||||
|  | ||||
| import "github.com/go-git/go-git/v5/plumbing" | ||||
|  | ||||
| const ( | ||||
| 	Byte FileSize = 1 << (iota * 10) | ||||
| 	KiByte | ||||
| 	MiByte | ||||
| 	GiByte | ||||
| ) | ||||
|  | ||||
| type FileSize int64 | ||||
|  | ||||
| const DefaultMaxSize FileSize = 96 * MiByte | ||||
|  | ||||
| // Object is an interface to an object cache. | ||||
| type Object interface { | ||||
| 	// Put puts the given object into the cache. Whether this object will | ||||
| 	// actually be put into the cache or not is implementation specific. | ||||
| 	Put(o plumbing.EncodedObject) | ||||
| 	// Get gets an object from the cache given its hash. The second return value | ||||
| 	// is true if the object was returned, and false otherwise. | ||||
| 	Get(k plumbing.Hash) (plumbing.EncodedObject, bool) | ||||
| 	// Clear clears every object from the cache. | ||||
| 	Clear() | ||||
| } | ||||
|  | ||||
| // Buffer is an interface to a buffer cache. | ||||
| type Buffer interface { | ||||
| 	// Put puts a buffer into the cache. If the buffer is already in the cache, | ||||
| 	// it will be marked as used. Otherwise, it will be inserted. Buffer might | ||||
| 	// be evicted to make room for the new one. | ||||
| 	Put(key int64, slice []byte) | ||||
| 	// Get returns a buffer by its key. It marks the buffer as used. If the | ||||
| 	// buffer is not in the cache, (nil, false) will be returned. | ||||
| 	Get(key int64) ([]byte, bool) | ||||
| 	// Clear clears every object from the cache. | ||||
| 	Clear() | ||||
| } | ||||
101 vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go generated vendored Normal file
							| @@ -0,0 +1,101 @@ | ||||
| package cache | ||||
|  | ||||
| import ( | ||||
| 	"container/list" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| // ObjectLRU implements an object cache with an LRU eviction policy and a | ||||
| // maximum size (measured in object size). | ||||
| type ObjectLRU struct { | ||||
| 	MaxSize FileSize | ||||
|  | ||||
| 	actualSize FileSize | ||||
| 	ll         *list.List | ||||
| 	cache      map[interface{}]*list.Element | ||||
| 	mut        sync.Mutex | ||||
| } | ||||
|  | ||||
| // NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum | ||||
| // size will never be exceeded. | ||||
| func NewObjectLRU(maxSize FileSize) *ObjectLRU { | ||||
| 	return &ObjectLRU{MaxSize: maxSize} | ||||
| } | ||||
|  | ||||
| // NewObjectLRUDefault creates a new ObjectLRU with the default cache size. | ||||
| func NewObjectLRUDefault() *ObjectLRU { | ||||
| 	return &ObjectLRU{MaxSize: DefaultMaxSize} | ||||
| } | ||||
|  | ||||
| // Put puts an object into the cache. If the object is already in the cache, it | ||||
| // will be marked as used. Otherwise, it will be inserted. A single object might | ||||
| // be evicted to make room for the new object. | ||||
| func (c *ObjectLRU) Put(obj plumbing.EncodedObject) { | ||||
| 	c.mut.Lock() | ||||
| 	defer c.mut.Unlock() | ||||
|  | ||||
| 	if c.cache == nil { | ||||
| 		c.actualSize = 0 | ||||
| 		c.cache = make(map[interface{}]*list.Element, 1000) | ||||
| 		c.ll = list.New() | ||||
| 	} | ||||
|  | ||||
| 	objSize := FileSize(obj.Size()) | ||||
| 	key := obj.Hash() | ||||
| 	if ee, ok := c.cache[key]; ok { | ||||
| 		oldObj := ee.Value.(plumbing.EncodedObject) | ||||
| 		// in this case objSize is a delta: new size - old size | ||||
| 		objSize -= FileSize(oldObj.Size()) | ||||
| 		c.ll.MoveToFront(ee) | ||||
| 		ee.Value = obj | ||||
| 	} else { | ||||
| 		if objSize > c.MaxSize { | ||||
| 			return | ||||
| 		} | ||||
| 		ee := c.ll.PushFront(obj) | ||||
| 		c.cache[key] = ee | ||||
| 	} | ||||
|  | ||||
| 	c.actualSize += objSize | ||||
| 	for c.actualSize > c.MaxSize { | ||||
| 		last := c.ll.Back() | ||||
| 		if last == nil { | ||||
| 			c.actualSize = 0 | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		lastObj := last.Value.(plumbing.EncodedObject) | ||||
| 		lastSize := FileSize(lastObj.Size()) | ||||
|  | ||||
| 		c.ll.Remove(last) | ||||
| 		delete(c.cache, lastObj.Hash()) | ||||
| 		c.actualSize -= lastSize | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Get returns an object by its hash. It marks the object as used. If the object | ||||
| // is not in the cache, (nil, false) will be returned. | ||||
| func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) { | ||||
| 	c.mut.Lock() | ||||
| 	defer c.mut.Unlock() | ||||
|  | ||||
| 	ee, ok := c.cache[k] | ||||
| 	if !ok { | ||||
| 		return nil, false | ||||
| 	} | ||||
|  | ||||
| 	c.ll.MoveToFront(ee) | ||||
| 	return ee.Value.(plumbing.EncodedObject), true | ||||
| } | ||||
|  | ||||
| // Clear the content of this object cache. | ||||
| func (c *ObjectLRU) Clear() { | ||||
| 	c.mut.Lock() | ||||
| 	defer c.mut.Unlock() | ||||
|  | ||||
| 	c.ll = nil | ||||
| 	c.cache = nil | ||||
| 	c.actualSize = 0 | ||||
| } | ||||
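A minimal sketch of caching an object and fetching it back (function name illustrative; plumbing.MemoryObject implements plumbing.EncodedObject):

import (
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/cache"
)

// objectLRUDemo caches an in-memory blob and looks it up by hash.
func objectLRUDemo() {
	c := cache.NewObjectLRUDefault() // DefaultMaxSize: 96 MiB
	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	obj.Write([]byte("hello"))
	c.Put(obj)
	cached, ok := c.Get(obj.Hash()) // Get also marks the object as recently used
	_, _ = cached, ok
}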
35 vendor/github.com/go-git/go-git/v5/plumbing/error.go generated vendored Normal file
							| @@ -0,0 +1,35 @@ | ||||
| package plumbing | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| type PermanentError struct { | ||||
| 	Err error | ||||
| } | ||||
|  | ||||
| func NewPermanentError(err error) *PermanentError { | ||||
| 	if err == nil { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	return &PermanentError{Err: err} | ||||
| } | ||||
|  | ||||
| func (e *PermanentError) Error() string { | ||||
| 	return fmt.Sprintf("permanent client error: %s", e.Err.Error()) | ||||
| } | ||||
|  | ||||
| type UnexpectedError struct { | ||||
| 	Err error | ||||
| } | ||||
|  | ||||
| func NewUnexpectedError(err error) *UnexpectedError { | ||||
| 	if err == nil { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	return &UnexpectedError{Err: err} | ||||
| } | ||||
|  | ||||
| func (e *UnexpectedError) Error() string { | ||||
| 	return fmt.Sprintf("unexpected client error: %s", e.Err.Error()) | ||||
| } | ||||
188 vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go generated vendored Normal file
							| @@ -0,0 +1,188 @@ | ||||
| package filemode | ||||
|  | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
| // A FileMode represents the kind of tree entries used by git. It | ||||
| // resembles regular file system modes, although FileModes are | ||||
| // considerably simpler (there are not so many), and some, like | ||||
| // Submodule, have no file system equivalent. | ||||
| type FileMode uint32 | ||||
|  | ||||
| const ( | ||||
| 	// Empty is used as the FileMode of tree elements when comparing | ||||
| 	// trees in the following situations: | ||||
| 	// | ||||
| 	// - the mode of tree elements before their creation.  - the mode of | ||||
| 	// tree elements after their deletion.  - the mode of unmerged | ||||
| 	// elements when checking the index. | ||||
| 	// | ||||
| 	// Empty has no file system equivalent.  As Empty is the zero value | ||||
| 	// of FileMode, it is also returned by New and | ||||
| 	// NewFromOSFileMode along with an error, when they fail. | ||||
| 	Empty FileMode = 0 | ||||
| 	// Dir represents a directory. | ||||
| 	Dir FileMode = 0040000 | ||||
| 	// Regular represents non-executable files.  Please note this is not | ||||
| 	// the same as golang regular files, which include executable files. | ||||
| 	Regular FileMode = 0100644 | ||||
| 	// Deprecated represents non-executable files with the group-writable | ||||
| 	// bit set.  This mode was supported by the first versions of git, | ||||
| 	// but it is deprecated nowadays.  This library uses it internally, | ||||
| 	// so you can read old packfiles, but such files will be treated as | ||||
| 	// Regular when interfacing with the outside world.  This is the | ||||
| 	// standard git behaviour. | ||||
| 	Deprecated FileMode = 0100664 | ||||
| 	// Executable represents executable files. | ||||
| 	Executable FileMode = 0100755 | ||||
| 	// Symlink represents symbolic links to files. | ||||
| 	Symlink FileMode = 0120000 | ||||
| 	// Submodule represents git submodules.  This mode has no file system | ||||
| 	// equivalent. | ||||
| 	Submodule FileMode = 0160000 | ||||
| ) | ||||
|  | ||||
| // New takes the octal string representation of a FileMode and returns | ||||
| // the FileMode and a nil error.  If the string cannot be parsed as a | ||||
| // 32-bit unsigned octal number, it returns Empty and the parsing error. | ||||
| // | ||||
| // Example: "40000" means Dir, "100644" means Regular. | ||||
| // | ||||
| // Please note this function does not check if the returned FileMode | ||||
| // is valid in git or if it is malformed.  For instance, "1" will | ||||
| // return the malformed FileMode(1) and a nil error. | ||||
| func New(s string) (FileMode, error) { | ||||
| 	n, err := strconv.ParseUint(s, 8, 32) | ||||
| 	if err != nil { | ||||
| 		return Empty, err | ||||
| 	} | ||||
|  | ||||
| 	return FileMode(n), nil | ||||
| } | ||||
|  | ||||
| // NewFromOSFileMode returns the FileMode used by git to represent | ||||
| // the provided file system modes and a nil error on success.  If the | ||||
| // file system mode cannot be mapped to any valid git mode (as with | ||||
| // sockets or named pipes), it will return Empty and an error. | ||||
| // | ||||
| // Note that some git modes, like Deprecated and Submodule, cannot be | ||||
| // generated from os.FileModes; Empty, along with an error, is returned | ||||
| // only when the method fails. | ||||
| func NewFromOSFileMode(m os.FileMode) (FileMode, error) { | ||||
| 	if m.IsRegular() { | ||||
| 		if isSetTemporary(m) { | ||||
| 			return Empty, fmt.Errorf("no equivalent git mode for %s", m) | ||||
| 		} | ||||
| 		if isSetCharDevice(m) { | ||||
| 			return Empty, fmt.Errorf("no equivalent git mode for %s", m) | ||||
| 		} | ||||
| 		if isSetUserExecutable(m) { | ||||
| 			return Executable, nil | ||||
| 		} | ||||
| 		return Regular, nil | ||||
| 	} | ||||
|  | ||||
| 	if m.IsDir() { | ||||
| 		return Dir, nil | ||||
| 	} | ||||
|  | ||||
| 	if isSetSymLink(m) { | ||||
| 		return Symlink, nil | ||||
| 	} | ||||
|  | ||||
| 	return Empty, fmt.Errorf("no equivalent git mode for %s", m) | ||||
| } | ||||
|  | ||||
| func isSetCharDevice(m os.FileMode) bool { | ||||
| 	return m&os.ModeCharDevice != 0 | ||||
| } | ||||
|  | ||||
| func isSetTemporary(m os.FileMode) bool { | ||||
| 	return m&os.ModeTemporary != 0 | ||||
| } | ||||
|  | ||||
| func isSetUserExecutable(m os.FileMode) bool { | ||||
| 	return m&0100 != 0 | ||||
| } | ||||
|  | ||||
| func isSetSymLink(m os.FileMode) bool { | ||||
| 	return m&os.ModeSymlink != 0 | ||||
| } | ||||
|  | ||||
| // Bytes returns a slice of 4 bytes with the mode in little-endian | ||||
| // encoding. | ||||
| func (m FileMode) Bytes() []byte { | ||||
| 	ret := make([]byte, 4) | ||||
| 	binary.LittleEndian.PutUint32(ret, uint32(m)) | ||||
| 	return ret[:] | ||||
| } | ||||
|  | ||||
| // IsMalformed returns whether the FileMode should not appear in a git | ||||
| // packfile; that is, Empty and any other mode not mentioned as a constant | ||||
| // in this package. | ||||
| func (m FileMode) IsMalformed() bool { | ||||
| 	return m != Dir && | ||||
| 		m != Regular && | ||||
| 		m != Deprecated && | ||||
| 		m != Executable && | ||||
| 		m != Symlink && | ||||
| 		m != Submodule | ||||
| } | ||||
|  | ||||
| // String returns the FileMode as a string in the standard git format; | ||||
| // that is, an octal number padded with zeros to 7 digits.  Malformed | ||||
| // modes are printed in that same format, for easier debugging. | ||||
| // | ||||
| // Example: Regular is "0100644", Empty is "0000000". | ||||
| func (m FileMode) String() string { | ||||
| 	return fmt.Sprintf("%07o", uint32(m)) | ||||
| } | ||||
|  | ||||
| // IsRegular returns whether the FileMode represents a regular file; | ||||
| // that is, either Regular or Deprecated.  Please note that Executable | ||||
| // files are not regular, even though in the UNIX tradition they usually | ||||
| // are: see the IsFile method. | ||||
| func (m FileMode) IsRegular() bool { | ||||
| 	return m == Regular || | ||||
| 		m == Deprecated | ||||
| } | ||||
|  | ||||
| // IsFile returns whether the FileMode represents a file; that is, | ||||
| // Regular, Deprecated, Executable or Symlink. | ||||
| func (m FileMode) IsFile() bool { | ||||
| 	return m == Regular || | ||||
| 		m == Deprecated || | ||||
| 		m == Executable || | ||||
| 		m == Symlink | ||||
| } | ||||
|  | ||||
| // ToOSFileMode returns the os.FileMode to be used when creating file | ||||
| // system elements with the given git mode and a nil error on success. | ||||
| // | ||||
| // When the provided mode cannot be mapped to a valid file system mode | ||||
| // (e.g.  Submodule) it returns os.FileMode(0) and an error. | ||||
| // | ||||
| // The returned file mode does not take into account the umask. | ||||
| func (m FileMode) ToOSFileMode() (os.FileMode, error) { | ||||
| 	switch m { | ||||
| 	case Dir: | ||||
| 		return os.ModePerm | os.ModeDir, nil | ||||
| 	case Submodule: | ||||
| 		return os.ModePerm | os.ModeDir, nil | ||||
| 	case Regular: | ||||
| 		return os.FileMode(0644), nil | ||||
| 	// Deprecated is no longer allowed: treated as a Regular instead | ||||
| 	case Deprecated: | ||||
| 		return os.FileMode(0644), nil | ||||
| 	case Executable: | ||||
| 		return os.FileMode(0755), nil | ||||
| 	case Symlink: | ||||
| 		return os.ModePerm | os.ModeSymlink, nil | ||||
| 	} | ||||
|  | ||||
| 	return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m) | ||||
| } | ||||
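A short sketch of the round trip this file provides (function name illustrative):

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/filemode"
)

// filemodeDemo parses a git tree-entry mode and maps it to the os.FileMode
// that would be used on checkout.
func filemodeDemo() error {
	m, err := filemode.New("100755") // Executable
	if err != nil {
		return err
	}
	osMode, err := m.ToOSFileMode()
	if err != nil {
		return err
	}
	fmt.Println(m, osMode) // "0100755 -rwxr-xr-x"
	return nil
}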
99 vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go generated vendored Normal file
							| @@ -0,0 +1,99 @@ | ||||
| package config | ||||
|  | ||||
| // New creates a new config instance. | ||||
| func New() *Config { | ||||
| 	return &Config{} | ||||
| } | ||||
|  | ||||
| // Config contains all the sections, comments and includes from a config file. | ||||
| type Config struct { | ||||
| 	Comment  *Comment | ||||
| 	Sections Sections | ||||
| 	Includes Includes | ||||
| } | ||||
|  | ||||
| // Includes is a list of Includes in a config file. | ||||
| type Includes []*Include | ||||
|  | ||||
| // Include is a reference to an included config file. | ||||
| type Include struct { | ||||
| 	Path   string | ||||
| 	Config *Config | ||||
| } | ||||
|  | ||||
| // Comment is the text of a comment, without the '#' or ';' prefix. | ||||
| type Comment string | ||||
|  | ||||
| const ( | ||||
| 	// NoSubsection token is passed to Config.AddOption and Config.SetOption | ||||
| 	// to represent the absence of a subsection. | ||||
| 	NoSubsection = "" | ||||
| ) | ||||
|  | ||||
| // Section returns an existing section with the given name or creates a new one. | ||||
| func (c *Config) Section(name string) *Section { | ||||
| 	for i := len(c.Sections) - 1; i >= 0; i-- { | ||||
| 		s := c.Sections[i] | ||||
| 		if s.IsName(name) { | ||||
| 			return s | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	s := &Section{Name: name} | ||||
| 	c.Sections = append(c.Sections, s) | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // AddOption adds an option to a given section and subsection. Use the | ||||
| // NoSubsection constant for the subsection argument if no subsection is wanted. | ||||
| func (c *Config) AddOption(section string, subsection string, key string, value string) *Config { | ||||
| 	if subsection == "" { | ||||
| 		c.Section(section).AddOption(key, value) | ||||
| 	} else { | ||||
| 		c.Section(section).Subsection(subsection).AddOption(key, value) | ||||
| 	} | ||||
|  | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // SetOption sets an option to a given section and subsection. Use the | ||||
| // NoSubsection constant for the subsection argument if no subsection is wanted. | ||||
| func (c *Config) SetOption(section string, subsection string, key string, value string) *Config { | ||||
| 	if subsection == "" { | ||||
| 		c.Section(section).SetOption(key, value) | ||||
| 	} else { | ||||
| 		c.Section(section).Subsection(subsection).SetOption(key, value) | ||||
| 	} | ||||
|  | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // RemoveSection removes a section from a config file. | ||||
| func (c *Config) RemoveSection(name string) *Config { | ||||
| 	result := Sections{} | ||||
| 	for _, s := range c.Sections { | ||||
| 		if !s.IsName(name) { | ||||
| 			result = append(result, s) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	c.Sections = result | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| // RemoveSubsection removes a subsection from a config file. | ||||
| func (c *Config) RemoveSubsection(section string, subsection string) *Config { | ||||
| 	for _, s := range c.Sections { | ||||
| 		if s.IsName(section) { | ||||
| 			result := Subsections{} | ||||
| 			for _, ss := range s.Subsections { | ||||
| 				if !ss.IsName(subsection) { | ||||
| 					result = append(result, ss) | ||||
| 				} | ||||
| 			} | ||||
| 			s.Subsections = result | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return c | ||||
| } | ||||
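A minimal sketch of building a config in memory with this API (helper name and values are illustrative):

import "github.com/go-git/go-git/v5/plumbing/format/config"

// buildConfig assembles the in-memory equivalent of:
//
//	[core]
//		bare = false
//	[remote "origin"]
//		url = https://example.com/repo.git
func buildConfig() *config.Config {
	cfg := config.New()
	cfg.SetOption("core", config.NoSubsection, "bare", "false")
	cfg.SetOption("remote", "origin", "url", "https://example.com/repo.git")
	return cfg
}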
37 vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go generated vendored Normal file
							| @@ -0,0 +1,37 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/go-git/gcfg" | ||||
| ) | ||||
|  | ||||
| // A Decoder reads and decodes config files from an input stream. | ||||
| type Decoder struct { | ||||
| 	io.Reader | ||||
| } | ||||
|  | ||||
| // NewDecoder returns a new decoder that reads from r. | ||||
| func NewDecoder(r io.Reader) *Decoder { | ||||
| 	return &Decoder{r} | ||||
| } | ||||
|  | ||||
| // Decode reads the whole config from its input and stores it in the | ||||
| // value pointed to by config. | ||||
| func (d *Decoder) Decode(config *Config) error { | ||||
| 	cb := func(s string, ss string, k string, v string, bv bool) error { | ||||
| 		if ss == "" && k == "" { | ||||
| 			config.Section(s) | ||||
| 			return nil | ||||
| 		} | ||||
|  | ||||
| 		if ss != "" && k == "" { | ||||
| 			config.Section(s).Subsection(ss) | ||||
| 			return nil | ||||
| 		} | ||||
|  | ||||
| 		config.AddOption(s, ss, k, v) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return gcfg.ReadWithCallback(d, cb) | ||||
| } | ||||
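A minimal decoding sketch (helper name and snippet are illustrative):

import (
	"strings"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

// decodeDemo parses a config snippet and reads a value back out.
func decodeDemo() (string, error) {
	cfg := config.New()
	raw := "[core]\n\tfilemode = false\n"
	if err := config.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
		return "", err
	}
	return cfg.Section("core").Option("filemode"), nil // "false"
}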
122 vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go generated vendored Normal file
							| @@ -0,0 +1,122 @@ | ||||
| // Package config implements encoding and decoding of git config files. | ||||
| // | ||||
| // 	Configuration File | ||||
| // 	------------------ | ||||
| // | ||||
| // 	The Git configuration file contains a number of variables that affect | ||||
| // 	the Git commands' behavior. The `.git/config` file in each repository | ||||
| // 	is used to store the configuration for that repository, and | ||||
| // 	`$HOME/.gitconfig` is used to store a per-user configuration as | ||||
| // 	fallback values for the `.git/config` file. The file `/etc/gitconfig` | ||||
| // 	can be used to store a system-wide default configuration. | ||||
| // | ||||
| // 	The configuration variables are used by both the Git plumbing | ||||
| // 	and the porcelains. The variables are divided into sections, wherein | ||||
| // 	the fully qualified variable name of the variable itself is the last | ||||
| // 	dot-separated segment and the section name is everything before the last | ||||
| // 	dot. The variable names are case-insensitive, allow only alphanumeric | ||||
| // 	characters and `-`, and must start with an alphabetic character.  Some | ||||
| // 	variables may appear multiple times; we say then that the variable is | ||||
| // 	multivalued. | ||||
| // | ||||
| // 	Syntax | ||||
| // 	~~~~~~ | ||||
| // | ||||
| // 	The syntax is fairly flexible and permissive; whitespaces are mostly | ||||
| // 	ignored.  The '#' and ';' characters begin comments that run to the end | ||||
| // 	of the line; blank lines are ignored. | ||||
| // | ||||
| // 	The file consists of sections and variables.  A section begins with | ||||
| // 	the name of the section in square brackets and continues until the next | ||||
| // 	section begins.  Section names are case-insensitive.  Only alphanumeric | ||||
| // 	characters, `-` and `.` are allowed in section names.  Each variable | ||||
| // 	must belong to some section, which means that there must be a section | ||||
| // 	header before the first setting of a variable. | ||||
| // | ||||
| // 	Sections can be further divided into subsections.  To begin a subsection | ||||
| // 	put its name in double quotes, separated by space from the section name, | ||||
| // 	in the section header, like in the example below: | ||||
| // | ||||
| // 	-------- | ||||
| // 		[section "subsection"] | ||||
| // | ||||
| // 	-------- | ||||
| // | ||||
| // 	Subsection names are case sensitive and can contain any characters except | ||||
| // 	newline (doublequote `"` and backslash can be included by escaping them | ||||
| // 	as `\"` and `\\`, respectively).  Section headers cannot span multiple | ||||
| // 	lines.  Variables may belong directly to a section or to a given subsection. | ||||
| // 	You can have `[section]` if you have `[section "subsection"]`, but you | ||||
| // 	don't need to. | ||||
| // | ||||
| // 	There is also a deprecated `[section.subsection]` syntax. With this | ||||
| // 	syntax, the subsection name is converted to lower-case and is also | ||||
| // 	compared case sensitively. These subsection names follow the same | ||||
| // 	restrictions as section names. | ||||
| // | ||||
| // 	All the other lines (and the remainder of the line after the section | ||||
| // 	header) are recognized as setting variables, in the form | ||||
| // 	'name = value' (or just 'name', which is a short-hand to say that | ||||
| // 	the variable is the boolean "true"). | ||||
| // 	The variable names are case-insensitive, allow only alphanumeric characters | ||||
| // 	and `-`, and must start with an alphabetic character. | ||||
| // | ||||
| // 	A line that defines a value can be continued to the next line by | ||||
| // 	ending it with a `\`; the backslash and the end-of-line are | ||||
| // 	stripped.  Leading whitespaces after 'name =', the remainder of the | ||||
| // 	line after the first comment character '#' or ';', and trailing | ||||
| // 	whitespaces of the line are discarded unless they are enclosed in | ||||
| // 	double quotes.  Internal whitespaces within the value are retained | ||||
| // 	verbatim. | ||||
| // | ||||
| // 	Inside double quotes, double quote `"` and backslash `\` characters | ||||
| // 	must be escaped: use `\"` for `"` and `\\` for `\`. | ||||
| // | ||||
| // 	The following escape sequences (beside `\"` and `\\`) are recognized: | ||||
| // 	`\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB) | ||||
| // 	and `\b` for backspace (BS).  Other char escape sequences (including octal | ||||
| // 	escape sequences) are invalid. | ||||
| // | ||||
| // 	Includes | ||||
| // 	~~~~~~~~ | ||||
| // | ||||
| // 	You can include one config file from another by setting the special | ||||
| // 	`include.path` variable to the name of the file to be included. The | ||||
| // 	variable takes a pathname as its value, and is subject to tilde | ||||
| // 	expansion. | ||||
| // | ||||
| // 	The included file is expanded immediately, as if its contents had been | ||||
| // 	found at the location of the include directive. If the value of the | ||||
| // 	`include.path` variable is a relative path, the path is considered to be | ||||
| // 	relative to the configuration file in which the include directive was | ||||
| // 	found.  See below for examples. | ||||
| // | ||||
| // | ||||
| // 	Example | ||||
| // 	~~~~~~~ | ||||
| // | ||||
| // 		# Core variables | ||||
| // 		[core] | ||||
| // 			; Don't trust file modes | ||||
| // 			filemode = false | ||||
| // | ||||
| // 		# Our diff algorithm | ||||
| // 		[diff] | ||||
| // 			external = /usr/local/bin/diff-wrapper | ||||
| // 			renames = true | ||||
| // | ||||
| // 		[branch "devel"] | ||||
| // 			remote = origin | ||||
| // 			merge = refs/heads/devel | ||||
| // | ||||
| // 		# Proxy settings | ||||
| // 		[core] | ||||
| // 			gitProxy="ssh" for "kernel.org" | ||||
| // 			gitProxy=default-proxy ; for the rest | ||||
| // | ||||
| // 		[include] | ||||
| // 			path = /path/to/foo.inc ; include by absolute path | ||||
| // 			path = foo ; expand "foo" relative to the current file | ||||
| // 			path = ~/foo ; expand "foo" in your `$HOME` directory | ||||
| // | ||||
| package config | ||||
77 vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go generated vendored Normal file
							| @@ -0,0 +1,77 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // An Encoder writes config files to an output stream. | ||||
| type Encoder struct { | ||||
| 	w io.Writer | ||||
| } | ||||
|  | ||||
| // NewEncoder returns a new encoder that writes to w. | ||||
| func NewEncoder(w io.Writer) *Encoder { | ||||
| 	return &Encoder{w} | ||||
| } | ||||
|  | ||||
| // Encode writes the config in git config format to the stream of the encoder. | ||||
| func (e *Encoder) Encode(cfg *Config) error { | ||||
| 	for _, s := range cfg.Sections { | ||||
| 		if err := e.encodeSection(s); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeSection(s *Section) error { | ||||
| 	if len(s.Options) > 0 { | ||||
| 		if err := e.printf("[%s]\n", s.Name); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		if err := e.encodeOptions(s.Options); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, ss := range s.Subsections { | ||||
| 		if err := e.encodeSubsection(s.Name, ss); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { | ||||
| 	//TODO: escape | ||||
| 	if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return e.encodeOptions(s.Options) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeOptions(opts Options) error { | ||||
| 	for _, o := range opts { | ||||
| 		pattern := "\t%s = %s\n" | ||||
| 		if strings.Contains(o.Value, "\\") { | ||||
| 			pattern = "\t%s = %q\n" | ||||
| 		} | ||||
|  | ||||
| 		if err := e.printf(pattern, o.Key, o.Value); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) printf(msg string, args ...interface{}) error { | ||||
| 	_, err := fmt.Fprintf(e.w, msg, args...) | ||||
| 	return err | ||||
| } | ||||
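The matching encoding sketch (helper name illustrative; values containing backslashes are quoted, as encodeOptions above shows):

import (
	"bytes"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

// encodeDemo serializes a Config back to git-config text.
func encodeDemo(cfg *config.Config) (string, error) {
	var buf bytes.Buffer
	if err := config.NewEncoder(&buf).Encode(cfg); err != nil {
		return "", err
	}
	return buf.String(), nil
}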
117 vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go generated vendored Normal file
							| @@ -0,0 +1,117 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // Option defines a key/value entity in a config file. | ||||
| type Option struct { | ||||
| 	// Key, preserving the original case. | ||||
| 	// Use IsKey to compare keys regardless of case. | ||||
| 	Key string | ||||
| 	// Value is the original value as a string; it may not be normalized. | ||||
| 	Value string | ||||
| } | ||||
|  | ||||
| type Options []*Option | ||||
|  | ||||
| // IsKey returns true if the given key matches | ||||
| // this option's key in a case-insensitive comparison. | ||||
| func (o *Option) IsKey(key string) bool { | ||||
| 	return strings.ToLower(o.Key) == strings.ToLower(key) | ||||
| } | ||||
|  | ||||
| func (opts Options) GoString() string { | ||||
| 	var strs []string | ||||
| 	for _, opt := range opts { | ||||
| 		strs = append(strs, fmt.Sprintf("%#v", opt)) | ||||
| 	} | ||||
|  | ||||
| 	return strings.Join(strs, ", ") | ||||
| } | ||||
|  | ||||
| // Get gets the value for the given key if set, | ||||
| // otherwise it returns the empty string. | ||||
| // | ||||
| // Note that there is no difference between an unset key and a key | ||||
| // set to the empty string. | ||||
| // | ||||
| // This matches git behaviour since git v1.8.1-rc1: | ||||
| // if there are multiple definitions of a key, the | ||||
| // last one wins. | ||||
| // | ||||
| // See: http://article.gmane.org/gmane.linux.kernel/1407184 | ||||
| // | ||||
| // In order to get all possible values for the same key, | ||||
| // use GetAll. | ||||
| func (opts Options) Get(key string) string { | ||||
| 	for i := len(opts) - 1; i >= 0; i-- { | ||||
| 		o := opts[i] | ||||
| 		if o.IsKey(key) { | ||||
| 			return o.Value | ||||
| 		} | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| // GetAll returns all possible values for the same key. | ||||
| func (opts Options) GetAll(key string) []string { | ||||
| 	result := []string{} | ||||
| 	for _, o := range opts { | ||||
| 		if o.IsKey(key) { | ||||
| 			result = append(result, o.Value) | ||||
| 		} | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func (opts Options) withoutOption(key string) Options { | ||||
| 	result := Options{} | ||||
| 	for _, o := range opts { | ||||
| 		if !o.IsKey(key) { | ||||
| 			result = append(result, o) | ||||
| 		} | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func (opts Options) withAddedOption(key string, value string) Options { | ||||
| 	return append(opts, &Option{key, value}) | ||||
| } | ||||
|  | ||||
| func (opts Options) withSettedOption(key string, values ...string) Options { | ||||
| 	var result Options | ||||
| 	var added []string | ||||
| 	for _, o := range opts { | ||||
| 		if !o.IsKey(key) { | ||||
| 			result = append(result, o) | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		if contains(values, o.Value) { | ||||
| 			added = append(added, o.Value) | ||||
| 			result = append(result, o) | ||||
| 			continue | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, value := range values { | ||||
| 		if contains(added, value) { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		result = result.withAddedOption(key, value) | ||||
| 	} | ||||
|  | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func contains(haystack []string, needle string) bool { | ||||
| 	for _, s := range haystack { | ||||
| 		if s == needle { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
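A short sketch of the multivalued-key semantics documented above (function name and values are illustrative):

// optionsDemo: Get returns the last definition, GetAll returns all of them.
func optionsDemo() {
	opts := config.Options{
		{Key: "path", Value: "/path/to/foo.inc"},
		{Key: "path", Value: "~/foo"},
	}
	_ = opts.Get("path")    // "~/foo" (last one wins)
	_ = opts.GetAll("path") // ["/path/to/foo.inc", "~/foo"]
}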
146 vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go generated vendored Normal file
							| @@ -0,0 +1,146 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // Section is the representation of a section inside git configuration files. | ||||
| // Each Section contains Options that are used by both the Git plumbing | ||||
| // and the porcelains. | ||||
| // Sections can be further divided into subsections. To begin a subsection | ||||
| // put its name in double quotes, separated by space from the section name, | ||||
| // in the section header, like in the example below: | ||||
| // | ||||
| //     [section "subsection"] | ||||
| // | ||||
| // All the other lines (and the remainder of the line after the section header) | ||||
| // are recognized as option variables, in the form "name = value" (or just name, | ||||
| // which is a short-hand to say that the variable is the boolean "true"). | ||||
| // The variable names are case-insensitive, allow only alphanumeric characters | ||||
| // and -, and must start with an alphabetic character: | ||||
| // | ||||
| //     [section "subsection1"] | ||||
| //         option1 = value1 | ||||
| //         option2 | ||||
| //     [section "subsection2"] | ||||
| //         option3 = value2 | ||||
| // | ||||
| type Section struct { | ||||
| 	Name        string | ||||
| 	Options     Options | ||||
| 	Subsections Subsections | ||||
| } | ||||
|  | ||||
| type Subsection struct { | ||||
| 	Name    string | ||||
| 	Options Options | ||||
| } | ||||
|  | ||||
| type Sections []*Section | ||||
|  | ||||
| func (s Sections) GoString() string { | ||||
| 	var strs []string | ||||
| 	for _, ss := range s { | ||||
| 		strs = append(strs, fmt.Sprintf("%#v", ss)) | ||||
| 	} | ||||
|  | ||||
| 	return strings.Join(strs, ", ") | ||||
| } | ||||
|  | ||||
| type Subsections []*Subsection | ||||
|  | ||||
| func (s Subsections) GoString() string { | ||||
| 	var strs []string | ||||
| 	for _, ss := range s { | ||||
| 		strs = append(strs, fmt.Sprintf("%#v", ss)) | ||||
| 	} | ||||
|  | ||||
| 	return strings.Join(strs, ", ") | ||||
| } | ||||
|  | ||||
| // IsName checks if the provided name is equal to the Section name, case-insensitively. | ||||
| func (s *Section) IsName(name string) bool { | ||||
| 	return strings.ToLower(s.Name) == strings.ToLower(name) | ||||
| } | ||||
|  | ||||
| // Option returns the value for the specified key. An empty string is | ||||
| // returned if the key does not exist. | ||||
| func (s *Section) Option(key string) string { | ||||
| 	return s.Options.Get(key) | ||||
| } | ||||
|  | ||||
| // AddOption adds a new Option to the Section. The updated Section is returned. | ||||
| func (s *Section) AddOption(key string, value string) *Section { | ||||
| 	s.Options = s.Options.withAddedOption(key, value) | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // SetOption adds a new Option to the Section. If the option already exists, it is replaced. | ||||
| // The updated Section is returned. | ||||
| func (s *Section) SetOption(key string, value string) *Section { | ||||
| 	s.Options = s.Options.withSettedOption(key, value) | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // Remove an option with the specified key. The updated Section is returned. | ||||
| func (s *Section) RemoveOption(key string) *Section { | ||||
| 	s.Options = s.Options.withoutOption(key) | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // Subsection returns a Subsection from the specified Section. If the | ||||
| // Subsection does not exist, a new one is created and added to the Section. | ||||
| func (s *Section) Subsection(name string) *Subsection { | ||||
| 	for i := len(s.Subsections) - 1; i >= 0; i-- { | ||||
| 		ss := s.Subsections[i] | ||||
| 		if ss.IsName(name) { | ||||
| 			return ss | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	ss := &Subsection{Name: name} | ||||
| 	s.Subsections = append(s.Subsections, ss) | ||||
| 	return ss | ||||
| } | ||||
|  | ||||
| // HasSubsection checks if the Section has a Subsection with the specified name. | ||||
| func (s *Section) HasSubsection(name string) bool { | ||||
| 	for _, ss := range s.Subsections { | ||||
| 		if ss.IsName(name) { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // IsName checks if the name of the subsection is exactly the specified name. | ||||
| func (s *Subsection) IsName(name string) bool { | ||||
| 	return s.Name == name | ||||
| } | ||||
|  | ||||
| // Option returns the option with the specified key. If the option does not | ||||
| // exist, an empty string will be returned. | ||||
| func (s *Subsection) Option(key string) string { | ||||
| 	return s.Options.Get(key) | ||||
| } | ||||
|  | ||||
| // AddOption adds a new Option to the Subsection. The updated Subsection is returned. | ||||
| func (s *Subsection) AddOption(key string, value string) *Subsection { | ||||
| 	s.Options = s.Options.withAddedOption(key, value) | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // SetOption adds a new Option to the Subsection. If the option already exists, it is replaced. | ||||
| // The updated Subsection is returned. | ||||
| func (s *Subsection) SetOption(key string, value ...string) *Subsection { | ||||
| 	s.Options = s.Options.withSettedOption(key, value...) | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // RemoveOption removes the option with the specified key. The updated Subsection is returned. | ||||
| func (s *Subsection) RemoveOption(key string) *Subsection { | ||||
| 	s.Options = s.Options.withoutOption(key) | ||||
| 	return s | ||||
| } | ||||
58 vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go generated vendored Normal file
							| @@ -0,0 +1,58 @@ | ||||
| package diff | ||||
|  | ||||
| import ( | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/filemode" | ||||
| ) | ||||
|  | ||||
| // Operation defines the operation of a diff item. | ||||
| type Operation int | ||||
|  | ||||
| const ( | ||||
| 	// Equal item represents an equals diff. | ||||
| 	Equal Operation = iota | ||||
| 	// Add item represents an insert diff. | ||||
| 	Add | ||||
| 	// Delete item represents a delete diff. | ||||
| 	Delete | ||||
| ) | ||||
|  | ||||
| // Patch represents a collection of steps to transform several files. | ||||
| type Patch interface { | ||||
| 	// FilePatches returns a slice of patches per file. | ||||
| 	FilePatches() []FilePatch | ||||
| 	// Message returns an optional message that can be at the top of the | ||||
| 	// Patch representation. | ||||
| 	Message() string | ||||
| } | ||||
|  | ||||
| // FilePatch represents the necessary steps to transform one file to another. | ||||
| type FilePatch interface { | ||||
| 	// IsBinary returns true if this patch is representing a binary file. | ||||
| 	IsBinary() bool | ||||
| 	// Files returns the from and to Files, with all the necessary metadata | ||||
| 	// about them. If the patch creates a new file, "from" will be nil. | ||||
| 	// If the patch deletes a file, "to" will be nil. | ||||
| 	Files() (from, to File) | ||||
| 	// Chunks returns a slice of ordered changes to transform "from" File to | ||||
| 	// "to" File. If the file is a binary one, Chunks will be empty. | ||||
| 	Chunks() []Chunk | ||||
| } | ||||
|  | ||||
| // File contains all the file metadata necessary to print some patch formats. | ||||
| type File interface { | ||||
| 	// Hash returns the File Hash. | ||||
| 	Hash() plumbing.Hash | ||||
| 	// Mode returns the FileMode. | ||||
| 	Mode() filemode.FileMode | ||||
| 	// Path returns the complete Path to the file, including the filename. | ||||
| 	Path() string | ||||
| } | ||||
|  | ||||
| // Chunk represents a portion of a file transformation from one state to another. | ||||
| type Chunk interface { | ||||
| 	// Content contains the portion of the file. | ||||
| 	Content() string | ||||
| 	// Type contains the Operation to do with this Chunk. | ||||
| 	Type() Operation | ||||
| } | ||||
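A sketch of consuming these interfaces (function name is illustrative; added lines are approximated by counting newlines in Add chunks):

import (
	"strings"

	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

// countAdded tallies added lines across all files of a patch, skipping
// binary patches, whose Chunks are empty.
func countAdded(p diff.Patch) int {
	var n int
	for _, fp := range p.FilePatches() {
		if fp.IsBinary() {
			continue
		}
		for _, c := range fp.Chunks() {
			if c.Type() == diff.Add {
				n += strings.Count(c.Content(), "\n")
			}
		}
	}
	return n
}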
367 vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go generated vendored Normal file
							| @@ -0,0 +1,367 @@ | ||||
| package diff | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	diffInit = "diff --git a/%s b/%s\n" | ||||
|  | ||||
| 	chunkStart  = "@@ -" | ||||
| 	chunkMiddle = " +" | ||||
| 	chunkEnd    = " @@%s\n" | ||||
| 	chunkCount  = "%d,%d" | ||||
|  | ||||
| 	noFilePath = "/dev/null" | ||||
| 	aDir       = "a/" | ||||
| 	bDir       = "b/" | ||||
|  | ||||
| 	fPath  = "--- %s\n" | ||||
| 	tPath  = "+++ %s\n" | ||||
| 	binary = "Binary files %s and %s differ\n" | ||||
|  | ||||
| 	addLine    = "+%s%s" | ||||
| 	deleteLine = "-%s%s" | ||||
| 	equalLine  = " %s%s" | ||||
| 	noNewLine  = "\n\\ No newline at end of file\n" | ||||
|  | ||||
| 	oldMode         = "old mode %o\n" | ||||
| 	newMode         = "new mode %o\n" | ||||
| 	deletedFileMode = "deleted file mode %o\n" | ||||
| 	newFileMode     = "new file mode %o\n" | ||||
|  | ||||
| 	renameFrom     = "from" | ||||
| 	renameTo       = "to" | ||||
| 	renameFileMode = "rename %s %s\n" | ||||
|  | ||||
| 	indexAndMode = "index %s..%s %o\n" | ||||
| 	indexNoMode  = "index %s..%s\n" | ||||
|  | ||||
| 	DefaultContextLines = 3 | ||||
| ) | ||||
|  | ||||
| // UnifiedEncoder encodes a unified diff into the provided Writer. | ||||
| // There are some unsupported features: | ||||
| //     - Similarity index for renames | ||||
| //     - Short hash representation | ||||
| type UnifiedEncoder struct { | ||||
| 	io.Writer | ||||
|  | ||||
| 	// ctxLines is the count of unchanged lines that will appear | ||||
| 	// surrounding a change. | ||||
| 	ctxLines int | ||||
|  | ||||
| 	buf bytes.Buffer | ||||
| } | ||||
|  | ||||
| // NewUnifiedEncoder returns a new UnifiedEncoder that writes to w, emitting | ||||
| // ctxLines of unchanged context around each change. | ||||
| func NewUnifiedEncoder(w io.Writer, ctxLines int) *UnifiedEncoder { | ||||
| 	return &UnifiedEncoder{ctxLines: ctxLines, Writer: w} | ||||
| } | ||||
|  | ||||
| // Encode writes the unified diff representation of patch to the underlying | ||||
| // writer. | ||||
| func (e *UnifiedEncoder) Encode(patch Patch) error { | ||||
| 	e.printMessage(patch.Message()) | ||||
|  | ||||
| 	if err := e.encodeFilePatch(patch.FilePatches()); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	_, err := e.buf.WriteTo(e) | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (e *UnifiedEncoder) encodeFilePatch(filePatches []FilePatch) error { | ||||
| 	for _, p := range filePatches { | ||||
| 		f, t := p.Files() | ||||
| 		if err := e.header(f, t, p.IsBinary()); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		g := newHunksGenerator(p.Chunks(), e.ctxLines) | ||||
| 		for _, c := range g.Generate() { | ||||
| 			c.WriteTo(&e.buf) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *UnifiedEncoder) printMessage(message string) { | ||||
| 	isEmpty := message == "" | ||||
| 	hasSuffix := strings.HasSuffix(message, "\n") | ||||
| 	if !isEmpty && !hasSuffix { | ||||
| 		message += "\n" | ||||
| 	} | ||||
|  | ||||
| 	e.buf.WriteString(message) | ||||
| } | ||||
|  | ||||
| func (e *UnifiedEncoder) header(from, to File, isBinary bool) error { | ||||
| 	switch { | ||||
| 	case from == nil && to == nil: | ||||
| 		return nil | ||||
| 	case from != nil && to != nil: | ||||
| 		hashEquals := from.Hash() == to.Hash() | ||||
|  | ||||
| 		fmt.Fprintf(&e.buf, diffInit, from.Path(), to.Path()) | ||||
|  | ||||
| 		if from.Mode() != to.Mode() { | ||||
| 			fmt.Fprintf(&e.buf, oldMode+newMode, from.Mode(), to.Mode()) | ||||
| 		} | ||||
|  | ||||
| 		if from.Path() != to.Path() { | ||||
| 			fmt.Fprintf(&e.buf, | ||||
| 				renameFileMode+renameFileMode, | ||||
| 				renameFrom, from.Path(), renameTo, to.Path()) | ||||
| 		} | ||||
|  | ||||
| 		if from.Mode() != to.Mode() && !hashEquals { | ||||
| 			fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), to.Hash()) | ||||
| 		} else if !hashEquals { | ||||
| 			fmt.Fprintf(&e.buf, indexAndMode, from.Hash(), to.Hash(), from.Mode()) | ||||
| 		} | ||||
|  | ||||
| 		if !hashEquals { | ||||
| 			e.pathLines(isBinary, aDir+from.Path(), bDir+to.Path()) | ||||
| 		} | ||||
| 	case from == nil: | ||||
| 		fmt.Fprintf(&e.buf, diffInit, to.Path(), to.Path()) | ||||
| 		fmt.Fprintf(&e.buf, newFileMode, to.Mode()) | ||||
| 		fmt.Fprintf(&e.buf, indexNoMode, plumbing.ZeroHash, to.Hash()) | ||||
| 		e.pathLines(isBinary, noFilePath, bDir+to.Path()) | ||||
| 	case to == nil: | ||||
| 		fmt.Fprintf(&e.buf, diffInit, from.Path(), from.Path()) | ||||
| 		fmt.Fprintf(&e.buf, deletedFileMode, from.Mode()) | ||||
| 		fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), plumbing.ZeroHash) | ||||
| 		e.pathLines(isBinary, aDir+from.Path(), noFilePath) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *UnifiedEncoder) pathLines(isBinary bool, fromPath, toPath string) { | ||||
| 	format := fPath + tPath | ||||
| 	if isBinary { | ||||
| 		format = binary | ||||
| 	} | ||||
|  | ||||
| 	fmt.Fprintf(&e.buf, format, fromPath, toPath) | ||||
| } | ||||
|  | ||||
| type hunksGenerator struct { | ||||
| 	fromLine, toLine            int | ||||
| 	ctxLines                    int | ||||
| 	chunks                      []Chunk | ||||
| 	current                     *hunk | ||||
| 	hunks                       []*hunk | ||||
| 	beforeContext, afterContext []string | ||||
| } | ||||
|  | ||||
| func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator { | ||||
| 	return &hunksGenerator{ | ||||
| 		chunks:   chunks, | ||||
| 		ctxLines: ctxLines, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *hunksGenerator) Generate() []*hunk { | ||||
| 	for i, chunk := range c.chunks { | ||||
| 		ls := splitLines(chunk.Content()) | ||||
| 		lsLen := len(ls) | ||||
|  | ||||
| 		switch chunk.Type() { | ||||
| 		case Equal: | ||||
| 			c.fromLine += lsLen | ||||
| 			c.toLine += lsLen | ||||
| 			c.processEqualsLines(ls, i) | ||||
| 		case Delete: | ||||
| 			if lsLen != 0 { | ||||
| 				c.fromLine++ | ||||
| 			} | ||||
|  | ||||
| 			c.processHunk(i, chunk.Type()) | ||||
| 			c.fromLine += lsLen - 1 | ||||
| 			c.current.AddOp(chunk.Type(), ls...) | ||||
| 		case Add: | ||||
| 			if lsLen != 0 { | ||||
| 				c.toLine++ | ||||
| 			} | ||||
| 			c.processHunk(i, chunk.Type()) | ||||
| 			c.toLine += lsLen - 1 | ||||
| 			c.current.AddOp(chunk.Type(), ls...) | ||||
| 		} | ||||
|  | ||||
| 		if i == len(c.chunks)-1 && c.current != nil { | ||||
| 			c.hunks = append(c.hunks, c.current) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return c.hunks | ||||
| } | ||||
|  | ||||
| func (c *hunksGenerator) processHunk(i int, op Operation) { | ||||
| 	if c.current != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	var ctxPrefix string | ||||
| 	linesBefore := len(c.beforeContext) | ||||
| 	if linesBefore > c.ctxLines { | ||||
| 		ctxPrefix = " " + c.beforeContext[linesBefore-c.ctxLines-1] | ||||
| 		c.beforeContext = c.beforeContext[linesBefore-c.ctxLines:] | ||||
| 		linesBefore = c.ctxLines | ||||
| 	} | ||||
|  | ||||
| 	c.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")} | ||||
| 	c.current.AddOp(Equal, c.beforeContext...) | ||||
|  | ||||
| 	switch op { | ||||
| 	case Delete: | ||||
| 		c.current.fromLine, c.current.toLine = | ||||
| 			c.addLineNumbers(c.fromLine, c.toLine, linesBefore, i, Add) | ||||
| 	case Add: | ||||
| 		c.current.toLine, c.current.fromLine = | ||||
| 			c.addLineNumbers(c.toLine, c.fromLine, linesBefore, i, Delete) | ||||
| 	} | ||||
|  | ||||
| 	c.beforeContext = nil | ||||
| } | ||||
|  | ||||
| // addLineNumbers obtains the line numbers for a new hunk. | ||||
| func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) { | ||||
| 	cla = la - linesBefore | ||||
| 	// we need to search for a reference for the next diff | ||||
| 	switch { | ||||
| 	case linesBefore != 0 && c.ctxLines != 0: | ||||
| 		if lb > c.ctxLines { | ||||
| 			clb = lb - c.ctxLines + 1 | ||||
| 		} else { | ||||
| 			clb = 1 | ||||
| 		} | ||||
| 	case c.ctxLines == 0: | ||||
| 		clb = lb | ||||
| 	case i != len(c.chunks)-1: | ||||
| 		next := c.chunks[i+1] | ||||
| 		if next.Type() == op || next.Type() == Equal { | ||||
| 			// this diff will be into this chunk | ||||
| 			clb = lb + 1 | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (c *hunksGenerator) processEqualsLines(ls []string, i int) { | ||||
| 	if c.current == nil { | ||||
| 		c.beforeContext = append(c.beforeContext, ls...) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	c.afterContext = append(c.afterContext, ls...) | ||||
| 	if len(c.afterContext) <= c.ctxLines*2 && i != len(c.chunks)-1 { | ||||
| 		c.current.AddOp(Equal, c.afterContext...) | ||||
| 		c.afterContext = nil | ||||
| 	} else { | ||||
| 		ctxLines := c.ctxLines | ||||
| 		if ctxLines > len(c.afterContext) { | ||||
| 			ctxLines = len(c.afterContext) | ||||
| 		} | ||||
| 		c.current.AddOp(Equal, c.afterContext[:ctxLines]...) | ||||
| 		c.hunks = append(c.hunks, c.current) | ||||
|  | ||||
| 		c.current = nil | ||||
| 		c.beforeContext = c.afterContext[ctxLines:] | ||||
| 		c.afterContext = nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| var splitLinesRE = regexp.MustCompile(`[^\n]*(\n|$)`) | ||||
|  | ||||
| func splitLines(s string) []string { | ||||
| 	out := splitLinesRE.FindAllString(s, -1) | ||||
| 	if out[len(out)-1] == "" { | ||||
| 		out = out[:len(out)-1] | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| type hunk struct { | ||||
| 	fromLine int | ||||
| 	toLine   int | ||||
|  | ||||
| 	fromCount int | ||||
| 	toCount   int | ||||
|  | ||||
| 	ctxPrefix string | ||||
| 	ops       []*op | ||||
| } | ||||
|  | ||||
| func (c *hunk) WriteTo(buf *bytes.Buffer) { | ||||
| 	buf.WriteString(chunkStart) | ||||
|  | ||||
| 	if c.fromCount == 1 { | ||||
| 		fmt.Fprintf(buf, "%d", c.fromLine) | ||||
| 	} else { | ||||
| 		fmt.Fprintf(buf, chunkCount, c.fromLine, c.fromCount) | ||||
| 	} | ||||
|  | ||||
| 	buf.WriteString(chunkMiddle) | ||||
|  | ||||
| 	if c.toCount == 1 { | ||||
| 		fmt.Fprintf(buf, "%d", c.toLine) | ||||
| 	} else { | ||||
| 		fmt.Fprintf(buf, chunkCount, c.toLine, c.toCount) | ||||
| 	} | ||||
|  | ||||
| 	fmt.Fprintf(buf, chunkEnd, c.ctxPrefix) | ||||
|  | ||||
| 	for _, d := range c.ops { | ||||
| 		buf.WriteString(d.String()) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *hunk) AddOp(t Operation, s ...string) { | ||||
| 	ls := len(s) | ||||
| 	switch t { | ||||
| 	case Add: | ||||
| 		c.toCount += ls | ||||
| 	case Delete: | ||||
| 		c.fromCount += ls | ||||
| 	case Equal: | ||||
| 		c.toCount += ls | ||||
| 		c.fromCount += ls | ||||
| 	} | ||||
|  | ||||
| 	for _, l := range s { | ||||
| 		c.ops = append(c.ops, &op{l, t}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type op struct { | ||||
| 	text string | ||||
| 	t    Operation | ||||
| } | ||||
|  | ||||
| func (o *op) String() string { | ||||
| 	var prefix, suffix string | ||||
| 	switch o.t { | ||||
| 	case Add: | ||||
| 		prefix = addLine | ||||
| 	case Delete: | ||||
| 		prefix = deleteLine | ||||
| 	case Equal: | ||||
| 		prefix = equalLine | ||||
| 	} | ||||
| 	n := len(o.text) | ||||
| 	if n > 0 && o.text[n-1] != '\n' { | ||||
| 		suffix = noNewLine | ||||
| 	} | ||||
|  | ||||
| 	return fmt.Sprintf(prefix, o.text, suffix) | ||||
| } | ||||
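Taken together, patch.go and unified_encoder.go form a small, self-contained API: any values implementing Patch, FilePatch, File and Chunk can be rendered as a unified diff. The sketch below uses minimal, hypothetical implementations of those interfaces (textFile, chunk, filePatch and patch are illustrative names, not part of go-git) and encodes a one-file change with the default three lines of context:

    package main

    import (
    	"os"

    	"github.com/go-git/go-git/v5/plumbing"
    	"github.com/go-git/go-git/v5/plumbing/filemode"
    	"github.com/go-git/go-git/v5/plumbing/format/diff"
    )

    // textFile is a minimal diff.File: a path plus the blob hash of its content.
    type textFile struct {
    	path    string
    	content []byte
    }

    func (f textFile) Hash() plumbing.Hash {
    	return plumbing.ComputeHash(plumbing.BlobObject, f.content)
    }
    func (f textFile) Mode() filemode.FileMode { return filemode.Regular }
    func (f textFile) Path() string            { return f.path }

    // chunk is a minimal diff.Chunk: a run of lines and its Operation.
    type chunk struct {
    	content string
    	op      diff.Operation
    }

    func (c chunk) Content() string      { return c.content }
    func (c chunk) Type() diff.Operation { return c.op }

    // filePatch is a minimal diff.FilePatch for a single text file.
    type filePatch struct {
    	from, to diff.File
    	chunks   []diff.Chunk
    }

    func (p filePatch) IsBinary() bool                { return false }
    func (p filePatch) Files() (diff.File, diff.File) { return p.from, p.to }
    func (p filePatch) Chunks() []diff.Chunk          { return p.chunks }

    // patch is a minimal diff.Patch with no message.
    type patch struct{ files []diff.FilePatch }

    func (p patch) FilePatches() []diff.FilePatch { return p.files }
    func (p patch) Message() string               { return "" }

    func main() {
    	from := textFile{"greeting.txt", []byte("hello\n")}
    	to := textFile{"greeting.txt", []byte("hello, world\n")}

    	p := patch{files: []diff.FilePatch{filePatch{
    		from: from,
    		to:   to,
    		chunks: []diff.Chunk{
    			chunk{"hello\n", diff.Delete},
    			chunk{"hello, world\n", diff.Add},
    		},
    	}}}

    	e := diff.NewUnifiedEncoder(os.Stdout, diff.DefaultContextLines)
    	if err := e.Encode(p); err != nil {
    		panic(err)
    	}
    }

Running this should print the familiar "diff --git" header followed by a single hunk replacing the one line.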
136  vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go  generated  vendored  Normal file
							| @@ -0,0 +1,136 @@ | ||||
| package gitignore | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"os/user" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/go-git/go-billy/v5" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/format/config" | ||||
| 	gioutil "github.com/go-git/go-git/v5/utils/ioutil" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	commentPrefix = "#" | ||||
| 	coreSection   = "core" | ||||
| 	eol           = "\n" | ||||
| 	excludesfile  = "excludesfile" | ||||
| 	gitDir        = ".git" | ||||
| 	gitignoreFile = ".gitignore" | ||||
| 	gitconfigFile = ".gitconfig" | ||||
| 	systemFile    = "/etc/gitconfig" | ||||
| ) | ||||
|  | ||||
| // readIgnoreFile reads a specific git ignore file. | ||||
| func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) { | ||||
| 	f, err := fs.Open(fs.Join(append(path, ignoreFile)...)) | ||||
| 	if err == nil { | ||||
| 		defer f.Close() | ||||
|  | ||||
| 		if data, err := ioutil.ReadAll(f); err == nil { | ||||
| 			for _, s := range strings.Split(string(data), eol) { | ||||
| 				if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 { | ||||
| 					ps = append(ps, ParsePattern(s, path)) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} else if !os.IsNotExist(err) { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // ReadPatterns reads gitignore patterns by recursively traversing the | ||||
| // directory structure. The result is in ascending order of priority | ||||
| // (patterns read later have higher priority). | ||||
| func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) { | ||||
| 	ps, _ = readIgnoreFile(fs, path, gitignoreFile) | ||||
|  | ||||
| 	var fis []os.FileInfo | ||||
| 	fis, err = fs.ReadDir(fs.Join(path...)) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	for _, fi := range fis { | ||||
| 		if fi.IsDir() && fi.Name() != gitDir { | ||||
| 			var subps []Pattern | ||||
| 			subps, err = ReadPatterns(fs, append(path, fi.Name())) | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			if len(subps) > 0 { | ||||
| 				ps = append(ps, subps...) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { | ||||
| 	f, err := fs.Open(path) | ||||
| 	if err != nil { | ||||
| 		if os.IsNotExist(err) { | ||||
| 			return nil, nil | ||||
| 		} | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	defer gioutil.CheckClose(f, &err) | ||||
|  | ||||
| 	b, err := ioutil.ReadAll(f) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	d := config.NewDecoder(bytes.NewBuffer(b)) | ||||
|  | ||||
| 	raw := config.New() | ||||
| 	if err = d.Decode(raw); err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	s := raw.Section(coreSection) | ||||
| 	efo := s.Options.Get(excludesfile) | ||||
| 	if efo == "" { | ||||
| 		return nil, nil | ||||
| 	} | ||||
|  | ||||
| 	ps, err = readIgnoreFile(fs, nil, efo) | ||||
| 	if os.IsNotExist(err) { | ||||
| 		return nil, nil | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // LoadGlobalPatterns loads gitignore patterns from the gitignore file | ||||
| // declared in a user's ~/.gitconfig file.  If the ~/.gitconfig file does not | ||||
| // exist the function will return nil.  If the core.excludesfile property | ||||
| // is not declared, the function will return nil.  If the file pointed to by | ||||
| // the core.excludesfile property does not exist, the function will return nil. | ||||
| // | ||||
| // The function assumes fs is rooted at the root filesystem. | ||||
| func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { | ||||
| 	usr, err := user.Current() | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile)) | ||||
| } | ||||
|  | ||||
| // LoadSystemPatterns loads gitignore patterns from the gitignore file | ||||
| // declared in the system's /etc/gitconfig file.  If the /etc/gitconfig file | ||||
| // does not exist the function will return nil.  If the core.excludesfile property | ||||
| // is not declared, the function will return nil.  If the file pointed to by | ||||
| // the core.excludesfile property does not exist, the function will return nil. | ||||
| // | ||||
| // The function assumes fs is rooted at the root filesystem. | ||||
| func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) { | ||||
| 	return loadPatterns(fs, systemFile) | ||||
| } | ||||
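Because readIgnoreFile and ReadPatterns operate on a billy.Filesystem, they can be exercised against go-billy's in-memory filesystem just as well as a real worktree. A minimal sketch (the paths and patterns are made up for illustration):

    package main

    import (
    	"fmt"

    	"github.com/go-git/go-billy/v5/memfs"
    	"github.com/go-git/go-billy/v5/util"
    	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
    )

    func main() {
    	fs := memfs.New()

    	// One .gitignore at the root, one in a subdirectory; the deeper
    	// file re-includes a name the root file excludes.
    	util.WriteFile(fs, ".gitignore", []byte("*.o\n"), 0644)
    	util.WriteFile(fs, "vendor/.gitignore", []byte("!keep.o\n"), 0644)

    	ps, err := gitignore.ReadPatterns(fs, nil)
    	if err != nil {
    		panic(err)
    	}

    	m := gitignore.NewMatcher(ps)
    	fmt.Println(m.Match([]string{"main.o"}, false))           // true: excluded by *.o
    	fmt.Println(m.Match([]string{"vendor", "keep.o"}, false)) // false: re-included
    }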
70  vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go  generated  vendored  Normal file
							| @@ -0,0 +1,70 @@ | ||||
| // Package gitignore implements matching file system paths to gitignore patterns that | ||||
| // can be automatically read from a git repository tree in the order of definition | ||||
| // priorities. It supports all pattern formats as specified in the original gitignore | ||||
| // documentation, copied below: | ||||
| // | ||||
| //   Pattern format | ||||
| //   ============== | ||||
| // | ||||
| //		- A blank line matches no files, so it can serve as a separator for readability. | ||||
| // | ||||
| //		- A line starting with # serves as a comment. Put a backslash ("\") in front of | ||||
| //		  the first hash for patterns that begin with a hash. | ||||
| // | ||||
| //		- Trailing spaces are ignored unless they are quoted with backslash ("\"). | ||||
| // | ||||
| //		- An optional prefix "!" which negates the pattern; any matching file excluded | ||||
| //		  by a previous pattern will become included again. It is not possible to | ||||
| //		  re-include a file if a parent directory of that file is excluded. | ||||
| //		  Git doesn’t list excluded directories for performance reasons, so | ||||
| //		  any patterns on contained files have no effect, no matter where they are | ||||
| //		  defined. Put a backslash ("\") in front of the first "!" for patterns | ||||
| //		  that begin with a literal "!", for example, "\!important!.txt". | ||||
| // | ||||
| //		- If the pattern ends with a slash, it is removed for the purpose of the | ||||
| //		  following description, but it would only find a match with a directory. | ||||
| //		  In other words, foo/ will match a directory foo and paths underneath it, | ||||
| //		  but will not match a regular file or a symbolic link foo (this is consistent | ||||
| //		  with the way how pathspec works in general in Git). | ||||
| // | ||||
| //		- If the pattern does not contain a slash /, Git treats it as a shell glob | ||||
| //		  pattern and checks for a match against the pathname relative to the location | ||||
| //		  of the .gitignore file (relative to the toplevel of the work tree if not | ||||
| //		  from a .gitignore file). | ||||
| // | ||||
| //		- Otherwise, Git treats the pattern as a shell glob suitable for consumption | ||||
| //		  by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will | ||||
| //		  not match a / in the pathname. For example, "Documentation/*.html" matches | ||||
| //		  "Documentation/git.html" but not "Documentation/ppc/ppc.html" or | ||||
| //		  "tools/perf/Documentation/perf.html". | ||||
| // | ||||
| //		- A leading slash matches the beginning of the pathname. For example, | ||||
| //		  "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". | ||||
| // | ||||
| //		Two consecutive asterisks ("**") in patterns matched against full pathname | ||||
| //		may have special meaning: | ||||
| // | ||||
| //		- A leading "**" followed by a slash means match in all directories. | ||||
| //		  For example, "**/foo" matches file or directory "foo" anywhere, the same as | ||||
| //		  pattern "foo". "**/foo/bar" matches file or directory "bar" | ||||
| //		  anywhere that is directly under directory "foo". | ||||
| // | ||||
| //		- A trailing "/**" matches everything inside. For example, "abc/**" matches | ||||
| //		  all files inside directory "abc", relative to the location of the | ||||
| //		  .gitignore file, with infinite depth. | ||||
| // | ||||
| //		- A slash followed by two consecutive asterisks then a slash matches | ||||
| //		  zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b", | ||||
| //		  "a/x/y/b" and so on. | ||||
| // | ||||
| //		- Other consecutive asterisks are considered invalid. | ||||
| // | ||||
| //   Copyright and license | ||||
| //   ===================== | ||||
| // | ||||
| //		Copyright (c) Oleg Sklyar, Silvertern and source{d} | ||||
| // | ||||
| //		The package code was donated to source{d} to include, modify and develop | ||||
| //		further as a part of the `go-git` project, release it on the license of | ||||
| //		the whole project or delete it from the project. | ||||
| package gitignore | ||||
30  vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go  generated  vendored  Normal file
							| @@ -0,0 +1,30 @@ | ||||
| package gitignore | ||||
|  | ||||
| // Matcher defines a global multi-pattern matcher for gitignore patterns | ||||
| type Matcher interface { | ||||
| 	// Match matches patterns in the order of priorities. As soon as an inclusion | ||||
| 	// or exclusion is found, no further matching is performed. | ||||
| 	Match(path []string, isDir bool) bool | ||||
| } | ||||
|  | ||||
| // NewMatcher constructs a new global matcher. Patterns must be given in the | ||||
| // order of increasing priority: the most generic settings files first, then | ||||
| // the contents of the repository's .gitignore, then the contents of .gitignore | ||||
| // files further down the path, and finally any patterns from the command line. | ||||
| func NewMatcher(ps []Pattern) Matcher { | ||||
| 	return &matcher{ps} | ||||
| } | ||||
|  | ||||
| type matcher struct { | ||||
| 	patterns []Pattern | ||||
| } | ||||
|  | ||||
| func (m *matcher) Match(path []string, isDir bool) bool { | ||||
| 	n := len(m.patterns) | ||||
| 	for i := n - 1; i >= 0; i-- { | ||||
| 		if match := m.patterns[i].Match(path, isDir); match > NoMatch { | ||||
| 			return match == Exclude | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
153  vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go  generated  vendored  Normal file
							| @@ -0,0 +1,153 @@ | ||||
| package gitignore | ||||
|  | ||||
| import ( | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // MatchResult defines outcomes of a match, no match, exclusion or inclusion. | ||||
| type MatchResult int | ||||
|  | ||||
| const ( | ||||
| 	// NoMatch defines the no match outcome of a match check | ||||
| 	NoMatch MatchResult = iota | ||||
| 	// Exclude defines an exclusion of a file as a result of a match check | ||||
| 	Exclude | ||||
| 	// Include defines an explicit inclusion of a file as a result of a match check | ||||
| 	Include | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	inclusionPrefix = "!" | ||||
| 	zeroToManyDirs  = "**" | ||||
| 	patternDirSep   = "/" | ||||
| ) | ||||
|  | ||||
| // Pattern defines a single gitignore pattern. | ||||
| type Pattern interface { | ||||
| 	// Match matches the given path to the pattern. | ||||
| 	Match(path []string, isDir bool) MatchResult | ||||
| } | ||||
|  | ||||
| type pattern struct { | ||||
| 	domain    []string | ||||
| 	pattern   []string | ||||
| 	inclusion bool | ||||
| 	dirOnly   bool | ||||
| 	isGlob    bool | ||||
| } | ||||
|  | ||||
| // ParsePattern parses a gitignore pattern string into the Pattern structure. | ||||
| func ParsePattern(p string, domain []string) Pattern { | ||||
| 	res := pattern{domain: domain} | ||||
|  | ||||
| 	if strings.HasPrefix(p, inclusionPrefix) { | ||||
| 		res.inclusion = true | ||||
| 		p = p[1:] | ||||
| 	} | ||||
|  | ||||
| 	if !strings.HasSuffix(p, "\\ ") { | ||||
| 		p = strings.TrimRight(p, " ") | ||||
| 	} | ||||
|  | ||||
| 	if strings.HasSuffix(p, patternDirSep) { | ||||
| 		res.dirOnly = true | ||||
| 		p = p[:len(p)-1] | ||||
| 	} | ||||
|  | ||||
| 	if strings.Contains(p, patternDirSep) { | ||||
| 		res.isGlob = true | ||||
| 	} | ||||
|  | ||||
| 	res.pattern = strings.Split(p, patternDirSep) | ||||
| 	return &res | ||||
| } | ||||
|  | ||||
| func (p *pattern) Match(path []string, isDir bool) MatchResult { | ||||
| 	if len(path) <= len(p.domain) { | ||||
| 		return NoMatch | ||||
| 	} | ||||
| 	for i, e := range p.domain { | ||||
| 		if path[i] != e { | ||||
| 			return NoMatch | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	path = path[len(p.domain):] | ||||
| 	if p.isGlob && !p.globMatch(path, isDir) { | ||||
| 		return NoMatch | ||||
| 	} else if !p.isGlob && !p.simpleNameMatch(path, isDir) { | ||||
| 		return NoMatch | ||||
| 	} | ||||
|  | ||||
| 	if p.inclusion { | ||||
| 		return Include | ||||
| 	} else { | ||||
| 		return Exclude | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (p *pattern) simpleNameMatch(path []string, isDir bool) bool { | ||||
| 	for i, name := range path { | ||||
| 		if match, err := filepath.Match(p.pattern[0], name); err != nil { | ||||
| 			return false | ||||
| 		} else if !match { | ||||
| 			continue | ||||
| 		} | ||||
| 		if p.dirOnly && !isDir && i == len(path)-1 { | ||||
| 			return false | ||||
| 		} | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (p *pattern) globMatch(path []string, isDir bool) bool { | ||||
| 	matched := false | ||||
| 	canTraverse := false | ||||
| 	for i, pattern := range p.pattern { | ||||
| 		if pattern == "" { | ||||
| 			canTraverse = false | ||||
| 			continue | ||||
| 		} | ||||
| 		if pattern == zeroToManyDirs { | ||||
| 			if i == len(p.pattern)-1 { | ||||
| 				break | ||||
| 			} | ||||
| 			canTraverse = true | ||||
| 			continue | ||||
| 		} | ||||
| 		if strings.Contains(pattern, zeroToManyDirs) { | ||||
| 			return false | ||||
| 		} | ||||
| 		if len(path) == 0 { | ||||
| 			return false | ||||
| 		} | ||||
| 		if canTraverse { | ||||
| 			canTraverse = false | ||||
| 			for len(path) > 0 { | ||||
| 				e := path[0] | ||||
| 				path = path[1:] | ||||
| 				if match, err := filepath.Match(pattern, e); err != nil { | ||||
| 					return false | ||||
| 				} else if match { | ||||
| 					matched = true | ||||
| 					break | ||||
| 				} else if len(path) == 0 { | ||||
| 					// if nothing left then fail | ||||
| 					matched = false | ||||
| 				} | ||||
| 			} | ||||
| 		} else { | ||||
| 			if match, err := filepath.Match(pattern, path[0]); err != nil || !match { | ||||
| 				return false | ||||
| 			} | ||||
| 			matched = true | ||||
| 			path = path[1:] | ||||
| 		} | ||||
| 	} | ||||
| 	if matched && p.dirOnly && !isDir && len(path) == 0 { | ||||
| 		matched = false | ||||
| 	} | ||||
| 	return matched | ||||
| } | ||||
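The "**" and directory-only rules quoted in the package documentation map directly onto ParsePattern and Match. A short sketch of the documented behaviours, with the expected results noted in comments (assuming the rules above hold):

    package main

    import (
    	"fmt"

    	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
    )

    func main() {
    	// "a/**/b" matches zero or more intermediate directories.
    	p := gitignore.ParsePattern("a/**/b", nil)
    	fmt.Println(p.Match([]string{"a", "b"}, false) == gitignore.Exclude)           // true
    	fmt.Println(p.Match([]string{"a", "x", "y", "b"}, false) == gitignore.Exclude) // true

    	// A leading "**/" matches in all directories.
    	q := gitignore.ParsePattern("**/foo", nil)
    	fmt.Println(q.Match([]string{"bar", "foo"}, false) == gitignore.Exclude) // true

    	// A trailing "/" restricts the pattern to directories.
    	d := gitignore.ParsePattern("build/", nil)
    	fmt.Println(d.Match([]string{"build"}, true) == gitignore.Exclude)  // true
    	fmt.Println(d.Match([]string{"build"}, false) == gitignore.NoMatch) // true
    }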
177  vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go  generated  vendored  Normal file
							| @@ -0,0 +1,177 @@ | ||||
| package idxfile | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/utils/binary" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// ErrUnsupportedVersion is returned by Decode when the idx file version | ||||
| 	// is not supported. | ||||
| 	ErrUnsupportedVersion = errors.New("Unsupported version") | ||||
| 	// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. | ||||
| 	ErrMalformedIdxFile = errors.New("Malformed IDX file") | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	fanout         = 256 | ||||
| 	objectIDLength = 20 | ||||
| ) | ||||
|  | ||||
| // Decoder reads and decodes idx files from an input stream. | ||||
| type Decoder struct { | ||||
| 	*bufio.Reader | ||||
| } | ||||
|  | ||||
| // NewDecoder builds a new idx stream decoder that reads from r. | ||||
| func NewDecoder(r io.Reader) *Decoder { | ||||
| 	return &Decoder{bufio.NewReader(r)} | ||||
| } | ||||
|  | ||||
| // Decode reads from the stream and decodes the content into the MemoryIndex struct. | ||||
| func (d *Decoder) Decode(idx *MemoryIndex) error { | ||||
| 	if err := validateHeader(d); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	flow := []func(*MemoryIndex, io.Reader) error{ | ||||
| 		readVersion, | ||||
| 		readFanout, | ||||
| 		readObjectNames, | ||||
| 		readCRC32, | ||||
| 		readOffsets, | ||||
| 		readChecksums, | ||||
| 	} | ||||
|  | ||||
| 	for _, f := range flow { | ||||
| 		if err := f(idx, d); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func validateHeader(r io.Reader) error { | ||||
| 	var h = make([]byte, 4) | ||||
| 	if _, err := io.ReadFull(r, h); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if !bytes.Equal(h, idxHeader) { | ||||
| 		return ErrMalformedIdxFile | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func readVersion(idx *MemoryIndex, r io.Reader) error { | ||||
| 	v, err := binary.ReadUint32(r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if v > VersionSupported { | ||||
| 		return ErrUnsupportedVersion | ||||
| 	} | ||||
|  | ||||
| 	idx.Version = v | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func readFanout(idx *MemoryIndex, r io.Reader) error { | ||||
| 	for k := 0; k < fanout; k++ { | ||||
| 		n, err := binary.ReadUint32(r) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		idx.Fanout[k] = n | ||||
| 		idx.FanoutMapping[k] = noMapping | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func readObjectNames(idx *MemoryIndex, r io.Reader) error { | ||||
| 	for k := 0; k < fanout; k++ { | ||||
| 		var buckets uint32 | ||||
| 		if k == 0 { | ||||
| 			buckets = idx.Fanout[k] | ||||
| 		} else { | ||||
| 			buckets = idx.Fanout[k] - idx.Fanout[k-1] | ||||
| 		} | ||||
|  | ||||
| 		if buckets == 0 { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		idx.FanoutMapping[k] = len(idx.Names) | ||||
|  | ||||
| 		nameLen := int(buckets * objectIDLength) | ||||
| 		bin := make([]byte, nameLen) | ||||
| 		if _, err := io.ReadFull(r, bin); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		idx.Names = append(idx.Names, bin) | ||||
| 		idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4)) | ||||
| 		idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4)) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func readCRC32(idx *MemoryIndex, r io.Reader) error { | ||||
| 	for k := 0; k < fanout; k++ { | ||||
| 		if pos := idx.FanoutMapping[k]; pos != noMapping { | ||||
| 			if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func readOffsets(idx *MemoryIndex, r io.Reader) error { | ||||
| 	var o64cnt int | ||||
| 	for k := 0; k < fanout; k++ { | ||||
| 		if pos := idx.FanoutMapping[k]; pos != noMapping { | ||||
| 			if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			for p := 0; p < len(idx.Offset32[pos]); p += 4 { | ||||
| 				if idx.Offset32[pos][p]&(byte(1)<<7) > 0 { | ||||
| 					o64cnt++ | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if o64cnt > 0 { | ||||
| 		idx.Offset64 = make([]byte, o64cnt*8) | ||||
| 		if _, err := io.ReadFull(r, idx.Offset64); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func readChecksums(idx *MemoryIndex, r io.Reader) error { | ||||
| 	if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
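Populating a MemoryIndex from an on-disk .idx file is a thin wrapper around this API. A sketch (the pack hash in the file name is a placeholder):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
    )

    func main() {
    	// The pack hash in the file name is a placeholder.
    	f, err := os.Open(".git/objects/pack/pack-XXXX.idx")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	idx := idxfile.NewMemoryIndex()
    	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
    		panic(err)
    	}

    	count, _ := idx.Count()
    	fmt.Println("objects in pack:", count)
    }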
128  vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go  generated  vendored  Normal file
							| @@ -0,0 +1,128 @@ | ||||
| // Package idxfile implements encoding and decoding of packfile idx files. | ||||
| // | ||||
| //  == Original (version 1) pack-*.idx files have the following format: | ||||
| // | ||||
| //    - The header consists of 256 4-byte network byte order | ||||
| //      integers.  N-th entry of this table records the number of | ||||
| //      objects in the corresponding pack, the first byte of whose | ||||
| //      object name is less than or equal to N.  This is called the | ||||
| //      'first-level fan-out' table. | ||||
| // | ||||
| //    - The header is followed by sorted 24-byte entries, one entry | ||||
| //      per object in the pack.  Each entry is: | ||||
| // | ||||
| //     4-byte network byte order integer, recording where the | ||||
| //     object is stored in the packfile as the offset from the | ||||
| //     beginning. | ||||
| // | ||||
| //     20-byte object name. | ||||
| // | ||||
| //   - The file is concluded with a trailer: | ||||
| // | ||||
| //     A copy of the 20-byte SHA1 checksum at the end of | ||||
| //     corresponding packfile. | ||||
| // | ||||
| //     20-byte SHA1-checksum of all of the above. | ||||
| // | ||||
| //   Pack Idx file: | ||||
| // | ||||
| //        --  +--------------------------------+ | ||||
| //   fanout   | fanout[0] = 2 (for example)    |-. | ||||
| //   table    +--------------------------------+ | | ||||
| //            | fanout[1]                      | | | ||||
| //            +--------------------------------+ | | ||||
| //            | fanout[2]                      | | | ||||
| //            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | ||||
| //            | fanout[255] = total objects    |---. | ||||
| //        --  +--------------------------------+ | | | ||||
| //   main     | offset                         | | | | ||||
| //   index    | object name 00XXXXXXXXXXXXXXXX | | | | ||||
| //   tab      +--------------------------------+ | | | ||||
| //            | offset                         | | | | ||||
| //            | object name 00XXXXXXXXXXXXXXXX | | | | ||||
| //            +--------------------------------+<+ | | ||||
| //          .-| offset                         |   | | ||||
| //          | | object name 01XXXXXXXXXXXXXXXX |   | | ||||
| //          | +--------------------------------+   | | ||||
| //          | | offset                         |   | | ||||
| //          | | object name 01XXXXXXXXXXXXXXXX |   | | ||||
| //          | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~   | | ||||
| //          | | offset                         |   | | ||||
| //          | | object name FFXXXXXXXXXXXXXXXX |   | | ||||
| //        --| +--------------------------------+<--+ | ||||
| //  trailer | | packfile checksum              | | ||||
| //          | +--------------------------------+ | ||||
| //          | | idxfile checksum               | | ||||
| //          | +--------------------------------+ | ||||
| //          .---------. | ||||
| //                    | | ||||
| //  Pack file entry: <+ | ||||
| // | ||||
| //     packed object header: | ||||
| //     1-byte size extension bit (MSB) | ||||
| //           type (next 3 bit) | ||||
| //           size0 (lower 4-bit) | ||||
| //         n-byte sizeN (as long as MSB is set, each 7-bit) | ||||
| //         size0..sizeN form 4+7+7+..+7 bit integer, size0 | ||||
| //         is the least significant part, and sizeN is the | ||||
| //         most significant part. | ||||
| //     packed object data: | ||||
| //         If it is not DELTA, then deflated bytes (the size above | ||||
| //         is the size before compression). | ||||
| //     If it is REF_DELTA, then | ||||
| //       20-byte base object name SHA1 (the size above is the | ||||
| //         size of the delta data that follows). | ||||
| //           delta data, deflated. | ||||
| //     If it is OFS_DELTA, then | ||||
| //       n-byte offset (see below) interpreted as a negative | ||||
| //         offset from the type-byte of the header of the | ||||
| //         ofs-delta entry (the size above is the size of | ||||
| //         the delta data that follows). | ||||
| //       delta data, deflated. | ||||
| // | ||||
| //     offset encoding: | ||||
| //       n bytes with MSB set in all but the last one. | ||||
| //       The offset is then the number constructed by | ||||
| //       concatenating the lower 7 bit of each byte, and | ||||
| //       for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1)) | ||||
| //       to the result. | ||||
| // | ||||
| //   == Version 2 pack-*.idx files support packs larger than 4 GiB, and | ||||
| //      have some other reorganizations.  They have the format: | ||||
| // | ||||
| //     - A 4-byte magic number '\377tOc' which is an unreasonable | ||||
| //       fanout[0] value. | ||||
| // | ||||
| //     - A 4-byte version number (= 2) | ||||
| // | ||||
| //     - A 256-entry fan-out table just like v1. | ||||
| // | ||||
| //     - A table of sorted 20-byte SHA1 object names.  These are | ||||
| //       packed together without offset values to reduce the cache | ||||
| //       footprint of the binary search for a specific object name. | ||||
| // | ||||
| //     - A table of 4-byte CRC32 values of the packed object data. | ||||
| //       This is new in v2 so compressed data can be copied directly | ||||
| //       from pack to pack during repacking without undetected | ||||
| //       data corruption. | ||||
| // | ||||
| //     - A table of 4-byte offset values (in network byte order). | ||||
| //       These are usually 31-bit pack file offsets, but large | ||||
| //       offsets are encoded as an index into the next table with | ||||
| //       the msbit set. | ||||
| // | ||||
| //     - A table of 8-byte offset entries (empty for pack files less | ||||
| //       than 2 GiB).  Pack files are organized with heavily used | ||||
| //       objects toward the front, so most object references should | ||||
| //       not need to refer to this table. | ||||
| // | ||||
| //     - The same trailer as a v1 pack file: | ||||
| // | ||||
| //       A copy of the 20-byte SHA1 checksum at the end of | ||||
| //       corresponding packfile. | ||||
| // | ||||
| //       20-byte SHA1-checksum of all of the above. | ||||
| // | ||||
| // Source: | ||||
| // https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt | ||||
| package idxfile | ||||
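The OFS_DELTA offset encoding described in the notes above ("n bytes with MSB set in all but the last one", plus the 2^7 + 2^14 + ... correction) is compact enough to decode by hand. The helper below is illustrative only, not part of this package:

    package main

    import (
    	"bufio"
    	"bytes"
    	"fmt"
    )

    // readOfsDeltaOffset decodes the variable-length offset described in the
    // pack format notes: the low 7 bits of each byte are concatenated, and the
    // "+1" before each shift adds the 2^7 + 2^14 + ... correction for every
    // byte after the first.
    func readOfsDeltaOffset(r *bufio.Reader) (uint64, error) {
    	b, err := r.ReadByte()
    	if err != nil {
    		return 0, err
    	}
    	offset := uint64(b & 0x7f)
    	for b&0x80 != 0 {
    		if b, err = r.ReadByte(); err != nil {
    			return 0, err
    		}
    		offset = ((offset + 1) << 7) | uint64(b&0x7f)
    	}
    	return offset, nil
    }

    func main() {
    	// 0x91 0x2e encodes (0x11+1)<<7 | 0x2e = 2350.
    	r := bufio.NewReader(bytes.NewReader([]byte{0x91, 0x2e}))
    	n, _ := readOfsDeltaOffset(r)
    	fmt.Println(n) // 2350
    }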
142  vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go  generated  vendored  Normal file
							| @@ -0,0 +1,142 @@ | ||||
| package idxfile | ||||
|  | ||||
| import ( | ||||
| 	"crypto/sha1" | ||||
| 	"hash" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/utils/binary" | ||||
| ) | ||||
|  | ||||
| // Encoder writes MemoryIndex structs to an output stream. | ||||
| type Encoder struct { | ||||
| 	io.Writer | ||||
| 	hash hash.Hash | ||||
| } | ||||
|  | ||||
| // NewEncoder returns a new stream encoder that writes to w. | ||||
| func NewEncoder(w io.Writer) *Encoder { | ||||
| 	h := sha1.New() | ||||
| 	mw := io.MultiWriter(w, h) | ||||
| 	return &Encoder{mw, h} | ||||
| } | ||||
|  | ||||
| // Encode encodes a MemoryIndex to the encoder writer. | ||||
| func (e *Encoder) Encode(idx *MemoryIndex) (int, error) { | ||||
| 	flow := []func(*MemoryIndex) (int, error){ | ||||
| 		e.encodeHeader, | ||||
| 		e.encodeFanout, | ||||
| 		e.encodeHashes, | ||||
| 		e.encodeCRC32, | ||||
| 		e.encodeOffsets, | ||||
| 		e.encodeChecksums, | ||||
| 	} | ||||
|  | ||||
| 	sz := 0 | ||||
| 	for _, f := range flow { | ||||
| 		i, err := f(idx) | ||||
| 		sz += i | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return sz, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return sz, nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) { | ||||
| 	c, err := e.Write(idxHeader) | ||||
| 	if err != nil { | ||||
| 		return c, err | ||||
| 	} | ||||
|  | ||||
| 	return c + 4, binary.WriteUint32(e, idx.Version) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) { | ||||
| 	for _, c := range idx.Fanout { | ||||
| 		if err := binary.WriteUint32(e, c); err != nil { | ||||
| 			return 0, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return fanout * 4, nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) { | ||||
| 	var size int | ||||
| 	for k := 0; k < fanout; k++ { | ||||
| 		pos := idx.FanoutMapping[k] | ||||
| 		if pos == noMapping { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		n, err := e.Write(idx.Names[pos]) | ||||
| 		if err != nil { | ||||
| 			return size, err | ||||
| 		} | ||||
| 		size += n | ||||
| 	} | ||||
| 	return size, nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) { | ||||
| 	var size int | ||||
| 	for k := 0; k < fanout; k++ { | ||||
| 		pos := idx.FanoutMapping[k] | ||||
| 		if pos == noMapping { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		n, err := e.Write(idx.CRC32[pos]) | ||||
| 		if err != nil { | ||||
| 			return size, err | ||||
| 		} | ||||
|  | ||||
| 		size += n | ||||
| 	} | ||||
|  | ||||
| 	return size, nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) { | ||||
| 	var size int | ||||
| 	for k := 0; k < fanout; k++ { | ||||
| 		pos := idx.FanoutMapping[k] | ||||
| 		if pos == noMapping { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		n, err := e.Write(idx.Offset32[pos]) | ||||
| 		if err != nil { | ||||
| 			return size, err | ||||
| 		} | ||||
|  | ||||
| 		size += n | ||||
| 	} | ||||
|  | ||||
| 	if len(idx.Offset64) > 0 { | ||||
| 		n, err := e.Write(idx.Offset64) | ||||
| 		if err != nil { | ||||
| 			return size, err | ||||
| 		} | ||||
|  | ||||
| 		size += n | ||||
| 	} | ||||
|  | ||||
| 	return size, nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) { | ||||
| 	if _, err := e.Write(idx.PackfileChecksum[:]); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) | ||||
| 	if _, err := e.Write(idx.IdxChecksum[:]); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	return 40, nil | ||||
| } | ||||
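Since Encoder writes the sections in the same order Decoder reads them, and recomputes the trailing SHA-1 through the MultiWriter set up in NewEncoder, a decode/encode round trip should reproduce the input byte for byte. A sketch (the idx path is a placeholder):

    package main

    import (
    	"bytes"
    	"fmt"
    	"io/ioutil"

    	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
    )

    func main() {
    	// The pack hash in the file name is a placeholder.
    	data, err := ioutil.ReadFile(".git/objects/pack/pack-XXXX.idx")
    	if err != nil {
    		panic(err)
    	}

    	idx := idxfile.NewMemoryIndex()
    	if err := idxfile.NewDecoder(bytes.NewReader(data)).Decode(idx); err != nil {
    		panic(err)
    	}

    	// Re-encode: the result should be byte-identical to the input.
    	var out bytes.Buffer
    	if _, err := idxfile.NewEncoder(&out).Encode(idx); err != nil {
    		panic(err)
    	}
    	fmt.Println("round-trip identical:", bytes.Equal(data, out.Bytes()))
    }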
346  vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go  generated  vendored  Normal file
							| @@ -0,0 +1,346 @@ | ||||
| package idxfile | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"sort" | ||||
|  | ||||
| 	encbin "encoding/binary" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// VersionSupported is the only idx version supported. | ||||
| 	VersionSupported = 2 | ||||
|  | ||||
| 	noMapping = -1 | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	idxHeader = []byte{255, 't', 'O', 'c'} | ||||
| ) | ||||
|  | ||||
| // Index represents an index of a packfile. | ||||
| type Index interface { | ||||
| 	// Contains checks whether the given hash is in the index. | ||||
| 	Contains(h plumbing.Hash) (bool, error) | ||||
| 	// FindOffset finds the offset in the packfile for the object with | ||||
| 	// the given hash. | ||||
| 	FindOffset(h plumbing.Hash) (int64, error) | ||||
| 	// FindCRC32 finds the CRC32 of the object with the given hash. | ||||
| 	FindCRC32(h plumbing.Hash) (uint32, error) | ||||
| 	// FindHash finds the hash for the object with the given offset. | ||||
| 	FindHash(o int64) (plumbing.Hash, error) | ||||
| 	// Count returns the number of entries in the index. | ||||
| 	Count() (int64, error) | ||||
| 	// Entries returns an iterator to retrieve all index entries. | ||||
| 	Entries() (EntryIter, error) | ||||
| 	// EntriesByOffset returns an iterator to retrieve all index entries ordered | ||||
| 	// by offset. | ||||
| 	EntriesByOffset() (EntryIter, error) | ||||
| } | ||||
|  | ||||
| // MemoryIndex is the in memory representation of an idx file. | ||||
| type MemoryIndex struct { | ||||
| 	Version uint32 | ||||
| 	Fanout  [256]uint32 | ||||
| 	// FanoutMapping maps the position in the fanout table to the position | ||||
| 	// in the Names, Offset32 and CRC32 slices. This improves the memory | ||||
| 	// usage by not needing an array with unnecessary empty slots. | ||||
| 	FanoutMapping    [256]int | ||||
| 	Names            [][]byte | ||||
| 	Offset32         [][]byte | ||||
| 	CRC32            [][]byte | ||||
| 	Offset64         []byte | ||||
| 	PackfileChecksum [20]byte | ||||
| 	IdxChecksum      [20]byte | ||||
|  | ||||
| 	offsetHash       map[int64]plumbing.Hash | ||||
| 	offsetHashIsFull bool | ||||
| } | ||||
|  | ||||
| var _ Index = (*MemoryIndex)(nil) | ||||
|  | ||||
| // NewMemoryIndex returns an instance of a new MemoryIndex. | ||||
| func NewMemoryIndex() *MemoryIndex { | ||||
| 	return &MemoryIndex{} | ||||
| } | ||||
|  | ||||
| func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) { | ||||
| 	k := idx.FanoutMapping[h[0]] | ||||
| 	if k == noMapping { | ||||
| 		return 0, false | ||||
| 	} | ||||
|  | ||||
| 	if len(idx.Names) <= k { | ||||
| 		return 0, false | ||||
| 	} | ||||
|  | ||||
| 	data := idx.Names[k] | ||||
| 	high := uint64(len(idx.Offset32[k])) >> 2 | ||||
| 	if high == 0 { | ||||
| 		return 0, false | ||||
| 	} | ||||
|  | ||||
| 	low := uint64(0) | ||||
| 	for { | ||||
| 		mid := (low + high) >> 1 | ||||
| 		offset := mid * objectIDLength | ||||
|  | ||||
| 		cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength]) | ||||
| 		if cmp < 0 { | ||||
| 			high = mid | ||||
| 		} else if cmp == 0 { | ||||
| 			return int(mid), true | ||||
| 		} else { | ||||
| 			low = mid + 1 | ||||
| 		} | ||||
|  | ||||
| 		if low >= high { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return 0, false | ||||
| } | ||||
|  | ||||
| // Contains implements the Index interface. | ||||
| func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) { | ||||
| 	_, ok := idx.findHashIndex(h) | ||||
| 	return ok, nil | ||||
| } | ||||
|  | ||||
| // FindOffset implements the Index interface. | ||||
| func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { | ||||
| 	if len(idx.FanoutMapping) <= int(h[0]) { | ||||
| 		return 0, plumbing.ErrObjectNotFound | ||||
| 	} | ||||
|  | ||||
| 	k := idx.FanoutMapping[h[0]] | ||||
| 	i, ok := idx.findHashIndex(h) | ||||
| 	if !ok { | ||||
| 		return 0, plumbing.ErrObjectNotFound | ||||
| 	} | ||||
|  | ||||
| 	offset := idx.getOffset(k, i) | ||||
|  | ||||
| 	if !idx.offsetHashIsFull { | ||||
| 		// Save the offset for reverse lookup | ||||
| 		if idx.offsetHash == nil { | ||||
| 			idx.offsetHash = make(map[int64]plumbing.Hash) | ||||
| 		} | ||||
| 		idx.offsetHash[int64(offset)] = h | ||||
| 	} | ||||
|  | ||||
| 	return int64(offset), nil | ||||
| } | ||||
|  | ||||
| const isO64Mask = uint64(1) << 31 | ||||
|  | ||||
| func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 { | ||||
| 	offset := secondLevel << 2 | ||||
| 	ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4]) | ||||
|  | ||||
| 	if (uint64(ofs) & isO64Mask) != 0 { | ||||
| 		offset := 8 * (uint64(ofs) & ^isO64Mask) | ||||
| 		n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8]) | ||||
| 		return n | ||||
| 	} | ||||
|  | ||||
| 	return uint64(ofs) | ||||
| } | ||||
|  | ||||
| // FindCRC32 implements the Index interface. | ||||
| func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) { | ||||
| 	k := idx.FanoutMapping[h[0]] | ||||
| 	i, ok := idx.findHashIndex(h) | ||||
| 	if !ok { | ||||
| 		return 0, plumbing.ErrObjectNotFound | ||||
| 	} | ||||
|  | ||||
| 	return idx.getCRC32(k, i), nil | ||||
| } | ||||
|  | ||||
| func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 { | ||||
| 	offset := secondLevel << 2 | ||||
| 	return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4]) | ||||
| } | ||||
|  | ||||
| // FindHash implements the Index interface. | ||||
| func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { | ||||
| 	var hash plumbing.Hash | ||||
| 	var ok bool | ||||
|  | ||||
| 	if idx.offsetHash != nil { | ||||
| 		if hash, ok = idx.offsetHash[o]; ok { | ||||
| 			return hash, nil | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Lazily generate the reverse offset/hash map if required. | ||||
| 	if !idx.offsetHashIsFull || idx.offsetHash == nil { | ||||
| 		if err := idx.genOffsetHash(); err != nil { | ||||
| 			return plumbing.ZeroHash, err | ||||
| 		} | ||||
|  | ||||
| 		hash, ok = idx.offsetHash[o] | ||||
| 	} | ||||
|  | ||||
| 	if !ok { | ||||
| 		return plumbing.ZeroHash, plumbing.ErrObjectNotFound | ||||
| 	} | ||||
|  | ||||
| 	return hash, nil | ||||
| } | ||||
|  | ||||
| // genOffsetHash generates the offset/hash mapping for reverse search. | ||||
| func (idx *MemoryIndex) genOffsetHash() error { | ||||
| 	count, err := idx.Count() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	idx.offsetHash = make(map[int64]plumbing.Hash, count) | ||||
| 	idx.offsetHashIsFull = true | ||||
|  | ||||
| 	var hash plumbing.Hash | ||||
| 	i := uint32(0) | ||||
| 	for firstLevel, fanoutValue := range idx.Fanout { | ||||
| 		mappedFirstLevel := idx.FanoutMapping[firstLevel] | ||||
| 		for secondLevel := uint32(0); i < fanoutValue; i++ { | ||||
| 			copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:]) | ||||
| 			offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel))) | ||||
| 			idx.offsetHash[offset] = hash | ||||
| 			secondLevel++ | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Count implements the Index interface. | ||||
| func (idx *MemoryIndex) Count() (int64, error) { | ||||
| 	return int64(idx.Fanout[fanout-1]), nil | ||||
| } | ||||
|  | ||||
| // Entries implements the Index interface. | ||||
| func (idx *MemoryIndex) Entries() (EntryIter, error) { | ||||
| 	return &idxfileEntryIter{idx, 0, 0, 0}, nil | ||||
| } | ||||
|  | ||||
| // EntriesByOffset implements the Index interface. | ||||
| func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) { | ||||
| 	count, err := idx.Count() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	iter := &idxfileEntryOffsetIter{ | ||||
| 		entries: make(entriesByOffset, count), | ||||
| 	} | ||||
|  | ||||
| 	entries, err := idx.Entries() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	for pos := 0; int64(pos) < count; pos++ { | ||||
| 		entry, err := entries.Next() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		iter.entries[pos] = entry | ||||
| 	} | ||||
|  | ||||
| 	sort.Sort(iter.entries) | ||||
|  | ||||
| 	return iter, nil | ||||
| } | ||||
|  | ||||
| // EntryIter is an iterator that will return the entries in a packfile index. | ||||
| type EntryIter interface { | ||||
| 	// Next returns the next entry in the packfile index. | ||||
| 	Next() (*Entry, error) | ||||
| 	// Close closes the iterator. | ||||
| 	Close() error | ||||
| } | ||||
|  | ||||
| type idxfileEntryIter struct { | ||||
| 	idx                     *MemoryIndex | ||||
| 	total                   int | ||||
| 	firstLevel, secondLevel int | ||||
| } | ||||
|  | ||||
| func (i *idxfileEntryIter) Next() (*Entry, error) { | ||||
| 	for { | ||||
| 		if i.firstLevel >= fanout { | ||||
| 			return nil, io.EOF | ||||
| 		} | ||||
|  | ||||
| 		if i.total >= int(i.idx.Fanout[i.firstLevel]) { | ||||
| 			i.firstLevel++ | ||||
| 			i.secondLevel = 0 | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel] | ||||
| 		entry := new(Entry) | ||||
| 		copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:]) | ||||
| 		entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel) | ||||
| 		entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel) | ||||
|  | ||||
| 		i.secondLevel++ | ||||
| 		i.total++ | ||||
|  | ||||
| 		return entry, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (i *idxfileEntryIter) Close() error { | ||||
| 	i.firstLevel = fanout | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Entry is the in memory representation of an object entry in the idx file. | ||||
| type Entry struct { | ||||
| 	Hash   plumbing.Hash | ||||
| 	CRC32  uint32 | ||||
| 	Offset uint64 | ||||
| } | ||||
|  | ||||
| type idxfileEntryOffsetIter struct { | ||||
| 	entries entriesByOffset | ||||
| 	pos     int | ||||
| } | ||||
|  | ||||
| func (i *idxfileEntryOffsetIter) Next() (*Entry, error) { | ||||
| 	if i.pos >= len(i.entries) { | ||||
| 		return nil, io.EOF | ||||
| 	} | ||||
|  | ||||
| 	entry := i.entries[i.pos] | ||||
| 	i.pos++ | ||||
|  | ||||
| 	return entry, nil | ||||
| } | ||||
|  | ||||
| func (i *idxfileEntryOffsetIter) Close() error { | ||||
| 	i.pos = len(i.entries) + 1 | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type entriesByOffset []*Entry | ||||
|  | ||||
| func (o entriesByOffset) Len() int { | ||||
| 	return len(o) | ||||
| } | ||||
|  | ||||
| func (o entriesByOffset) Less(i int, j int) bool { | ||||
| 	return o[i].Offset < o[j].Offset | ||||
| } | ||||
|  | ||||
| func (o entriesByOffset) Swap(i int, j int) { | ||||
| 	o[i], o[j] = o[j], o[i] | ||||
| } | ||||
							
								
								
									
186 vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go (generated, vendored, new file)
							| @@ -0,0 +1,186 @@ | ||||
| package idxfile | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"sort" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/utils/binary" | ||||
| ) | ||||
|  | ||||
| // objects implements sort.Interface and uses hash as sorting key. | ||||
| type objects []Entry | ||||
|  | ||||
| // Writer implements the packfile.Observer interface and is used to generate | ||||
| // packfile indexes. | ||||
| type Writer struct { | ||||
| 	m sync.Mutex | ||||
|  | ||||
| 	count    uint32 | ||||
| 	checksum plumbing.Hash | ||||
| 	objects  objects | ||||
| 	offset64 uint32 | ||||
| 	finished bool | ||||
| 	index    *MemoryIndex | ||||
| 	added    map[plumbing.Hash]struct{} | ||||
| } | ||||
|  | ||||
| // Index returns a previously created MemoryIndex or creates a new one if | ||||
| // needed. | ||||
| func (w *Writer) Index() (*MemoryIndex, error) { | ||||
| 	w.m.Lock() | ||||
| 	defer w.m.Unlock() | ||||
|  | ||||
| 	if w.index == nil { | ||||
| 		return w.createIndex() | ||||
| 	} | ||||
|  | ||||
| 	return w.index, nil | ||||
| } | ||||
|  | ||||
| // Add appends new object data. | ||||
| func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) { | ||||
| 	w.m.Lock() | ||||
| 	defer w.m.Unlock() | ||||
|  | ||||
| 	if w.added == nil { | ||||
| 		w.added = make(map[plumbing.Hash]struct{}) | ||||
| 	} | ||||
|  | ||||
| 	if _, ok := w.added[h]; !ok { | ||||
| 		w.added[h] = struct{}{} | ||||
| 		w.objects = append(w.objects, Entry{h, crc, pos}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Finished reports whether the writer has already observed the packfile | ||||
| // footer and the index information is complete. | ||||
| func (w *Writer) Finished() bool { | ||||
| 	return w.finished | ||||
| } | ||||
|  | ||||
| // OnHeader implements packfile.Observer interface. | ||||
| func (w *Writer) OnHeader(count uint32) error { | ||||
| 	w.count = count | ||||
| 	w.objects = make(objects, 0, count) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // OnInflatedObjectHeader implements packfile.Observer interface. | ||||
| func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // OnInflatedObjectContent implements packfile.Observer interface. | ||||
| func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error { | ||||
| 	w.Add(h, uint64(pos), crc) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // OnFooter implements packfile.Observer interface. | ||||
| func (w *Writer) OnFooter(h plumbing.Hash) error { | ||||
| 	w.checksum = h | ||||
| 	w.finished = true | ||||
| 	_, err := w.createIndex() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
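In go-git these callbacks are driven by the packfile parser, which accepts observers; a hedged sketch of producing an index from a pack stream under that assumption (packReader is a hypothetical io.Reader over a .pack file):

    w := new(idxfile.Writer)
    scanner := packfile.NewScanner(packReader)

    // w receives OnHeader/OnInflatedObjectContent/OnFooter as the pack is parsed.
    parser, err := packfile.NewParser(scanner, w)
    if err != nil {
        return err
    }

    if _, err := parser.Parse(); err != nil {
        return err
    }

    idx, err := w.Index() // safe now: OnFooter has marked the writer as finished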
|  | ||||
| // createIndex returns a MemoryIndex filled with the information gathered by | ||||
| // the observer callbacks. | ||||
| func (w *Writer) createIndex() (*MemoryIndex, error) { | ||||
| 	if !w.finished { | ||||
| 		return nil, fmt.Errorf("the index still hasn't finished building") | ||||
| 	} | ||||
|  | ||||
| 	idx := new(MemoryIndex) | ||||
| 	w.index = idx | ||||
|  | ||||
| 	sort.Sort(w.objects) | ||||
|  | ||||
| 	// unmap all fans by default | ||||
| 	for i := range idx.FanoutMapping { | ||||
| 		idx.FanoutMapping[i] = noMapping | ||||
| 	} | ||||
|  | ||||
| 	buf := new(bytes.Buffer) | ||||
|  | ||||
| 	last := -1 | ||||
| 	bucket := -1 | ||||
| 	for i, o := range w.objects { | ||||
| 		fan := o.Hash[0] | ||||
|  | ||||
| 		// fill the gaps between fans | ||||
| 		for j := last + 1; j < int(fan); j++ { | ||||
| 			idx.Fanout[j] = uint32(i) | ||||
| 		} | ||||
|  | ||||
| 		// update the number of objects for this position | ||||
| 		idx.Fanout[fan] = uint32(i + 1) | ||||
|  | ||||
| 		// we move from one bucket to another, update counters and allocate | ||||
| 		// memory | ||||
| 		if last != int(fan) { | ||||
| 			bucket++ | ||||
| 			idx.FanoutMapping[fan] = bucket | ||||
| 			last = int(fan) | ||||
|  | ||||
| 			idx.Names = append(idx.Names, make([]byte, 0)) | ||||
| 			idx.Offset32 = append(idx.Offset32, make([]byte, 0)) | ||||
| 			idx.CRC32 = append(idx.CRC32, make([]byte, 0)) | ||||
| 		} | ||||
|  | ||||
| 		idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...) | ||||
|  | ||||
| 		offset := o.Offset | ||||
| 		if offset > math.MaxInt32 { | ||||
| 			offset = w.addOffset64(offset) | ||||
| 		} | ||||
|  | ||||
| 		buf.Truncate(0) | ||||
| 		binary.WriteUint32(buf, uint32(offset)) | ||||
| 		idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...) | ||||
|  | ||||
| 		buf.Truncate(0) | ||||
| 		binary.WriteUint32(buf, o.CRC32) | ||||
| 		idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...) | ||||
| 	} | ||||
|  | ||||
| 	for j := last + 1; j < 256; j++ { | ||||
| 		idx.Fanout[j] = uint32(len(w.objects)) | ||||
| 	} | ||||
|  | ||||
| 	idx.Version = VersionSupported | ||||
| 	idx.PackfileChecksum = w.checksum | ||||
|  | ||||
| 	return idx, nil | ||||
| } | ||||
|  | ||||
| func (w *Writer) addOffset64(pos uint64) uint64 { | ||||
| 	buf := new(bytes.Buffer) | ||||
| 	binary.WriteUint64(buf, pos) | ||||
| 	w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...) | ||||
|  | ||||
| 	index := uint64(w.offset64 | (1 << 31)) | ||||
| 	w.offset64++ | ||||
|  | ||||
| 	return index | ||||
| } | ||||
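addOffset64 implements the idx v2 large-offset scheme: an offset that does not fit in 31 bits is appended to the Offset64 table, and the 32-bit slot instead stores that table position with the most significant bit set. A reader reverses the indirection; a minimal sketch assuming the MemoryIndex layout used above (decodeOffset is illustrative, not part of the package):

    package idxutil

    import (
        "encoding/binary"

        "github.com/go-git/go-git/v5/plumbing/format/idxfile"
    )

    // decodeOffset resolves a raw 32-bit offset slot, following the
    // large-offset indirection when the MSB is set.
    func decodeOffset(idx *idxfile.MemoryIndex, raw uint32) uint64 {
        if raw&(1<<31) == 0 {
            return uint64(raw) // small offset, stored inline
        }
        pos := (raw &^ (1 << 31)) * 8 // position in the 64-bit table, 8 bytes per entry
        return binary.BigEndian.Uint64(idx.Offset64[pos : pos+8])
    }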
|  | ||||
| func (o objects) Len() int { | ||||
| 	return len(o) | ||||
| } | ||||
|  | ||||
| func (o objects) Less(i int, j int) bool { | ||||
| 	cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:]) | ||||
| 	return cmp < 0 | ||||
| } | ||||
|  | ||||
| func (o objects) Swap(i int, j int) { | ||||
| 	o[i], o[j] = o[j], o[i] | ||||
| } | ||||
							
								
								
									
477 vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go (generated, vendored, new file)
							| @@ -0,0 +1,477 @@ | ||||
| package index | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"crypto/sha1" | ||||
| 	"errors" | ||||
| 	"hash" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/utils/binary" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// DecodeVersionSupported is the range of supported index versions | ||||
| 	DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4} | ||||
|  | ||||
| 	// ErrMalformedSignature is returned by Decode when the index file header | ||||
| 	// is malformed | ||||
| 	ErrMalformedSignature = errors.New("malformed index signature file") | ||||
| 	// ErrInvalidChecksum is returned by Decode if the computed SHA1 hash does | ||||
| 	// not match the checksum stored in the read content | ||||
| 	ErrInvalidChecksum = errors.New("invalid checksum") | ||||
|  | ||||
| 	errUnknownExtension = errors.New("unknown extension") | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	entryHeaderLength = 62 | ||||
| 	entryExtended     = 0x4000 | ||||
| 	entryValid        = 0x8000 | ||||
| 	nameMask          = 0xfff | ||||
| 	intentToAddMask   = 1 << 13 | ||||
| 	skipWorkTreeMask  = 1 << 14 | ||||
| ) | ||||
|  | ||||
| // A Decoder reads and decodes index files from an input stream. | ||||
| type Decoder struct { | ||||
| 	r         io.Reader | ||||
| 	hash      hash.Hash | ||||
| 	lastEntry *Entry | ||||
|  | ||||
| 	extReader *bufio.Reader | ||||
| } | ||||
|  | ||||
| // NewDecoder returns a new decoder that reads from r. | ||||
| func NewDecoder(r io.Reader) *Decoder { | ||||
| 	h := sha1.New() | ||||
| 	return &Decoder{ | ||||
| 		r:         io.TeeReader(r, h), | ||||
| 		hash:      h, | ||||
| 		extReader: bufio.NewReader(nil), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Decode reads the whole index object from its input and stores it in the | ||||
| // value pointed to by idx. | ||||
| func (d *Decoder) Decode(idx *Index) error { | ||||
| 	var err error | ||||
| 	idx.Version, err = validateHeader(d.r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	entryCount, err := binary.ReadUint32(d.r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if err := d.readEntries(idx, int(entryCount)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return d.readExtensions(idx) | ||||
| } | ||||
|  | ||||
| func (d *Decoder) readEntries(idx *Index, count int) error { | ||||
| 	for i := 0; i < count; i++ { | ||||
| 		e, err := d.readEntry(idx) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		d.lastEntry = e | ||||
| 		idx.Entries = append(idx.Entries, e) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (d *Decoder) readEntry(idx *Index) (*Entry, error) { | ||||
| 	e := &Entry{} | ||||
|  | ||||
| 	var msec, mnsec, sec, nsec uint32 | ||||
| 	var flags uint16 | ||||
|  | ||||
| 	flow := []interface{}{ | ||||
| 		&sec, &nsec, | ||||
| 		&msec, &mnsec, | ||||
| 		&e.Dev, | ||||
| 		&e.Inode, | ||||
| 		&e.Mode, | ||||
| 		&e.UID, | ||||
| 		&e.GID, | ||||
| 		&e.Size, | ||||
| 		&e.Hash, | ||||
| 		&flags, | ||||
| 	} | ||||
|  | ||||
| 	if err := binary.Read(d.r, flow...); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	read := entryHeaderLength | ||||
|  | ||||
| 	if sec != 0 || nsec != 0 { | ||||
| 		e.CreatedAt = time.Unix(int64(sec), int64(nsec)) | ||||
| 	} | ||||
|  | ||||
| 	if msec != 0 || mnsec != 0 { | ||||
| 		e.ModifiedAt = time.Unix(int64(msec), int64(mnsec)) | ||||
| 	} | ||||
|  | ||||
| 	e.Stage = Stage(flags>>12) & 0x3 | ||||
|  | ||||
| 	if flags&entryExtended != 0 { | ||||
| 		extended, err := binary.ReadUint16(d.r) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		read += 2 | ||||
| 		e.IntentToAdd = extended&intentToAddMask != 0 | ||||
| 		e.SkipWorktree = extended&skipWorkTreeMask != 0 | ||||
| 	} | ||||
|  | ||||
| 	if err := d.readEntryName(idx, e, flags); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return e, d.padEntry(idx, e, read) | ||||
| } | ||||
|  | ||||
| func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error { | ||||
| 	var name string | ||||
| 	var err error | ||||
|  | ||||
| 	switch idx.Version { | ||||
| 	case 2, 3: | ||||
| 		length := flags & nameMask | ||||
| 		name, err = d.doReadEntryName(length) | ||||
| 	case 4: | ||||
| 		name, err = d.doReadEntryNameV4() | ||||
| 	default: | ||||
| 		return ErrUnsupportedVersion | ||||
| 	} | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	e.Name = name | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (d *Decoder) doReadEntryNameV4() (string, error) { | ||||
| 	l, err := binary.ReadVariableWidthInt(d.r) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	var base string | ||||
| 	if d.lastEntry != nil { | ||||
| 		base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)] | ||||
| 	} | ||||
|  | ||||
| 	name, err := binary.ReadUntil(d.r, '\x00') | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	return base + string(name), nil | ||||
| } | ||||
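A worked example of the v4 prefix compression: if the previous entry's name is "docs/api.txt" and the stream encodes N = 7 followed by the NUL-terminated suffix "index.md", the decoder keeps len("docs/api.txt") - 7 = 5 bytes ("docs/") and appends the suffix, yielding "docs/index.md".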
|  | ||||
| func (d *Decoder) doReadEntryName(length uint16) (string, error) { | ||||
| 	name := make([]byte, length) | ||||
| 	_, err := io.ReadFull(d.r, name) | ||||
|  | ||||
| 	return string(name), err | ||||
| } | ||||
|  | ||||
| // Index entries are padded out to the next 8 byte alignment | ||||
| // for historical reasons related to how C Git read the files. | ||||
| func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { | ||||
| 	if idx.Version == 4 { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	entrySize := read + len(e.Name) | ||||
| 	padLen := 8 - entrySize%8 | ||||
| 	_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)) | ||||
| 	return err | ||||
| } | ||||
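A quick worked example of the alignment rule: with entryHeaderLength = 62 and a 9-byte name, the entry spans 71 bytes, so padLen = 8 - 71%8 = 1 NUL byte of padding; when the size is already a multiple of eight, padLen is a full 8 bytes, matching the "1-8 nul bytes" wording in the format description while keeping the name NUL-terminated.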
|  | ||||
| func (d *Decoder) readExtensions(idx *Index) error { | ||||
| 	// TODO: support the 'Split index' and 'Untracked cache' extensions, taking | ||||
| 	// into account that they are not supported by jgit or libgit | ||||
|  | ||||
| 	var expected []byte | ||||
| 	var err error | ||||
|  | ||||
| 	var header [4]byte | ||||
| 	for { | ||||
| 		expected = d.hash.Sum(nil) | ||||
|  | ||||
| 		var n int | ||||
| 		if n, err = io.ReadFull(d.r, header[:]); err != nil { | ||||
| 			if n == 0 { | ||||
| 				err = io.EOF | ||||
| 			} | ||||
|  | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		err = d.readExtension(idx, header[:]) | ||||
| 		if err != nil { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if err != errUnknownExtension { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return d.readChecksum(expected, header) | ||||
| } | ||||
|  | ||||
| func (d *Decoder) readExtension(idx *Index, header []byte) error { | ||||
| 	switch { | ||||
| 	case bytes.Equal(header, treeExtSignature): | ||||
| 		r, err := d.getExtensionReader() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		idx.Cache = &Tree{} | ||||
| 		d := &treeExtensionDecoder{r} | ||||
| 		if err := d.Decode(idx.Cache); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	case bytes.Equal(header, resolveUndoExtSignature): | ||||
| 		r, err := d.getExtensionReader() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		idx.ResolveUndo = &ResolveUndo{} | ||||
| 		d := &resolveUndoDecoder{r} | ||||
| 		if err := d.Decode(idx.ResolveUndo); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	case bytes.Equal(header, endOfIndexEntryExtSignature): | ||||
| 		r, err := d.getExtensionReader() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		idx.EndOfIndexEntry = &EndOfIndexEntry{} | ||||
| 		d := &endOfIndexEntryDecoder{r} | ||||
| 		if err := d.Decode(idx.EndOfIndexEntry); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	default: | ||||
| 		return errUnknownExtension | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (d *Decoder) getExtensionReader() (*bufio.Reader, error) { | ||||
| 	size, err := binary.ReadUint32(d.r) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(size)}) | ||||
| 	return d.extReader, nil | ||||
| } | ||||
|  | ||||
| func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { | ||||
| 	var h plumbing.Hash | ||||
| 	copy(h[:4], alreadyRead[:]) | ||||
|  | ||||
| 	if _, err := io.ReadFull(d.r, h[4:]); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if !bytes.Equal(h[:], expected) { | ||||
| 		return ErrInvalidChecksum | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func validateHeader(r io.Reader) (version uint32, err error) { | ||||
| 	var s = make([]byte, 4) | ||||
| 	if _, err := io.ReadFull(r, s); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	if !bytes.Equal(s, indexSignature) { | ||||
| 		return 0, ErrMalformedSignature | ||||
| 	} | ||||
|  | ||||
| 	version, err = binary.ReadUint32(r) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max { | ||||
| 		return 0, ErrUnsupportedVersion | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| type treeExtensionDecoder struct { | ||||
| 	r *bufio.Reader | ||||
| } | ||||
|  | ||||
| func (d *treeExtensionDecoder) Decode(t *Tree) error { | ||||
| 	for { | ||||
| 		e, err := d.readEntry() | ||||
| 		if err != nil { | ||||
| 			if err == io.EOF { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		if e == nil { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		t.Entries = append(t.Entries, *e) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) { | ||||
| 	e := &TreeEntry{} | ||||
|  | ||||
| 	path, err := binary.ReadUntil(d.r, '\x00') | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	e.Path = string(path) | ||||
|  | ||||
| 	count, err := binary.ReadUntil(d.r, ' ') | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	i, err := strconv.Atoi(string(count)) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// An entry can be in an invalidated state and is represented by having a | ||||
| 	// negative number in the entry_count field. | ||||
| 	if i < 0 { | ||||
| 		return nil, nil | ||||
| 	} | ||||
|  | ||||
| 	e.Entries = i | ||||
| 	trees, err := binary.ReadUntil(d.r, '\n') | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	i, err = strconv.Atoi(string(trees)) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	e.Trees = i | ||||
| 	if _, err := io.ReadFull(d.r, e.Hash[:]); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return e, nil | ||||
| } | ||||
|  | ||||
| type resolveUndoDecoder struct { | ||||
| 	r *bufio.Reader | ||||
| } | ||||
|  | ||||
| func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error { | ||||
| 	for { | ||||
| 		e, err := d.readEntry() | ||||
| 		if err != nil { | ||||
| 			if err == io.EOF { | ||||
| 				return nil | ||||
| 			} | ||||
|  | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		ru.Entries = append(ru.Entries, *e) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) { | ||||
| 	e := &ResolveUndoEntry{ | ||||
| 		Stages: make(map[Stage]plumbing.Hash), | ||||
| 	} | ||||
|  | ||||
| 	path, err := binary.ReadUntil(d.r, '\x00') | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	e.Path = string(path) | ||||
|  | ||||
| 	for i := 0; i < 3; i++ { | ||||
| 		if err := d.readStage(e, Stage(i+1)); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for s := range e.Stages { | ||||
| 		var hash plumbing.Hash | ||||
| 		if _, err := io.ReadFull(d.r, hash[:]); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		e.Stages[s] = hash | ||||
| 	} | ||||
|  | ||||
| 	return e, nil | ||||
| } | ||||
|  | ||||
| func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error { | ||||
| 	ascii, err := binary.ReadUntil(d.r, '\x00') | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	stage, err := strconv.ParseInt(string(ascii), 8, 64) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if stage != 0 { | ||||
| 		e.Stages[s] = plumbing.ZeroHash | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| type endOfIndexEntryDecoder struct { | ||||
| 	r *bufio.Reader | ||||
| } | ||||
|  | ||||
| func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error { | ||||
| 	var err error | ||||
| 	e.Offset, err = binary.ReadUint32(d.r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	_, err = io.ReadFull(d.r, e.Hash[:]) | ||||
| 	return err | ||||
| } | ||||
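Putting the decoder together, reading a repository's index file looks roughly like this (a sketch; the on-disk path is an assumption about the standard repository layout):

    f, err := os.Open(filepath.Join(".git", "index"))
    if err != nil {
        return err
    }
    defer f.Close()

    idx := &index.Index{}
    if err := index.NewDecoder(f).Decode(idx); err != nil {
        return err // e.g. ErrInvalidChecksum if the trailing SHA-1 does not match
    }

    for _, e := range idx.Entries {
        fmt.Println(e.Name, e.Hash)
    }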
							
								
								
									
360 vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go (generated, vendored, new file)
							| @@ -0,0 +1,360 @@ | ||||
| // Package index implements encoding and decoding of index format files. | ||||
| // | ||||
| //    Git index format | ||||
| //    ================ | ||||
| // | ||||
| //    == The Git index file has the following format | ||||
| // | ||||
| //      All binary numbers are in network byte order. Version 2 is described | ||||
| //      here unless stated otherwise. | ||||
| // | ||||
| //      - A 12-byte header consisting of | ||||
| // | ||||
| //        4-byte signature: | ||||
| //          The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache") | ||||
| // | ||||
| //        4-byte version number: | ||||
| //          The current supported versions are 2, 3 and 4. | ||||
| // | ||||
| //        32-bit number of index entries. | ||||
| // | ||||
| //      - A number of sorted index entries (see below). | ||||
| // | ||||
| //      - Extensions | ||||
| // | ||||
| //        Extensions are identified by signature. Optional extensions can | ||||
| //        be ignored if Git does not understand them. | ||||
| // | ||||
| //        Git currently supports cached tree and resolve undo extensions. | ||||
| // | ||||
| //        4-byte extension signature. If the first byte is 'A'..'Z' the | ||||
| //        extension is optional and can be ignored. | ||||
| // | ||||
| //        32-bit size of the extension | ||||
| // | ||||
| //        Extension data | ||||
| // | ||||
| //      - 160-bit SHA-1 over the content of the index file before this | ||||
| //        checksum. | ||||
| // | ||||
| //    == Index entry | ||||
| // | ||||
| //      Index entries are sorted in ascending order on the name field, | ||||
| //      interpreted as a string of unsigned bytes (i.e. memcmp() order, no | ||||
| //      localization, no special casing of directory separator '/'). Entries | ||||
| //      with the same name are sorted by their stage field. | ||||
| // | ||||
| //      32-bit ctime seconds, the last time a file's metadata changed | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit ctime nanosecond fractions | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit mtime seconds, the last time a file's data changed | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit mtime nanosecond fractions | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit dev | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit ino | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit mode, split into (high to low bits) | ||||
| // | ||||
| //        4-bit object type | ||||
| //          valid values in binary are 1000 (regular file), 1010 (symbolic link) | ||||
| //          and 1110 (gitlink) | ||||
| // | ||||
| //        3-bit unused | ||||
| // | ||||
| //        9-bit unix permission. Only 0755 and 0644 are valid for regular files. | ||||
| //        Symbolic links and gitlinks have value 0 in this field. | ||||
| // | ||||
| //      32-bit uid | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit gid | ||||
| //        this is stat(2) data | ||||
| // | ||||
| //      32-bit file size | ||||
| //        This is the on-disk size from stat(2), truncated to 32-bit. | ||||
| // | ||||
| //      160-bit SHA-1 for the represented object | ||||
| // | ||||
| //      A 16-bit 'flags' field split into (high to low bits) | ||||
| // | ||||
| //        1-bit assume-valid flag | ||||
| // | ||||
| //        1-bit extended flag (must be zero in version 2) | ||||
| // | ||||
| //        2-bit stage (during merge) | ||||
| // | ||||
| //        12-bit name length if the length is less than 0xFFF; otherwise 0xFFF | ||||
| //        is stored in this field. | ||||
| // | ||||
| //      (Version 3 or later) A 16-bit field, only applicable if the | ||||
| //      "extended flag" above is 1, split into (high to low bits). | ||||
| // | ||||
| //        1-bit reserved for future | ||||
| // | ||||
| //        1-bit skip-worktree flag (used by sparse checkout) | ||||
| // | ||||
| //        1-bit intent-to-add flag (used by "git add -N") | ||||
| // | ||||
| //        13-bit unused, must be zero | ||||
| // | ||||
| //      Entry path name (variable length) relative to top level directory | ||||
| //        (without leading slash). '/' is used as path separator. The special | ||||
| //        path components ".", ".." and ".git" (without quotes) are disallowed. | ||||
| //        Trailing slash is also disallowed. | ||||
| // | ||||
| //        The exact encoding is undefined, but the '.' and '/' characters | ||||
| //        are encoded in 7-bit ASCII and the encoding cannot contain a NUL | ||||
| //        byte (iow, this is a UNIX pathname). | ||||
| // | ||||
| //      (Version 4) In version 4, the entry path name is prefix-compressed | ||||
| //        relative to the path name for the previous entry (the very first | ||||
| //        entry is encoded as if the path name for the previous entry is an | ||||
| //        empty string).  At the beginning of an entry, an integer N in the | ||||
| //        variable width encoding (the same encoding as the offset is encoded | ||||
| //        for OFS_DELTA pack entries; see pack-format.txt) is stored, followed | ||||
| //        by a NUL-terminated string S.  Removing N bytes from the end of the | ||||
| //        path name for the previous entry, and replacing it with the string S | ||||
| //        yields the path name for this entry. | ||||
| // | ||||
| //      1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes | ||||
| //      while keeping the name NUL-terminated. | ||||
| // | ||||
| //      (Version 4) In version 4, the padding after the pathname does not | ||||
| //      exist. | ||||
| // | ||||
| //      Interpretation of index entries in split index mode is completely | ||||
| //      different. See below for details. | ||||
| // | ||||
| //    == Extensions | ||||
| // | ||||
| //    === Cached tree | ||||
| // | ||||
| //      Cached tree extension contains pre-computed hashes for trees that can | ||||
| //      be derived from the index. It helps speed up tree object generation | ||||
| //      from index for a new commit. | ||||
| // | ||||
| //      When a path is updated in index, the path must be invalidated and | ||||
| //      removed from tree cache. | ||||
| // | ||||
| //      The signature for this extension is { 'T', 'R', 'E', 'E' }. | ||||
| // | ||||
| //      A series of entries fill the entire extension; each of which | ||||
| //      consists of: | ||||
| // | ||||
| //      - NUL-terminated path component (relative to its parent directory); | ||||
| // | ||||
| //      - ASCII decimal number of entries in the index that is covered by the | ||||
| //        tree this entry represents (entry_count); | ||||
| // | ||||
| //      - A space (ASCII 32); | ||||
| // | ||||
| //      - ASCII decimal number that represents the number of subtrees this | ||||
| //        tree has; | ||||
| // | ||||
| //      - A newline (ASCII 10); and | ||||
| // | ||||
| //      - 160-bit object name for the object that would result from writing | ||||
| //        this span of index as a tree. | ||||
| // | ||||
| //      An entry can be in an invalidated state and is represented by having | ||||
| //      a negative number in the entry_count field. In this case, there is no | ||||
| //      object name and the next entry starts immediately after the newline. | ||||
| //      When writing an invalid entry, -1 should always be used as entry_count. | ||||
| // | ||||
| //      The entries are written out in the top-down, depth-first order.  The | ||||
| //      first entry represents the root level of the repository, followed by the | ||||
| //      first subtree--let's call this A--of the root level (with its name | ||||
| //      relative to the root level), followed by the first subtree of A (with | ||||
| //      its name relative to A), ... | ||||
| // | ||||
| //    === Resolve undo | ||||
| // | ||||
| //      A conflict is represented in the index as a set of higher stage entries. | ||||
| //      When a conflict is resolved (e.g. with "git add path"), these higher | ||||
| //      stage entries will be removed and a stage-0 entry with proper resolution | ||||
| //      is added. | ||||
| // | ||||
| //      When these higher stage entries are removed, they are saved in the | ||||
| //      resolve undo extension, so that conflicts can be recreated (e.g. with | ||||
| //      "git checkout -m"), in case users want to redo a conflict resolution | ||||
| //      from scratch. | ||||
| // | ||||
| //      The signature for this extension is { 'R', 'E', 'U', 'C' }. | ||||
| // | ||||
| //      A series of entries fill the entire extension; each of which | ||||
| //      consists of: | ||||
| // | ||||
| //      - NUL-terminated pathname the entry describes (relative to the root of | ||||
| //        the repository, i.e. full pathname); | ||||
| // | ||||
| //      - Three NUL-terminated ASCII octal numbers, entry mode of entries in | ||||
| //        stage 1 to 3 (a missing stage is represented by "0" in this field); | ||||
| //        and | ||||
| // | ||||
| //      - At most three 160-bit object names of the entry in stages from 1 to 3 | ||||
| //        (nothing is written for a missing stage). | ||||
| // | ||||
| //    === Split index | ||||
| // | ||||
| //      In split index mode, the majority of index entries could be stored | ||||
| //      in a separate file. This extension records the changes to be made on | ||||
| //      top of that to produce the final index. | ||||
| // | ||||
| //      The signature for this extension is { 'l', 'i', 'n', 'k' }. | ||||
| // | ||||
| //      The extension consists of: | ||||
| // | ||||
| //      - 160-bit SHA-1 of the shared index file. The shared index file path | ||||
| //        is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the | ||||
| //        index does not require a shared index file. | ||||
| // | ||||
| //      - An ewah-encoded delete bitmap, each bit represents an entry in the | ||||
| //        shared index. If a bit is set, its corresponding entry in the | ||||
| //        shared index will be removed from the final index.  Note, because | ||||
| //        a delete operation changes index entry positions, but we do need | ||||
| //        original positions in replace phase, it's best to just mark | ||||
| //        entries for removal, then do a mass deletion after replacement. | ||||
| // | ||||
| //      - An ewah-encoded replace bitmap, each bit represents an entry in | ||||
| //        the shared index. If a bit is set, its corresponding entry in the | ||||
| //        shared index will be replaced with an entry in this index | ||||
| //        file. All replaced entries are stored in sorted order in this | ||||
| //        index. The first "1" bit in the replace bitmap corresponds to the | ||||
| //        first index entry, the second "1" bit to the second entry and so | ||||
| //        on. Replaced entries may have empty path names to save space. | ||||
| // | ||||
| //      The remaining index entries after replaced ones will be added to the | ||||
| //      final index. These added entries are also sorted by entry name then | ||||
| //      stage. | ||||
| // | ||||
| //    == Untracked cache | ||||
| // | ||||
| //      Untracked cache saves the untracked file list and necessary data to | ||||
| //      verify the cache. The signature for this extension is { 'U', 'N', | ||||
| //      'T', 'R' }. | ||||
| // | ||||
| //      The extension starts with | ||||
| // | ||||
| //      - A sequence of NUL-terminated strings, preceded by the size of the | ||||
| //        sequence in variable width encoding. Each string describes the | ||||
| //        environment where the cache can be used. | ||||
| // | ||||
| //      - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from | ||||
| //        ctime field until "file size". | ||||
| // | ||||
| //      - Stat data of plumbing.excludesfile | ||||
| // | ||||
| //      - 32-bit dir_flags (see struct dir_struct) | ||||
| // | ||||
| //      - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file | ||||
| //        does not exist. | ||||
| // | ||||
| //      - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does | ||||
| //        not exist. | ||||
| // | ||||
| //      - NUL-terminated string of per-dir exclude file name. This usually | ||||
| //        is ".gitignore". | ||||
| // | ||||
| //      - The number of following directory blocks, variable width | ||||
| //        encoding. If this number is zero, the extension ends here with a | ||||
| //        following NUL. | ||||
| // | ||||
| //      - A number of directory blocks in depth-first-search order, each | ||||
| //        consists of | ||||
| // | ||||
| //        - The number of untracked entries, variable width encoding. | ||||
| // | ||||
| //        - The number of sub-directory blocks, variable width encoding. | ||||
| // | ||||
| //        - The directory name terminated by NUL. | ||||
| // | ||||
| //        - A number of untracked file/dir names terminated by NUL. | ||||
| // | ||||
| //    The remaining data of each directory block is grouped by type: | ||||
| // | ||||
| //      - An ewah bitmap, the n-th bit marks whether the n-th directory has | ||||
| //        valid untracked cache entries. | ||||
| // | ||||
| //      - An ewah bitmap, the n-th bit records "check-only" bit of | ||||
| //        read_directory_recursive() for the n-th directory. | ||||
| // | ||||
| //      - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data | ||||
| //        is valid for the n-th directory and exists in the next data. | ||||
| // | ||||
| //      - An array of stat data. The n-th data corresponds with the n-th | ||||
| //        "one" bit in the previous ewah bitmap. | ||||
| // | ||||
| //      - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit | ||||
| //        in the previous ewah bitmap. | ||||
| // | ||||
| //      - One NUL. | ||||
| // | ||||
| //   == File System Monitor cache | ||||
| // | ||||
| //     The file system monitor cache tracks files for which the core.fsmonitor | ||||
| //     hook has told us about changes.  The signature for this extension is | ||||
| //     { 'F', 'S', 'M', 'N' }. | ||||
| // | ||||
| //     The extension starts with | ||||
| // | ||||
| //     - 32-bit version number: the current supported version is 1. | ||||
| // | ||||
| //     - 64-bit time: the extension data reflects all changes through the given | ||||
| //       time which is stored as the nanoseconds elapsed since midnight, | ||||
| //       January 1, 1970. | ||||
| // | ||||
| //    - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap. | ||||
| // | ||||
| //    - An ewah bitmap, the n-th bit indicates whether the n-th index entry | ||||
| //      is not CE_FSMONITOR_VALID. | ||||
| // | ||||
| //  == End of Index Entry | ||||
| // | ||||
| //    The End of Index Entry (EOIE) is used to locate the end of the variable | ||||
| //    length index entries and the beginning of the extensions. Code can take | ||||
| //    advantage of this to quickly locate the index extensions without having | ||||
| //    to parse through all of the index entries. | ||||
| // | ||||
| //    Because it must be able to be loaded before the variable length cache | ||||
| //    entries and other index extensions, this extension must be written last. | ||||
| //    The signature for this extension is { 'E', 'O', 'I', 'E' }. | ||||
| // | ||||
| //    The extension consists of: | ||||
| // | ||||
| //    - 32-bit offset to the end of the index entries | ||||
| // | ||||
| //    - 160-bit SHA-1 over the extension types and their sizes (but not | ||||
| //      their contents).  E.g. if we have "TREE" extension that is N-bytes | ||||
| //      long, "REUC" extension that is M-bytes long, followed by "EOIE", | ||||
| //      then the hash would be: | ||||
| // | ||||
| //      SHA-1("TREE" + <binary representation of N> + | ||||
| //        "REUC" + <binary representation of M>) | ||||
| // | ||||
| //  == Index Entry Offset Table | ||||
| // | ||||
| //    The Index Entry Offset Table (IEOT) is used to help address the CPU | ||||
| //    cost of loading the index by enabling multi-threading the process of | ||||
| //    converting cache entries from the on-disk format to the in-memory format. | ||||
| //    The signature for this extension is { 'I', 'E', 'O', 'T' }. | ||||
| // | ||||
| //    The extension consists of: | ||||
| // | ||||
| //    - 32-bit version (currently 1) | ||||
| // | ||||
| //    - A number of index offset entries each consisting of: | ||||
| // | ||||
| //    - 32-bit offset from the beginning of the file to the first cache entry | ||||
| //      in this block of entries. | ||||
| // | ||||
| //    - 32-bit count of cache entries in this block | ||||
| package index | ||||
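As a concrete reading of the header layout above, a minimal sketch that validates the 12-byte header by hand (all numbers in network byte order, as the format states; r is an assumed io.Reader over an index file):

    var header [12]byte
    if _, err := io.ReadFull(r, header[:]); err != nil {
        return err
    }
    if !bytes.Equal(header[:4], []byte("DIRC")) { // 4-byte signature
        return errors.New("not an index file")
    }
    version := binary.BigEndian.Uint32(header[4:8])     // 2, 3 or 4
    entryCount := binary.BigEndian.Uint32(header[8:12]) // number of index entries
    fmt.Printf("version %d, %d entries\n", version, entryCount)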
							
								
								
									
150 vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go (generated, vendored, new file)
							| @@ -0,0 +1,150 @@ | ||||
| package index | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"crypto/sha1" | ||||
| 	"errors" | ||||
| 	"hash" | ||||
| 	"io" | ||||
| 	"sort" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/utils/binary" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// EncodeVersionSupported is the index version supported by the encoder | ||||
| 	EncodeVersionSupported uint32 = 2 | ||||
|  | ||||
| 	// ErrInvalidTimestamp is returned by Encode if the Index contains an Entry | ||||
| 	// with negative timestamp values | ||||
| 	ErrInvalidTimestamp = errors.New("negative timestamps are not allowed") | ||||
| ) | ||||
|  | ||||
| // An Encoder writes an Index to an output stream. | ||||
| type Encoder struct { | ||||
| 	w    io.Writer | ||||
| 	hash hash.Hash | ||||
| } | ||||
|  | ||||
| // NewEncoder returns a new encoder that writes to w. | ||||
| func NewEncoder(w io.Writer) *Encoder { | ||||
| 	h := sha1.New() | ||||
| 	mw := io.MultiWriter(w, h) | ||||
| 	return &Encoder{mw, h} | ||||
| } | ||||
|  | ||||
| // Encode writes the Index to the stream of the encoder. | ||||
| func (e *Encoder) Encode(idx *Index) error { | ||||
| 	// TODO: support versions v3 and v4 | ||||
| 	// TODO: support extensions | ||||
| 	if idx.Version != EncodeVersionSupported { | ||||
| 		return ErrUnsupportedVersion | ||||
| 	} | ||||
|  | ||||
| 	if err := e.encodeHeader(idx); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if err := e.encodeEntries(idx); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return e.encodeFooter() | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeHeader(idx *Index) error { | ||||
| 	return binary.Write(e.w, | ||||
| 		indexSignature, | ||||
| 		idx.Version, | ||||
| 		uint32(len(idx.Entries)), | ||||
| 	) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeEntries(idx *Index) error { | ||||
| 	sort.Sort(byName(idx.Entries)) | ||||
|  | ||||
| 	for _, entry := range idx.Entries { | ||||
| 		if err := e.encodeEntry(entry); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		wrote := entryHeaderLength + len(entry.Name) | ||||
| 		if err := e.padEntry(wrote); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeEntry(entry *Entry) error { | ||||
| 	if entry.IntentToAdd || entry.SkipWorktree { | ||||
| 		return ErrUnsupportedVersion | ||||
| 	} | ||||
|  | ||||
| 	sec, nsec, err := e.timeToUint32(&entry.CreatedAt) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	flags := uint16(entry.Stage&0x3) << 12 | ||||
| 	if l := len(entry.Name); l < nameMask { | ||||
| 		flags |= uint16(l) | ||||
| 	} else { | ||||
| 		flags |= nameMask | ||||
| 	} | ||||
|  | ||||
| 	flow := []interface{}{ | ||||
| 		sec, nsec, | ||||
| 		msec, mnsec, | ||||
| 		entry.Dev, | ||||
| 		entry.Inode, | ||||
| 		entry.Mode, | ||||
| 		entry.UID, | ||||
| 		entry.GID, | ||||
| 		entry.Size, | ||||
| 		entry.Hash[:], | ||||
| 		flags, | ||||
| 	} | ||||
|  | ||||
| 	if err := binary.Write(e.w, flow...); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return binary.Write(e.w, []byte(entry.Name)) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { | ||||
| 	if t.IsZero() { | ||||
| 		return 0, 0, nil | ||||
| 	} | ||||
|  | ||||
| 	if t.Unix() < 0 || t.UnixNano() < 0 { | ||||
| 		return 0, 0, ErrInvalidTimestamp | ||||
| 	} | ||||
|  | ||||
| 	return uint32(t.Unix()), uint32(t.Nanosecond()), nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) padEntry(wrote int) error { | ||||
| 	padLen := 8 - wrote%8 | ||||
|  | ||||
| 	_, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeFooter() error { | ||||
| 	return binary.Write(e.w, e.hash.Sum(nil)) | ||||
| } | ||||
|  | ||||
| type byName []*Entry | ||||
|  | ||||
| func (l byName) Len() int           { return len(l) } | ||||
| func (l byName) Swap(i, j int)      { l[i], l[j] = l[j], l[i] } | ||||
| func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name } | ||||
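A usage sketch for the encoder (only version 2 is accepted, per the check in Encode; the blob hash here is an arbitrary example value):

    idx := &index.Index{Version: 2}
    e := idx.Add("hello.txt")
    e.Hash = plumbing.NewHash("aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d")
    e.Size = 6

    var buf bytes.Buffer
    if err := index.NewEncoder(&buf).Encode(idx); err != nil {
        return err
    }
    // buf now holds a v2 index: header, one padded entry, SHA-1 footer.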
							
								
								
									
213 vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go (generated, vendored, new file)
							| @@ -0,0 +1,213 @@ | ||||
| package index | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"path/filepath" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/filemode" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// ErrUnsupportedVersion is returned by Decode when the index file version | ||||
| 	// is not supported. | ||||
| 	ErrUnsupportedVersion = errors.New("unsupported version") | ||||
| 	// ErrEntryNotFound is returned by Index.Entry, if an entry is not found. | ||||
| 	ErrEntryNotFound = errors.New("entry not found") | ||||
|  | ||||
| 	indexSignature              = []byte{'D', 'I', 'R', 'C'} | ||||
| 	treeExtSignature            = []byte{'T', 'R', 'E', 'E'} | ||||
| 	resolveUndoExtSignature     = []byte{'R', 'E', 'U', 'C'} | ||||
| 	endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'} | ||||
| ) | ||||
|  | ||||
| // Stage during merge | ||||
| type Stage int | ||||
|  | ||||
| const ( | ||||
| 	// Merged is the default stage, fully merged | ||||
| 	Merged Stage = 1 | ||||
| 	// AncestorMode is the base revision | ||||
| 	AncestorMode Stage = 1 | ||||
| 	// OurMode is the first tree revision, ours | ||||
| 	OurMode Stage = 2 | ||||
| 	// TheirMode is the second tree revision, theirs | ||||
| 	TheirMode Stage = 3 | ||||
| ) | ||||
|  | ||||
| // Index contains information about the objects currently checked out in the | ||||
| // worktree and about the working files. Changes in the worktree are detected | ||||
| // using this Index. The Index is also used during merges. | ||||
| type Index struct { | ||||
| 	// Version is index version | ||||
| 	Version uint32 | ||||
| 	// Entries is the collection of entries represented by this Index. The | ||||
| 	// order of this collection is not guaranteed | ||||
| 	Entries []*Entry | ||||
| 	// Cache represents the 'Cached tree' extension | ||||
| 	Cache *Tree | ||||
| 	// ResolveUndo represents the 'Resolve undo' extension | ||||
| 	ResolveUndo *ResolveUndo | ||||
| 	// EndOfIndexEntry represents the 'End of Index Entry' extension | ||||
| 	EndOfIndexEntry *EndOfIndexEntry | ||||
| } | ||||
|  | ||||
| // Add creates a new Entry and returns it. The caller should first check that | ||||
| // another entry with the same path does not exist. | ||||
| func (i *Index) Add(path string) *Entry { | ||||
| 	e := &Entry{ | ||||
| 		Name: filepath.ToSlash(path), | ||||
| 	} | ||||
|  | ||||
| 	i.Entries = append(i.Entries, e) | ||||
| 	return e | ||||
| } | ||||
|  | ||||
| // Entry returns the entry that matches the given path, if any. | ||||
| func (i *Index) Entry(path string) (*Entry, error) { | ||||
| 	path = filepath.ToSlash(path) | ||||
| 	for _, e := range i.Entries { | ||||
| 		if e.Name == path { | ||||
| 			return e, nil | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil, ErrEntryNotFound | ||||
| } | ||||
|  | ||||
| // Remove removes the entry that matches the given path and returns the | ||||
| // deleted entry. | ||||
| func (i *Index) Remove(path string) (*Entry, error) { | ||||
| 	path = filepath.ToSlash(path) | ||||
| 	for index, e := range i.Entries { | ||||
| 		if e.Name == path { | ||||
| 			i.Entries = append(i.Entries[:index], i.Entries[index+1:]...) | ||||
| 			return e, nil | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil, ErrEntryNotFound | ||||
| } | ||||
|  | ||||
| // Glob returns all entries matching pattern, or nil if there is no matching | ||||
| // entry. The syntax of patterns is the same as in filepath.Glob. | ||||
| func (i *Index) Glob(pattern string) (matches []*Entry, err error) { | ||||
| 	pattern = filepath.ToSlash(pattern) | ||||
| 	for _, e := range i.Entries { | ||||
| 		m, err := match(pattern, e.Name) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if m { | ||||
| 			matches = append(matches, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
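For example, with the filepath.Glob pattern syntax used here (a sketch over an already-populated Index value):

    matches, err := idx.Glob("vendor/*/LICENSE")
    if err != nil {
        return err
    }
    for _, e := range matches {
        fmt.Println(e.Name) // each vendored LICENSE entry in the index
    }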
|  | ||||
| // String is equivalent to `git ls-files --stage --debug` | ||||
| func (i *Index) String() string { | ||||
| 	buf := bytes.NewBuffer(nil) | ||||
| 	for _, e := range i.Entries { | ||||
| 		buf.WriteString(e.String()) | ||||
| 	} | ||||
|  | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| // Entry represents a single file (or stage of a file) in the cache. An entry | ||||
| // represents exactly one stage of a file. If a file path is unmerged then | ||||
| // multiple Entry instances may appear for the same path name. | ||||
| type Entry struct { | ||||
| 	// Hash is the SHA1 of the represented file | ||||
| 	Hash plumbing.Hash | ||||
| 	// Name is the Entry path name relative to the top level directory | ||||
| 	Name string | ||||
| 	// CreatedAt time when the tracked path was created | ||||
| 	CreatedAt time.Time | ||||
| 	// ModifiedAt time when the tracked path was changed | ||||
| 	ModifiedAt time.Time | ||||
| 	// Dev and Inode of the tracked path | ||||
| 	Dev, Inode uint32 | ||||
| 	// Mode of the path | ||||
| 	Mode filemode.FileMode | ||||
| 	// UID and GID, userid and group id of the owner | ||||
| 	UID, GID uint32 | ||||
| 	// Size is the length in bytes for regular files | ||||
| 	Size uint32 | ||||
| 	// Stage, during a merge, defines which stage this entry represents | ||||
| 	// https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging | ||||
| 	Stage Stage | ||||
| 	// SkipWorktree used in sparse checkouts | ||||
| 	// https://git-scm.com/docs/git-read-tree#_sparse_checkout | ||||
| 	SkipWorktree bool | ||||
| 	// IntentToAdd records only the fact that the path will be added later | ||||
| 	// https://git-scm.com/docs/git-add ("git add -N") | ||||
| 	IntentToAdd bool | ||||
| } | ||||
|  | ||||
| func (e Entry) String() string { | ||||
| 	buf := bytes.NewBuffer(nil) | ||||
|  | ||||
| 	fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name) | ||||
| 	fmt.Fprintf(buf, "  ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond()) | ||||
| 	fmt.Fprintf(buf, "  mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond()) | ||||
| 	fmt.Fprintf(buf, "  dev: %d\tino: %d\n", e.Dev, e.Inode) | ||||
| 	fmt.Fprintf(buf, "  uid: %d\tgid: %d\n", e.UID, e.GID) | ||||
| 	fmt.Fprintf(buf, "  size: %d\tflags: %x\n", e.Size, 0) | ||||
|  | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| // Tree contains pre-computed hashes for trees that can be derived from the | ||||
| // index. It helps speed up tree object generation from index for a new commit. | ||||
| type Tree struct { | ||||
| 	Entries []TreeEntry | ||||
| } | ||||
|  | ||||
| // TreeEntry entry of a cached Tree | ||||
| type TreeEntry struct { | ||||
| 	// Path component (relative to its parent directory) | ||||
| 	Path string | ||||
| 	// Entries is the number of entries in the index that is covered by the tree | ||||
| 	// this entry represents. | ||||
| 	Entries int | ||||
| 	// Trees is the number that represents the number of subtrees this tree has | ||||
| 	Trees int | ||||
| 	// Hash object name for the object that would result from writing this span | ||||
| 	// of index as a tree. | ||||
| 	Hash plumbing.Hash | ||||
| } | ||||
|  | ||||
| // ResolveUndo is used when a conflict is resolved (e.g. with "git add path"), | ||||
| // these higher stage entries are removed and a stage-0 entry with proper | ||||
| // resolution is added. When these higher stage entries are removed, they are | ||||
| // saved in the resolve undo extension. | ||||
| type ResolveUndo struct { | ||||
| 	Entries []ResolveUndoEntry | ||||
| } | ||||
|  | ||||
| // ResolveUndoEntry contains the information about a conflict at the time it is resolved | ||||
| type ResolveUndoEntry struct { | ||||
| 	Path   string | ||||
| 	Stages map[Stage]plumbing.Hash | ||||
| } | ||||
|  | ||||
| // EndOfIndexEntry is the End of Index Entry (EOIE) extension, used to locate the end of | ||||
| // the variable length index entries and the beginning of the extensions. Code | ||||
| // can take advantage of this to quickly locate the index extensions without | ||||
| // having to parse through all of the index entries. | ||||
| // | ||||
| //  Because it must be able to be loaded before the variable length cache | ||||
| //  entries and other index extensions, this extension must be written last. | ||||
| type EndOfIndexEntry struct { | ||||
| 	// Offset to the end of the index entries | ||||
| 	Offset uint32 | ||||
| 	// Hash is a SHA-1 over the extension types and their sizes (but not | ||||
| 	// their contents). | ||||
| 	Hash plumbing.Hash | ||||
| } | ||||
							
								
								
									
186 vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go (generated, vendored, new file)
							| @@ -0,0 +1,186 @@ | ||||
| package index | ||||
|  | ||||
| import ( | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
| // match is filepath.Match with support for matching full paths, not only file names | ||||
| // code from: | ||||
| // https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224 | ||||
| func match(pattern, name string) (matched bool, err error) { | ||||
| Pattern: | ||||
| 	for len(pattern) > 0 { | ||||
| 		var star bool | ||||
| 		var chunk string | ||||
| 		star, chunk, pattern = scanChunk(pattern) | ||||
|  | ||||
| 		// Look for match at current position. | ||||
| 		t, ok, err := matchChunk(chunk, name) | ||||
| 		// if we're the last chunk, make sure we've exhausted the name | ||||
| 		// otherwise we'll give a false result even if we could still match | ||||
| 		// using the star | ||||
| 		if ok && (len(t) == 0 || len(pattern) > 0) { | ||||
| 			name = t | ||||
| 			continue | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| 		if star { | ||||
| 			// Look for match skipping i+1 bytes. | ||||
| 			// Unlike filepath.Match, '*' may also skip '/' here, so the | ||||
| 			// pattern is matched against the full path. | ||||
| 			for i := 0; i < len(name); i++ { | ||||
| 				t, ok, err := matchChunk(chunk, name[i+1:]) | ||||
| 				if ok { | ||||
| 					// if we're the last chunk, make sure we exhausted the name | ||||
| 					if len(pattern) == 0 && len(t) > 0 { | ||||
| 						continue | ||||
| 					} | ||||
| 					name = t | ||||
| 					continue Pattern | ||||
| 				} | ||||
| 				if err != nil { | ||||
| 					return false, err | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		return false, nil | ||||
| 	} | ||||
| 	return len(name) == 0, nil | ||||
| } | ||||
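The practical difference from the standard library shows up when '*' needs to cross a path separator; a small sketch (match is the unexported function above, so this would live in the same package):

    ok, _ := match("vendor/*.go", "vendor/go-git/repo.go")           // true: '*' crosses '/'
    ok2, _ := filepath.Match("vendor/*.go", "vendor/go-git/repo.go") // false: stdlib '*' stops at '/'
    fmt.Println(ok, ok2)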
|  | ||||
| // scanChunk gets the next segment of pattern, which is a non-star string | ||||
| // possibly preceded by a star. | ||||
| func scanChunk(pattern string) (star bool, chunk, rest string) { | ||||
| 	for len(pattern) > 0 && pattern[0] == '*' { | ||||
| 		pattern = pattern[1:] | ||||
| 		star = true | ||||
| 	} | ||||
| 	inrange := false | ||||
| 	var i int | ||||
| Scan: | ||||
| 	for i = 0; i < len(pattern); i++ { | ||||
| 		switch pattern[i] { | ||||
| 		case '\\': | ||||
| 			if runtime.GOOS != "windows" { | ||||
| 				// error check handled in matchChunk: bad pattern. | ||||
| 				if i+1 < len(pattern) { | ||||
| 					i++ | ||||
| 				} | ||||
| 			} | ||||
| 		case '[': | ||||
| 			inrange = true | ||||
| 		case ']': | ||||
| 			inrange = false | ||||
| 		case '*': | ||||
| 			if !inrange { | ||||
| 				break Scan | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return star, pattern[0:i], pattern[i:] | ||||
| } | ||||
|  | ||||
| // matchChunk checks whether chunk matches the beginning of s. | ||||
| // If so, it returns the remainder of s (after the match). | ||||
| // Chunk is all single-character operators: literals, char classes, and ?. | ||||
| func matchChunk(chunk, s string) (rest string, ok bool, err error) { | ||||
| 	for len(chunk) > 0 { | ||||
| 		if len(s) == 0 { | ||||
| 			return | ||||
| 		} | ||||
| 		switch chunk[0] { | ||||
| 		case '[': | ||||
| 			// character class | ||||
| 			r, n := utf8.DecodeRuneInString(s) | ||||
| 			s = s[n:] | ||||
| 			chunk = chunk[1:] | ||||
| 			// We can't end right after '[', we're expecting at least | ||||
| 			// a closing bracket and possibly a caret. | ||||
| 			if len(chunk) == 0 { | ||||
| 				err = filepath.ErrBadPattern | ||||
| 				return | ||||
| 			} | ||||
| 			// possibly negated | ||||
| 			negated := chunk[0] == '^' | ||||
| 			if negated { | ||||
| 				chunk = chunk[1:] | ||||
| 			} | ||||
| 			// parse all ranges | ||||
| 			match := false | ||||
| 			nrange := 0 | ||||
| 			for { | ||||
| 				if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { | ||||
| 					chunk = chunk[1:] | ||||
| 					break | ||||
| 				} | ||||
| 				var lo, hi rune | ||||
| 				if lo, chunk, err = getEsc(chunk); err != nil { | ||||
| 					return | ||||
| 				} | ||||
| 				hi = lo | ||||
| 				if chunk[0] == '-' { | ||||
| 					if hi, chunk, err = getEsc(chunk[1:]); err != nil { | ||||
| 						return | ||||
| 					} | ||||
| 				} | ||||
| 				if lo <= r && r <= hi { | ||||
| 					match = true | ||||
| 				} | ||||
| 				nrange++ | ||||
| 			} | ||||
| 			if match == negated { | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 		case '?': | ||||
| 			_, n := utf8.DecodeRuneInString(s) | ||||
| 			s = s[n:] | ||||
| 			chunk = chunk[1:] | ||||
|  | ||||
| 		case '\\': | ||||
| 			if runtime.GOOS != "windows" { | ||||
| 				chunk = chunk[1:] | ||||
| 				if len(chunk) == 0 { | ||||
| 					err = filepath.ErrBadPattern | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			fallthrough | ||||
|  | ||||
| 		default: | ||||
| 			if chunk[0] != s[0] { | ||||
| 				return | ||||
| 			} | ||||
| 			s = s[1:] | ||||
| 			chunk = chunk[1:] | ||||
| 		} | ||||
| 	} | ||||
| 	return s, true, nil | ||||
| } | ||||
|  | ||||
| // getEsc gets a possibly-escaped character from chunk, for a character class. | ||||
| func getEsc(chunk string) (r rune, nchunk string, err error) { | ||||
| 	if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { | ||||
| 		err = filepath.ErrBadPattern | ||||
| 		return | ||||
| 	} | ||||
| 	if chunk[0] == '\\' && runtime.GOOS != "windows" { | ||||
| 		chunk = chunk[1:] | ||||
| 		if len(chunk) == 0 { | ||||
| 			err = filepath.ErrBadPattern | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	r, n := utf8.DecodeRuneInString(chunk) | ||||
| 	if r == utf8.RuneError && n == 1 { | ||||
| 		err = filepath.ErrBadPattern | ||||
| 	} | ||||
| 	nchunk = chunk[n:] | ||||
| 	if len(nchunk) == 0 { | ||||
| 		err = filepath.ErrBadPattern | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
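The matcher above is a close copy of Go's path/filepath.Match; the visible difference is that the star loop here does not stop at separator characters. For patterns without separators the semantics coincide, so the stdlib function serves as a quick illustration (a sketch, since match itself is unexported):

	m1, _ := filepath.Match("*.go", "main.go")        // true: '*' absorbs "main"
	m2, _ := filepath.Match("ma?n.[a-z]o", "main.go") // true: '?' is one rune, '[a-z]' one class
	_, err := filepath.Match("[", "x")                // err == filepath.ErrBadPattern
	fmt.Println(m1, m2, err)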
2  vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go  generated vendored Normal file
| @@ -0,0 +1,2 @@ | ||||
| // Package objfile implements encoding and decoding of object files. | ||||
| package objfile | ||||
114  vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go  generated vendored Normal file
| @@ -0,0 +1,114 @@ | ||||
| package objfile | ||||
|  | ||||
| import ( | ||||
| 	"compress/zlib" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/format/packfile" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	ErrClosed       = errors.New("objfile: already closed") | ||||
| 	ErrHeader       = errors.New("objfile: invalid header") | ||||
| 	ErrNegativeSize = errors.New("objfile: negative object size") | ||||
| ) | ||||
|  | ||||
| // Reader reads and decodes compressed objfile data from a provided io.Reader. | ||||
| // Reader implements io.ReadCloser. Close should be called when finished with | ||||
| // the Reader. Close will not close the underlying io.Reader. | ||||
| type Reader struct { | ||||
| 	multi  io.Reader | ||||
| 	zlib   io.ReadCloser | ||||
| 	hasher plumbing.Hasher | ||||
| } | ||||
|  | ||||
| // NewReader returns a new Reader reading from r. | ||||
| func NewReader(r io.Reader) (*Reader, error) { | ||||
| 	zlib, err := zlib.NewReader(r) | ||||
| 	if err != nil { | ||||
| 		return nil, packfile.ErrZLib.AddDetails(err.Error()) | ||||
| 	} | ||||
|  | ||||
| 	return &Reader{ | ||||
| 		zlib: zlib, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // Header reads the type and size of the object and prepares the reader to | ||||
| // read the object's contents. | ||||
| func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) { | ||||
| 	var raw []byte | ||||
| 	raw, err = r.readUntil(' ') | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	t, err = plumbing.ParseObjectType(string(raw)) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	raw, err = r.readUntil(0) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	size, err = strconv.ParseInt(string(raw), 10, 64) | ||||
| 	if err != nil { | ||||
| 		err = ErrHeader | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	defer r.prepareForRead(t, size) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // readUntil reads one byte at a time from r until it encounters delim or an | ||||
| // error. | ||||
| func (r *Reader) readUntil(delim byte) ([]byte, error) { | ||||
| 	var buf [1]byte | ||||
| 	value := make([]byte, 0, 16) | ||||
| 	for { | ||||
| 		if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) { | ||||
| 			if err == io.EOF { | ||||
| 				return nil, ErrHeader | ||||
| 			} | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if buf[0] == delim { | ||||
| 			return value, nil | ||||
| 		} | ||||
|  | ||||
| 		value = append(value, buf[0]) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) { | ||||
| 	r.hasher = plumbing.NewHasher(t, size) | ||||
| 	r.multi = io.TeeReader(r.zlib, r.hasher) | ||||
| } | ||||
|  | ||||
| // Read reads len(p) bytes into p from the object data stream. It returns | ||||
| // the number of bytes read (0 <= n <= len(p)) and any error encountered. Even | ||||
| // if Read returns n < len(p), it may use all of p as scratch space during the | ||||
| // call. | ||||
| // | ||||
| // If Read encounters the end of the data stream it will return err == io.EOF, | ||||
| // either in the current call if n > 0 or in a subsequent call. | ||||
| func (r *Reader) Read(p []byte) (n int, err error) { | ||||
| 	return r.multi.Read(p) | ||||
| } | ||||
|  | ||||
| // Hash returns the hash of the object data stream that has been read so far. | ||||
| func (r *Reader) Hash() plumbing.Hash { | ||||
| 	return r.hasher.Sum() | ||||
| } | ||||
|  | ||||
| // Close releases any resources consumed by the Reader. Calling Close does not | ||||
| // close the wrapped io.Reader originally passed to NewReader. | ||||
| func (r *Reader) Close() error { | ||||
| 	return r.zlib.Close() | ||||
| } | ||||
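A minimal usage sketch for the reader above (f stands for any io.Reader over zlib-compressed objfile data; error handling elided):

	r, _ := objfile.NewReader(f)
	defer r.Close()

	t, size, _ := r.Header()       // e.g. plumbing.BlobObject, 42
	content := make([]byte, size)
	_, _ = io.ReadFull(r, content) // Read is only wired up after Header is called
	fmt.Println(t, r.Hash())       // object id of the bytes read so far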
109  vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go  generated vendored Normal file
| @@ -0,0 +1,109 @@ | ||||
| package objfile | ||||
|  | ||||
| import ( | ||||
| 	"compress/zlib" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)") | ||||
| ) | ||||
|  | ||||
| // Writer writes and encodes data in compressed objfile format to a provided | ||||
| // io.Writer. Close should be called when finished with the Writer. Close will | ||||
| // not close the underlying io.Writer. | ||||
| type Writer struct { | ||||
| 	raw    io.Writer | ||||
| 	zlib   io.WriteCloser | ||||
| 	hasher plumbing.Hasher | ||||
| 	multi  io.Writer | ||||
|  | ||||
| 	closed  bool | ||||
| 	pending int64 // number of unwritten bytes | ||||
| } | ||||
|  | ||||
| // NewWriter returns a new Writer writing to w. | ||||
| // | ||||
| // The returned Writer implements io.WriteCloser. Close should be called when | ||||
| // finished with the Writer. Close will not close the underlying io.Writer. | ||||
| func NewWriter(w io.Writer) *Writer { | ||||
| 	return &Writer{ | ||||
| 		raw:  w, | ||||
| 		zlib: zlib.NewWriter(w), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WriteHeader writes the type and the size and prepares to accept the object's | ||||
| // contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a | ||||
| // negative size is provided, ErrNegativeSize is returned. | ||||
| func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error { | ||||
| 	if !t.Valid() { | ||||
| 		return plumbing.ErrInvalidType | ||||
| 	} | ||||
| 	if size < 0 { | ||||
| 		return ErrNegativeSize | ||||
| 	} | ||||
|  | ||||
| 	b := t.Bytes() | ||||
| 	b = append(b, ' ') | ||||
| 	b = append(b, []byte(strconv.FormatInt(size, 10))...) | ||||
| 	b = append(b, 0) | ||||
|  | ||||
| 	defer w.prepareForWrite(t, size) | ||||
| 	_, err := w.zlib.Write(b) | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) { | ||||
| 	w.pending = size | ||||
|  | ||||
| 	w.hasher = plumbing.NewHasher(t, size) | ||||
| 	w.multi = io.MultiWriter(w.zlib, w.hasher) | ||||
| } | ||||
|  | ||||
| // Write writes the object's contents. Write returns the error ErrOverflow if | ||||
| // more than size bytes are written after WriteHeader. | ||||
| func (w *Writer) Write(p []byte) (n int, err error) { | ||||
| 	if w.closed { | ||||
| 		return 0, ErrClosed | ||||
| 	} | ||||
|  | ||||
| 	overwrite := false | ||||
| 	if int64(len(p)) > w.pending { | ||||
| 		p = p[0:w.pending] | ||||
| 		overwrite = true | ||||
| 	} | ||||
|  | ||||
| 	n, err = w.multi.Write(p) | ||||
| 	w.pending -= int64(n) | ||||
| 	if err == nil && overwrite { | ||||
| 		err = ErrOverflow | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Hash returns the hash of the object data stream that has been written so far. | ||||
| // It can be called before or after Close. | ||||
| func (w *Writer) Hash() plumbing.Hash { | ||||
| 	return w.hasher.Sum() // Not yet closed, return hash of data written so far | ||||
| } | ||||
|  | ||||
| // Close releases any resources consumed by the Writer. | ||||
| // | ||||
| // Calling Close does not close the wrapped io.Writer originally passed to | ||||
| // NewWriter. | ||||
| func (w *Writer) Close() error { | ||||
| 	if err := w.zlib.Close(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	w.closed = true | ||||
| 	return nil | ||||
| } | ||||
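And the mirror-image sketch for the writer (buf stands for any io.Writer, e.g. a *bytes.Buffer; error handling elided):

	w := objfile.NewWriter(buf)
	data := []byte("hello")
	_ = w.WriteHeader(plumbing.BlobObject, int64(len(data)))
	_, _ = w.Write(data)  // writing more than the declared size yields ErrOverflow
	fmt.Println(w.Hash()) // object id of the header plus contents written so far
	_ = w.Close()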
78  vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go  generated vendored Normal file
| @@ -0,0 +1,78 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"compress/zlib" | ||||
| 	"io" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing/storer" | ||||
| 	"github.com/go-git/go-git/v5/utils/ioutil" | ||||
| ) | ||||
|  | ||||
| var signature = []byte{'P', 'A', 'C', 'K'} | ||||
|  | ||||
| const ( | ||||
| 	// VersionSupported is the packfile version supported by this package | ||||
| 	VersionSupported uint32 = 2 | ||||
|  | ||||
| 	firstLengthBits = uint8(4)   // the first byte of an object header has 4 bits to store the length | ||||
| 	lengthBits      = uint8(7)   // subsequent bytes have 7 bits to store the length | ||||
| 	maskFirstLength = 15         // 0000 1111 | ||||
| 	maskContinue    = 0x80       // 1000 0000 | ||||
| 	maskLength      = uint8(127) // 0111 1111 | ||||
| 	maskType        = uint8(112) // 0111 0000 | ||||
| ) | ||||
|  | ||||
| // UpdateObjectStorage updates the storer with the objects in the given | ||||
| // packfile. | ||||
| func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error { | ||||
| 	if pw, ok := s.(storer.PackfileWriter); ok { | ||||
| 		return WritePackfileToObjectStorage(pw, packfile) | ||||
| 	} | ||||
|  | ||||
| 	p, err := NewParserWithStorage(NewScanner(packfile), s) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	_, err = p.Parse() | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // WritePackfileToObjectStorage writes all the packfile objects into the given | ||||
| // object storage. | ||||
| func WritePackfileToObjectStorage( | ||||
| 	sw storer.PackfileWriter, | ||||
| 	packfile io.Reader, | ||||
| ) (err error) { | ||||
| 	w, err := sw.PackfileWriter() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	defer ioutil.CheckClose(w, &err) | ||||
|  | ||||
| 	var n int64 | ||||
| 	n, err = io.Copy(w, packfile) | ||||
| 	if err == nil && n == 0 { | ||||
| 		return ErrEmptyPackfile | ||||
| 	} | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| var bufPool = sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		return bytes.NewBuffer(nil) | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01} | ||||
|  | ||||
| var zlibReaderPool = sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes)) | ||||
| 		return r | ||||
| 	}, | ||||
| } | ||||
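A sketch of feeding a packfile into a storer through the helper above (the path is a placeholder; this uses go-git's in-memory storage from storage/memory):

	st := memory.NewStorage()
	f, _ := os.Open("objects.pack")
	defer f.Close()
	if err := packfile.UpdateObjectStorage(st, f); err != nil {
		// handle parse or storage errors
	}

A storer that implements storer.PackfileWriter receives the raw packfile via io.Copy; any other storer goes through the parser object by object.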
297  vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go  generated vendored Normal file
| @@ -0,0 +1,297 @@ | ||||
| package packfile | ||||
|  | ||||
| const blksz = 16 | ||||
| const maxChainLength = 64 | ||||
|  | ||||
| // deltaIndex is a modified version of JGit's DeltaIndex adapted to our current | ||||
| // design. | ||||
| type deltaIndex struct { | ||||
| 	table   []int | ||||
| 	entries []int | ||||
| 	mask    int | ||||
| } | ||||
|  | ||||
| func (idx *deltaIndex) init(buf []byte) { | ||||
| 	scanner := newDeltaIndexScanner(buf, len(buf)) | ||||
| 	idx.mask = scanner.mask | ||||
| 	idx.table = scanner.table | ||||
| 	idx.entries = make([]int, countEntries(scanner)+1) | ||||
| 	idx.copyEntries(scanner) | ||||
| } | ||||
|  | ||||
| // findMatch returns the offset in src where the block starting at tgtOffset | ||||
| // matches, along with the length of the match. A length of 0 means there was | ||||
| // no match; a length of -1 means src is shorter than blksz; any other | ||||
| // positive value is the length of the match in bytes. | ||||
| func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) { | ||||
| 	if len(tgt) < tgtOffset+s { | ||||
| 		return 0, len(tgt) - tgtOffset | ||||
| 	} | ||||
|  | ||||
| 	if len(src) < blksz { | ||||
| 		return 0, -1 | ||||
| 	} | ||||
|  | ||||
| 	if len(tgt) >= tgtOffset+s && len(src) >= blksz { | ||||
| 		h := hashBlock(tgt, tgtOffset) | ||||
| 		tIdx := h & idx.mask | ||||
| 		eIdx := idx.table[tIdx] | ||||
| 		if eIdx != 0 { | ||||
| 			srcOffset = idx.entries[eIdx] | ||||
| 		} else { | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		l = matchLength(src, tgt, tgtOffset, srcOffset) | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func matchLength(src, tgt []byte, otgt, osrc int) (l int) { | ||||
| 	lensrc := len(src) | ||||
| 	lentgt := len(tgt) | ||||
| 	for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] { | ||||
| 		l++ | ||||
| 		osrc++ | ||||
| 		otgt++ | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func countEntries(scan *deltaIndexScanner) (cnt int) { | ||||
| 	// Figure out exactly how many entries we need. As we do the | ||||
| 	// enumeration truncate any delta chains longer than what we | ||||
| 	// are willing to scan during encode. This keeps the encode | ||||
| 	// logic linear in the size of the input rather than quadratic. | ||||
| 	for i := 0; i < len(scan.table); i++ { | ||||
| 		h := scan.table[i] | ||||
| 		if h == 0 { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		size := 0 | ||||
| 		for { | ||||
| 			size++ | ||||
| 			if size == maxChainLength { | ||||
| 				scan.next[h] = 0 | ||||
| 				break | ||||
| 			} | ||||
| 			h = scan.next[h] | ||||
|  | ||||
| 			if h == 0 { | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 		cnt += size | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) { | ||||
| 	// Rebuild the entries list from the scanner, positioning all | ||||
| 	// blocks in the same hash chain next to each other. We can | ||||
| 	// then later discard the next list, along with the scanner. | ||||
| 	// | ||||
| 	next := 1 | ||||
| 	for i := 0; i < len(idx.table); i++ { | ||||
| 		h := idx.table[i] | ||||
| 		if h == 0 { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		idx.table[i] = next | ||||
| 		for { | ||||
| 			idx.entries[next] = scanner.entries[h] | ||||
| 			next++ | ||||
| 			h = scanner.next[h] | ||||
|  | ||||
| 			if h == 0 { | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type deltaIndexScanner struct { | ||||
| 	table   []int | ||||
| 	entries []int | ||||
| 	next    []int | ||||
| 	mask    int | ||||
| 	count   int | ||||
| } | ||||
|  | ||||
| func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner { | ||||
| 	size -= size % blksz | ||||
| 	worstCaseBlockCnt := size / blksz | ||||
| 	if worstCaseBlockCnt < 1 { | ||||
| 		return new(deltaIndexScanner) | ||||
| 	} | ||||
|  | ||||
| 	tableSize := tableSize(worstCaseBlockCnt) | ||||
| 	scanner := &deltaIndexScanner{ | ||||
| 		table:   make([]int, tableSize), | ||||
| 		mask:    tableSize - 1, | ||||
| 		entries: make([]int, worstCaseBlockCnt+1), | ||||
| 		next:    make([]int, worstCaseBlockCnt+1), | ||||
| 	} | ||||
|  | ||||
| 	scanner.scan(buf, size) | ||||
| 	return scanner | ||||
| } | ||||
|  | ||||
| // scan is a slightly modified version of JGit's DeltaIndexScanner: we store | ||||
| // only the offset in each entry, instead of the offset and the key, which | ||||
| // saves the work of retrieving the offset later, as the key is never used. | ||||
| // See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java | ||||
| func (s *deltaIndexScanner) scan(buf []byte, end int) { | ||||
| 	lastHash := 0 | ||||
| 	ptr := end - blksz | ||||
|  | ||||
| 	for { | ||||
| 		key := hashBlock(buf, ptr) | ||||
| 		tIdx := key & s.mask | ||||
| 		head := s.table[tIdx] | ||||
| 		if head != 0 && lastHash == key { | ||||
| 			s.entries[head] = ptr | ||||
| 		} else { | ||||
| 			s.count++ | ||||
| 			eIdx := s.count | ||||
| 			s.entries[eIdx] = ptr | ||||
| 			s.next[eIdx] = head | ||||
| 			s.table[tIdx] = eIdx | ||||
| 		} | ||||
|  | ||||
| 		lastHash = key | ||||
| 		ptr -= blksz | ||||
|  | ||||
| 		if ptr < 0 { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
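| // tableSize returns the smallest power of two that can hold | ||||
| // worstCaseBlockCnt entries, e.g. 5 blocks -> a table of size 8. | ||||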
| func tableSize(worstCaseBlockCnt int) int { | ||||
| 	shift := 32 - leadingZeros(uint32(worstCaseBlockCnt)) | ||||
| 	sz := 1 << uint(shift-1) | ||||
| 	if sz < worstCaseBlockCnt { | ||||
| 		sz <<= 1 | ||||
| 	} | ||||
| 	return sz | ||||
| } | ||||
|  | ||||
| // use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future | ||||
| func leadingZeros(x uint32) (n int) { | ||||
| 	if x >= 1<<16 { | ||||
| 		x >>= 16 | ||||
| 		n = 16 | ||||
| 	} | ||||
| 	if x >= 1<<8 { | ||||
| 		x >>= 8 | ||||
| 		n += 8 | ||||
| 	} | ||||
| 	n += int(len8tab[x]) | ||||
| 	return 32 - n | ||||
| } | ||||
|  | ||||
| var len8tab = [256]uint8{ | ||||
| 	0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, | ||||
| 	0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, | ||||
| 	0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, | ||||
| 	0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, | ||||
| 	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||||
| 	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||||
| 	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||||
| 	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| 	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, | ||||
| } | ||||
|  | ||||
| func hashBlock(raw []byte, ptr int) int { | ||||
| 	// The first 4 steps collapse into a single 4-byte big-endian decode, | ||||
| 	// with a larger right shift since the left shifts were combined. | ||||
| 	// | ||||
| 	hash := ((uint32(raw[ptr]) & 0xff) << 24) | | ||||
| 		((uint32(raw[ptr+1]) & 0xff) << 16) | | ||||
| 		((uint32(raw[ptr+2]) & 0xff) << 8) | | ||||
| 		(uint32(raw[ptr+3]) & 0xff) | ||||
| 	hash ^= T[hash>>31] | ||||
|  | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23] | ||||
|  | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23] | ||||
|  | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23] | ||||
| 	hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23] | ||||
|  | ||||
| 	return int(hash) | ||||
| } | ||||
|  | ||||
| var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577, | ||||
| 	0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99, | ||||
| 	0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45, | ||||
| 	0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c, | ||||
| 	0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895, | ||||
| 	0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd, | ||||
| 	0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f, | ||||
| 	0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181, | ||||
| 	0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e, | ||||
| 	0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770, | ||||
| 	0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d, | ||||
| 	0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5, | ||||
| 	0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c, | ||||
| 	0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084, | ||||
| 	0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558, | ||||
| 	0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6, | ||||
| 	0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788, | ||||
| 	0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66, | ||||
| 	0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba, | ||||
| 	0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c, | ||||
| 	0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105, | ||||
| 	0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d, | ||||
| 	0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990, | ||||
| 	0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e, | ||||
| 	0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61, | ||||
| 	0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f, | ||||
| 	0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f, | ||||
| 	0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17, | ||||
| 	0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e, | ||||
| 	0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7, | ||||
| 	0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b, | ||||
| 	0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5, | ||||
| 	0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4, | ||||
| 	0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a, | ||||
| 	0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96, | ||||
| 	0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df, | ||||
| 	0x20067785, 0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46, | ||||
| 	0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e, | ||||
| 	0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62, | ||||
| 	0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c, | ||||
| 	0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93, | ||||
| 	0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d, | ||||
| 	0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680, | ||||
| 	0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8, | ||||
| 	0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071, | ||||
| 	0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657, | ||||
| 	0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b, | ||||
| 	0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965, | ||||
| 	0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b, | ||||
| 	0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5, | ||||
| 	0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69, | ||||
| 	0xe4fe0d44, 0x4d736b1e, 0x99b5d833, | ||||
| } | ||||
369  vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go  generated vendored Normal file
| @@ -0,0 +1,369 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"sort" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/storer" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// maxDepth is the maximum length of a chain of deltas based on deltas. | ||||
| 	// 50 is the default value used in JGit. | ||||
| 	maxDepth = int64(50) | ||||
| ) | ||||
|  | ||||
| // applyDelta is the set of object types to which we should apply deltas. | ||||
| var applyDelta = map[plumbing.ObjectType]bool{ | ||||
| 	plumbing.BlobObject: true, | ||||
| 	plumbing.TreeObject: true, | ||||
| } | ||||
|  | ||||
| type deltaSelector struct { | ||||
| 	storer storer.EncodedObjectStorer | ||||
| } | ||||
|  | ||||
| func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector { | ||||
| 	return &deltaSelector{s} | ||||
| } | ||||
|  | ||||
| // ObjectsToPack creates a list of ObjectToPack from the provided hashes, | ||||
| // creating deltas where suitable according to its internal heuristics. | ||||
| // `packWindow` specifies the size of the sliding | ||||
| // window used to compare objects for delta compression; 0 turns off | ||||
| // delta compression entirely. | ||||
| func (dw *deltaSelector) ObjectsToPack( | ||||
| 	hashes []plumbing.Hash, | ||||
| 	packWindow uint, | ||||
| ) ([]*ObjectToPack, error) { | ||||
| 	otp, err := dw.objectsToPack(hashes, packWindow) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if packWindow == 0 { | ||||
| 		return otp, nil | ||||
| 	} | ||||
|  | ||||
| 	dw.sort(otp) | ||||
|  | ||||
| 	var objectGroups [][]*ObjectToPack | ||||
| 	var prev *ObjectToPack | ||||
| 	i := -1 | ||||
| 	for _, obj := range otp { | ||||
| 		if prev == nil || prev.Type() != obj.Type() { | ||||
| 			objectGroups = append(objectGroups, []*ObjectToPack{obj}) | ||||
| 			i++ | ||||
| 			prev = obj | ||||
| 		} else { | ||||
| 			objectGroups[i] = append(objectGroups[i], obj) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	var wg sync.WaitGroup | ||||
| 	var once sync.Once | ||||
| 	for _, objs := range objectGroups { | ||||
| 		objs := objs | ||||
| 		wg.Add(1) | ||||
| 		go func() { | ||||
| 			if walkErr := dw.walk(objs, packWindow); walkErr != nil { | ||||
| 				once.Do(func() { | ||||
| 					err = walkErr | ||||
| 				}) | ||||
| 			} | ||||
| 			wg.Done() | ||||
| 		}() | ||||
| 	} | ||||
| 	wg.Wait() | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return otp, nil | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) objectsToPack( | ||||
| 	hashes []plumbing.Hash, | ||||
| 	packWindow uint, | ||||
| ) ([]*ObjectToPack, error) { | ||||
| 	var objectsToPack []*ObjectToPack | ||||
| 	for _, h := range hashes { | ||||
| 		var o plumbing.EncodedObject | ||||
| 		var err error | ||||
| 		if packWindow == 0 { | ||||
| 			o, err = dw.encodedObject(h) | ||||
| 		} else { | ||||
| 			o, err = dw.encodedDeltaObject(h) | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		otp := newObjectToPack(o) | ||||
| 		if _, ok := o.(plumbing.DeltaObject); ok { | ||||
| 			otp.CleanOriginal() | ||||
| 		} | ||||
|  | ||||
| 		objectsToPack = append(objectsToPack, otp) | ||||
| 	} | ||||
|  | ||||
| 	if packWindow == 0 { | ||||
| 		return objectsToPack, nil | ||||
| 	} | ||||
|  | ||||
| 	if err := dw.fixAndBreakChains(objectsToPack); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return objectsToPack, nil | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) { | ||||
| 	edos, ok := dw.storer.(storer.DeltaObjectStorer) | ||||
| 	if !ok { | ||||
| 		return dw.encodedObject(h) | ||||
| 	} | ||||
|  | ||||
| 	return edos.DeltaObject(plumbing.AnyObject, h) | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) { | ||||
| 	return dw.storer.EncodedObject(plumbing.AnyObject, h) | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error { | ||||
| 	m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack)) | ||||
| 	for _, otp := range objectsToPack { | ||||
| 		m[otp.Hash()] = otp | ||||
| 	} | ||||
|  | ||||
| 	for _, otp := range objectsToPack { | ||||
| 		if err := dw.fixAndBreakChainsOne(m, otp); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error { | ||||
| 	if !otp.Object.Type().IsDelta() { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	// An ObjectToPack might start out with a delta assigned to Object but no | ||||
| 	// actual base. Once Base has been assigned to a delta, it has already | ||||
| 	// been fixed. | ||||
| 	if otp.Base != nil { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	do, ok := otp.Object.(plumbing.DeltaObject) | ||||
| 	if !ok { | ||||
| 		// if this is not a DeltaObject, then we cannot retrieve its base, | ||||
| 		// so we have to break the delta chain here. | ||||
| 		return dw.undeltify(otp) | ||||
| 	} | ||||
|  | ||||
| 	base, ok := objectsToPack[do.BaseHash()] | ||||
| 	if !ok { | ||||
| 		// The base of the delta is not in our list of objects to pack, so | ||||
| 		// we break the chain. | ||||
| 		return dw.undeltify(otp) | ||||
| 	} | ||||
|  | ||||
| 	if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	otp.SetDelta(base, otp.Object) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error { | ||||
| 	if otp.Original != nil { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	if !otp.Object.Type().IsDelta() { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	obj, err := dw.encodedObject(otp.Hash()) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	otp.SetOriginal(obj) | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // undeltify breaks the delta chain of an *ObjectToPack by retrieving the | ||||
| // original object from the storer and resetting it. | ||||
| func (dw *deltaSelector) undeltify(otp *ObjectToPack) error { | ||||
| 	if err := dw.restoreOriginal(otp); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	otp.Object = otp.Original | ||||
| 	otp.Depth = 0 | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { | ||||
| 	sort.Sort(byTypeAndSize(objectsToPack)) | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) walk( | ||||
| 	objectsToPack []*ObjectToPack, | ||||
| 	packWindow uint, | ||||
| ) error { | ||||
| 	indexMap := make(map[plumbing.Hash]*deltaIndex) | ||||
| 	for i := 0; i < len(objectsToPack); i++ { | ||||
| 		// Clean up the index map and reconstructed delta objects for anything | ||||
| 		// outside our pack window, to save memory. | ||||
| 		if i > int(packWindow) { | ||||
| 			obj := objectsToPack[i-int(packWindow)] | ||||
|  | ||||
| 			delete(indexMap, obj.Hash()) | ||||
|  | ||||
| 			if obj.IsDelta() { | ||||
| 				obj.SaveOriginalMetadata() | ||||
| 				obj.CleanOriginal() | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		target := objectsToPack[i] | ||||
|  | ||||
| 		// If we already have a delta, we don't try to find a new one for this | ||||
| 		// object. This happens when a delta is set to be reused from an existing | ||||
| 		// packfile. | ||||
| 		if target.IsDelta() { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		// We only want to create deltas from specific types. | ||||
| 		if !applyDelta[target.Type()] { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		for j := i - 1; j >= 0 && i-j < int(packWindow); j-- { | ||||
| 			base := objectsToPack[j] | ||||
| 			// Objects must use only the same type as their delta base. | ||||
| 			// Since objectsToPack is sorted by type and size, once we find | ||||
| 			// a different type, we know we won't find more of them. | ||||
| 			if base.Type() != target.Type() { | ||||
| 				break | ||||
| 			} | ||||
|  | ||||
| 			if err := dw.tryToDeltify(indexMap, base, target); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error { | ||||
| 	// Original object might not be present if we're reusing a delta, so we | ||||
| 	// ensure it is restored. | ||||
| 	if err := dw.restoreOriginal(target); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if err := dw.restoreOriginal(base); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// If the sizes are radically different, this is a bad pairing. | ||||
| 	if target.Size() < base.Size()>>4 { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	msz := dw.deltaSizeLimit( | ||||
| 		target.Object.Size(), | ||||
| 		base.Depth, | ||||
| 		target.Depth, | ||||
| 		target.IsDelta(), | ||||
| 	) | ||||
|  | ||||
| 	// Nearly impossible to fit useful delta. | ||||
| 	if msz <= 8 { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	// If we have to insert a lot to make this work, find another. | ||||
| 	if base.Size()-target.Size() > msz { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	if _, ok := indexMap[base.Hash()]; !ok { | ||||
| 		indexMap[base.Hash()] = new(deltaIndex) | ||||
| 	} | ||||
|  | ||||
| 	// Now we can generate the delta using originals | ||||
| 	delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// only use the delta if it is below the computed size limit | ||||
| 	if delta.Size() < msz { | ||||
| 		target.SetDelta(base, delta) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
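| // deltaSizeLimit returns the maximum size a delta may have to still be | ||||
| // preferable over the plain target object. E.g. with maxDepth = 50, a | ||||
| // 1000-byte non-delta target paired with a base at depth 10 gets a budget | ||||
| // of (1000>>1)*(50-10)/50 = 400 bytes. | ||||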
| func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int, | ||||
| 	targetDepth int, targetDelta bool) int64 { | ||||
| 	if !targetDelta { | ||||
| 		// Any delta should be no more than 50% of the original size | ||||
| 		// (for text files, deflating the whole form should shrink it ~50%). | ||||
| 		n := targetSize >> 1 | ||||
|  | ||||
| 		// Evenly distribute delta size limits over allowed depth. | ||||
| 		// If src is non-delta (depth = 0), delta <= 50% of original. | ||||
| 		// If src is almost at limit (9/10), delta <= 10% of original. | ||||
| 		return n * (maxDepth - int64(baseDepth)) / maxDepth | ||||
| 	} | ||||
|  | ||||
| 	// With a delta base chosen any new delta must be "better". | ||||
| 	// Retain the distribution described above. | ||||
| 	d := int64(targetDepth) | ||||
| 	n := targetSize | ||||
|  | ||||
| 	// If target depth is bigger than maxDepth, this delta is not suitable to be used. | ||||
| 	if d >= maxDepth { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	// If src is whole (depth=0) and base is near limit (depth=9/10) | ||||
| 	// any delta using src can be 10x larger and still be better. | ||||
| 	// | ||||
| 	// If src is near limit (depth=9/10) and base is whole (depth=0) | ||||
| 	// a new delta dependent on src must be 1/10th the size. | ||||
| 	return n * (maxDepth - int64(baseDepth)) / (maxDepth - d) | ||||
| } | ||||
|  | ||||
| type byTypeAndSize []*ObjectToPack | ||||
|  | ||||
| func (a byTypeAndSize) Len() int { return len(a) } | ||||
|  | ||||
| func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } | ||||
|  | ||||
| func (a byTypeAndSize) Less(i, j int) bool { | ||||
| 	if a[i].Type() < a[j].Type() { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	if a[i].Type() > a[j].Type() { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	return a[i].Size() > a[j].Size() | ||||
| } | ||||
200  vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go  generated vendored Normal file
| @@ -0,0 +1,200 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| // See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and | ||||
| // https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js | ||||
| // for more info | ||||
|  | ||||
| const ( | ||||
| 	// Standard chunk size used to generate fingerprints | ||||
| 	s = 16 | ||||
|  | ||||
| 	// https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428 | ||||
| 	// Max size of a copy operation (64KB) | ||||
| 	maxCopySize = 64 * 1024 | ||||
| ) | ||||
|  | ||||
| // GetDelta returns an EncodedObject of type OFSDeltaObject. Both the base | ||||
| // and the target object are loaded into memory in order to create the delta. | ||||
| // To regenerate the target you need both the returned delta and the base | ||||
| // object. An error is returned if the base or target object cannot be read. | ||||
| func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { | ||||
| 	return getDelta(new(deltaIndex), base, target) | ||||
| } | ||||
|  | ||||
| func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { | ||||
| 	br, err := base.Reader() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer br.Close() | ||||
| 	tr, err := target.Reader() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer tr.Close() | ||||
|  | ||||
| 	bb := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(bb) | ||||
| 	bb.Reset() | ||||
|  | ||||
| 	_, err = bb.ReadFrom(br) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	tb := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(tb) | ||||
| 	tb.Reset() | ||||
|  | ||||
| 	_, err = tb.ReadFrom(tr) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	db := diffDelta(index, bb.Bytes(), tb.Bytes()) | ||||
| 	delta := &plumbing.MemoryObject{} | ||||
| 	_, err = delta.Write(db) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	delta.SetSize(int64(len(db))) | ||||
| 	delta.SetType(plumbing.OFSDeltaObject) | ||||
|  | ||||
| 	return delta, nil | ||||
| } | ||||
|  | ||||
| // DiffDelta returns the delta that transforms src into tgt. | ||||
| func DiffDelta(src, tgt []byte) []byte { | ||||
| 	return diffDelta(new(deltaIndex), src, tgt) | ||||
| } | ||||
|  | ||||
| func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { | ||||
| 	buf := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(buf) | ||||
| 	buf.Reset() | ||||
| 	buf.Write(deltaEncodeSize(len(src))) | ||||
| 	buf.Write(deltaEncodeSize(len(tgt))) | ||||
|  | ||||
| 	if len(index.entries) == 0 { | ||||
| 		index.init(src) | ||||
| 	} | ||||
|  | ||||
| 	ibuf := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(ibuf) | ||||
| 	ibuf.Reset() | ||||
| 	for i := 0; i < len(tgt); i++ { | ||||
| 		offset, l := index.findMatch(src, tgt, i) | ||||
|  | ||||
| 		if l == 0 { | ||||
| 			// couldn't find a match, just write the current byte and continue | ||||
| 			ibuf.WriteByte(tgt[i]) | ||||
| 		} else if l < 0 { | ||||
| 			// src is shorter than blksz; copy the rest of the target to avoid | ||||
| 			// further calls to findMatch | ||||
| 			for ; i < len(tgt); i++ { | ||||
| 				ibuf.WriteByte(tgt[i]) | ||||
| 			} | ||||
| 		} else if l < s { | ||||
| 			// remaining target is less than blksz, copy what's left of it | ||||
| 			// and avoid calls to findMatch | ||||
| 			for j := i; j < i+l; j++ { | ||||
| 				ibuf.WriteByte(tgt[j]) | ||||
| 			} | ||||
| 			i += l - 1 | ||||
| 		} else { | ||||
| 			encodeInsertOperation(ibuf, buf) | ||||
|  | ||||
| 			rl := l | ||||
| 			aOffset := offset | ||||
| 			for rl > 0 { | ||||
| 				if rl < maxCopySize { | ||||
| 					buf.Write(encodeCopyOperation(aOffset, rl)) | ||||
| 					break | ||||
| 				} | ||||
|  | ||||
| 				buf.Write(encodeCopyOperation(aOffset, maxCopySize)) | ||||
| 				rl -= maxCopySize | ||||
| 				aOffset += maxCopySize | ||||
| 			} | ||||
|  | ||||
| 			i += l - 1 | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	encodeInsertOperation(ibuf, buf) | ||||
|  | ||||
| 	// buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it. | ||||
| 	return append([]byte{}, buf.Bytes()...) | ||||
| } | ||||
|  | ||||
| func encodeInsertOperation(ibuf, buf *bytes.Buffer) { | ||||
| 	if ibuf.Len() == 0 { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	b := ibuf.Bytes() | ||||
| 	s := ibuf.Len() | ||||
| 	o := 0 | ||||
| 	for { | ||||
| 		if s <= 127 { | ||||
| 			break | ||||
| 		} | ||||
| 		buf.WriteByte(byte(127)) | ||||
| 		buf.Write(b[o : o+127]) | ||||
| 		s -= 127 | ||||
| 		o += 127 | ||||
| 	} | ||||
| 	buf.WriteByte(byte(s)) | ||||
| 	buf.Write(b[o : o+s]) | ||||
|  | ||||
| 	ibuf.Reset() | ||||
| } | ||||
|  | ||||
| func deltaEncodeSize(size int) []byte { | ||||
| 	var ret []byte | ||||
| 	c := size & 0x7f | ||||
| 	size >>= 7 | ||||
| 	for { | ||||
| 		if size == 0 { | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		ret = append(ret, byte(c|0x80)) | ||||
| 		c = size & 0x7f | ||||
| 		size >>= 7 | ||||
| 	} | ||||
| 	ret = append(ret, byte(c)) | ||||
|  | ||||
| 	return ret | ||||
| } | ||||
|  | ||||
| func encodeCopyOperation(offset, length int) []byte { | ||||
| 	code := 0x80 | ||||
| 	var opcodes []byte | ||||
|  | ||||
| 	var i uint | ||||
| 	for i = 0; i < 4; i++ { | ||||
| 		f := 0xff << (i * 8) | ||||
| 		if offset&f != 0 { | ||||
| 			opcodes = append(opcodes, byte(offset&f>>(i*8))) | ||||
| 			code |= 0x01 << i | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for i = 0; i < 3; i++ { | ||||
| 		f := 0xff << (i * 8) | ||||
| 		if length&f != 0 { | ||||
| 			opcodes = append(opcodes, byte(length&f>>(i*8))) | ||||
| 			code |= 0x10 << i | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return append([]byte{byte(code)}, opcodes...) | ||||
| } | ||||
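A round-trip sketch of the exported helpers above (PatchDelta, the inverse operation, is defined in another file of this package):

	src := []byte("the quick brown fox jumps over the lazy dog")
	tgt := []byte("the quick brown fox leaps over the lazy dog")
	delta := packfile.DiffDelta(src, tgt)       // copy ops where blocks match, inserts elsewhere
	out, err := packfile.PatchDelta(src, delta) // err == nil, bytes.Equal(out, tgt) == true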
39  vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go  generated vendored Normal file
| @@ -0,0 +1,39 @@ | ||||
| // Package packfile implements encoding and decoding of packfile format. | ||||
| // | ||||
| //  == pack-*.pack files have the following format: | ||||
| // | ||||
| //    - A header appears at the beginning and consists of the following: | ||||
| // | ||||
| //      4-byte signature: | ||||
| //          The signature is: {'P', 'A', 'C', 'K'} | ||||
| // | ||||
| //      4-byte version number (network byte order): | ||||
| //          GIT currently accepts version number 2 or 3 but | ||||
| //          generates version 2 only. | ||||
| // | ||||
| //      4-byte number of objects contained in the pack (network byte order) | ||||
| // | ||||
| //      Observation: we cannot have more than 4G versions ;-) and | ||||
| //      more than 4G objects in a pack. | ||||
| // | ||||
| //    - The header is followed by number of object entries, each of | ||||
| //      which looks like this: | ||||
| // | ||||
| //      (undeltified representation) | ||||
| //      n-byte type and length (3-bit type, (n-1)*7+4-bit length) | ||||
| //      compressed data | ||||
| // | ||||
| //      (deltified representation) | ||||
| //      n-byte type and length (3-bit type, (n-1)*7+4-bit length) | ||||
| //      20-byte base object name | ||||
| //      compressed delta data | ||||
| // | ||||
| //      Observation: length of each object is encoded in a variable | ||||
| //      length format and is not constrained to 32-bit or anything. | ||||
| // | ||||
| //   - The trailer records 20-byte SHA1 checksum of all of the above. | ||||
| // | ||||
| // | ||||
| // Source: | ||||
| // https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt | ||||
| package packfile | ||||
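Per the layout above, the fixed 12-byte header can be decoded with encoding/binary (a standalone sketch, independent of this package's own Scanner; r is any io.Reader positioned at the start of the pack):

	var hdr struct {
		Sig     [4]byte // {'P', 'A', 'C', 'K'}
		Version uint32  // 2 or 3, network byte order
		Objects uint32  // number of objects in the pack
	}
	err := binary.Read(r, binary.BigEndian, &hdr)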
219  vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go  generated vendored Normal file
| @@ -0,0 +1,219 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"compress/zlib" | ||||
| 	"crypto/sha1" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/storer" | ||||
| 	"github.com/go-git/go-git/v5/utils/binary" | ||||
| ) | ||||
|  | ||||
| // Encoder gets the data from the storage and writes it into the writer in PACK | ||||
| // format | ||||
| type Encoder struct { | ||||
| 	selector *deltaSelector | ||||
| 	w        *offsetWriter | ||||
| 	zw       *zlib.Writer | ||||
| 	hasher   plumbing.Hasher | ||||
|  | ||||
| 	useRefDeltas bool | ||||
| } | ||||
|  | ||||
| // NewEncoder creates a new packfile encoder using a specific Writer and | ||||
| // EncodedObjectStorer. By default deltas used to generate the packfile will be | ||||
| // OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. | ||||
| func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { | ||||
| 	h := plumbing.Hasher{ | ||||
| 		Hash: sha1.New(), | ||||
| 	} | ||||
| 	mw := io.MultiWriter(w, h) | ||||
| 	ow := newOffsetWriter(mw) | ||||
| 	zw := zlib.NewWriter(mw) | ||||
| 	return &Encoder{ | ||||
| 		selector:     newDeltaSelector(s), | ||||
| 		w:            ow, | ||||
| 		zw:           zw, | ||||
| 		hasher:       h, | ||||
| 		useRefDeltas: useRefDeltas, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Encode creates a packfile containing all the objects referenced in | ||||
| // hashes and writes it to the writer in the Encoder.  `packWindow` | ||||
| // specifies the size of the sliding window used to compare objects | ||||
| // for delta compression; 0 turns off delta compression entirely. | ||||
| func (e *Encoder) Encode( | ||||
| 	hashes []plumbing.Hash, | ||||
| 	packWindow uint, | ||||
| ) (plumbing.Hash, error) { | ||||
| 	objects, err := e.selector.ObjectsToPack(hashes, packWindow) | ||||
| 	if err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	return e.encode(objects) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) { | ||||
| 	if err := e.head(len(objects)); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	for _, o := range objects { | ||||
| 		if err := e.entry(o); err != nil { | ||||
| 			return plumbing.ZeroHash, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return e.footer() | ||||
| } | ||||
|  | ||||
| func (e *Encoder) head(numEntries int) error { | ||||
| 	return binary.Write( | ||||
| 		e.w, | ||||
| 		signature, | ||||
| 		int32(VersionSupported), | ||||
| 		int32(numEntries), | ||||
| 	) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) entry(o *ObjectToPack) error { | ||||
| 	if o.WantWrite() { | ||||
| 		// A cycle exists in this delta chain. This should only occur if a | ||||
| 		// selected object representation disappeared during writing | ||||
| 		// (for example due to a concurrent repack) and a different base | ||||
| 		// was chosen, forcing a cycle. Select something other than a | ||||
| 		// delta, and write this object. | ||||
| 		e.selector.restoreOriginal(o) | ||||
| 		o.BackToOriginal() | ||||
| 	} | ||||
|  | ||||
| 	if o.IsWritten() { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	o.MarkWantWrite() | ||||
|  | ||||
| 	if err := e.writeBaseIfDelta(o); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// We need to check whether we already wrote that object due to a cyclic delta chain | ||||
| 	if o.IsWritten() { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	o.Offset = e.w.Offset() | ||||
|  | ||||
| 	if o.IsDelta() { | ||||
| 		if err := e.writeDeltaHeader(o); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} else { | ||||
| 		if err := e.entryHead(o.Type(), o.Size()); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	e.zw.Reset(e.w) | ||||
| 	or, err := o.Object.Reader() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	_, err = io.Copy(e.zw, or) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return e.zw.Close() | ||||
| } | ||||
|  | ||||
| func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error { | ||||
| 	if o.IsDelta() && !o.Base.IsWritten() { | ||||
| 		// We must write base first | ||||
| 		return e.entry(o.Base) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error { | ||||
| 	// Write offset deltas by default | ||||
| 	t := plumbing.OFSDeltaObject | ||||
| 	if e.useRefDeltas { | ||||
| 		t = plumbing.REFDeltaObject | ||||
| 	} | ||||
|  | ||||
| 	if err := e.entryHead(t, o.Object.Size()); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if e.useRefDeltas { | ||||
| 		return e.writeRefDeltaHeader(o.Base.Hash()) | ||||
| 	} else { | ||||
| 		return e.writeOfsDeltaHeader(o) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error { | ||||
| 	return binary.Write(e.w, base) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error { | ||||
| 	// for OFS_DELTA, offset of the base is interpreted as negative offset | ||||
| 	// relative to the type-byte of the header of the ofs-delta entry. | ||||
| 	relativeOffset := o.Offset - o.Base.Offset | ||||
| 	if relativeOffset <= 0 { | ||||
| 		return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset) | ||||
| 	} | ||||
|  | ||||
| 	return binary.WriteVariableWidthInt(e.w, relativeOffset) | ||||
| } | ||||
|  | ||||
| func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error { | ||||
| 	t := int64(typeNum) | ||||
| 	header := []byte{} | ||||
| 	c := (t << firstLengthBits) | (size & maskFirstLength) | ||||
| 	size >>= firstLengthBits | ||||
| 	for { | ||||
| 		if size == 0 { | ||||
| 			break | ||||
| 		} | ||||
| 		header = append(header, byte(c|maskContinue)) | ||||
| 		c = size & int64(maskLength) | ||||
| 		size >>= lengthBits | ||||
| 	} | ||||
|  | ||||
| 	header = append(header, byte(c)) | ||||
| 	_, err := e.w.Write(header) | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (e *Encoder) footer() (plumbing.Hash, error) { | ||||
| 	h := e.hasher.Sum() | ||||
| 	return h, binary.Write(e.w, h) | ||||
| } | ||||
|  | ||||
| type offsetWriter struct { | ||||
| 	w      io.Writer | ||||
| 	offset int64 | ||||
| } | ||||
|  | ||||
| func newOffsetWriter(w io.Writer) *offsetWriter { | ||||
| 	return &offsetWriter{w: w} | ||||
| } | ||||
|  | ||||
| func (ow *offsetWriter) Write(p []byte) (n int, err error) { | ||||
| 	n, err = ow.w.Write(p) | ||||
| 	ow.offset += int64(n) | ||||
| 	return n, err | ||||
| } | ||||
|  | ||||
| func (ow *offsetWriter) Offset() int64 { | ||||
| 	return ow.offset | ||||
| } | ||||
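The entryHead encoding above packs the object type and size into a variable-width header: the first byte carries the type in bits 4-6 plus the low four size bits, and each continuation byte (MSB set) carries seven more size bits. Below is a minimal standalone sketch with local stand-ins for the package constants (their exact values are not shown in this hunk, so they are assumptions), plus a worked example.

package main

import "fmt"

const (
	firstLengthBits = 4    // size bits carried by the first header byte
	lengthBits      = 7    // size bits carried by each continuation byte
	maskFirstLength = 0x0F // 0000 1111
	maskLength      = 0x7F // 0111 1111
	maskContinue    = 0x80 // 1000 0000
)

// encodeEntryHead mirrors entryHead above: the type goes in bits 4-6 of
// the first byte, the size is split into a 4-bit group then 7-bit groups,
// and the MSB of each byte signals continuation.
func encodeEntryHead(typeNum, size int64) []byte {
	var header []byte
	c := (typeNum << firstLengthBits) | (size & maskFirstLength)
	size >>= firstLengthBits
	for size != 0 {
		header = append(header, byte(c|maskContinue))
		c = size & maskLength
		size >>= lengthBits
	}
	return append(header, byte(c))
}

func main() {
	// A blob (type 3) of 300 bytes: 300 = 0b1_0010_1100, so the first
	// byte carries 0b1100 and the continuation byte carries 0b10010.
	fmt.Printf("% x\n", encodeEntryHead(3, 300)) // prints: bc 12
}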
30 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go (generated, vendored, new file)
| @@ -0,0 +1,30 @@ | ||||
| package packfile | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // Error specifies errors returned during packfile parsing. | ||||
| type Error struct { | ||||
| 	reason, details string | ||||
| } | ||||
|  | ||||
| // NewError returns a new error. | ||||
| func NewError(reason string) *Error { | ||||
| 	return &Error{reason: reason} | ||||
| } | ||||
|  | ||||
| // Error returns a text representation of the error. | ||||
| func (e *Error) Error() string { | ||||
| 	if e.details == "" { | ||||
| 		return e.reason | ||||
| 	} | ||||
|  | ||||
| 	return fmt.Sprintf("%s: %s", e.reason, e.details) | ||||
| } | ||||
|  | ||||
| // AddDetails returns a copy of the error with details formatted from the given arguments. | ||||
| func (e *Error) AddDetails(format string, args ...interface{}) *Error { | ||||
| 	return &Error{ | ||||
| 		reason:  e.reason, | ||||
| 		details: fmt.Sprintf(format, args...), | ||||
| 	} | ||||
| } | ||||
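Since AddDetails returns a new *Error rather than mutating the receiver, a package-level base error stays reusable across call sites. A small usage sketch (the base error name here is hypothetical):

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

// errExample is a hypothetical base error, mirroring how the package
// declares ErrInvalidObject and ErrZLib in packfile.go.
var errExample = packfile.NewError("example failure")

func main() {
	err := errExample.AddDetails("at offset %d", 42)
	fmt.Println(err)        // example failure: at offset 42
	fmt.Println(errExample) // example failure (the base value is untouched)
}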
116 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go (generated, vendored, new file)
| @@ -0,0 +1,116 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"io" | ||||
|  | ||||
| 	billy "github.com/go-git/go-billy/v5" | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/cache" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/format/idxfile" | ||||
| ) | ||||
|  | ||||
| // FSObject is an object from the packfile on the filesystem. | ||||
| type FSObject struct { | ||||
| 	hash   plumbing.Hash | ||||
| 	h      *ObjectHeader | ||||
| 	offset int64 | ||||
| 	size   int64 | ||||
| 	typ    plumbing.ObjectType | ||||
| 	index  idxfile.Index | ||||
| 	fs     billy.Filesystem | ||||
| 	path   string | ||||
| 	cache  cache.Object | ||||
| } | ||||
|  | ||||
| // NewFSObject creates a new filesystem object. | ||||
| func NewFSObject( | ||||
| 	hash plumbing.Hash, | ||||
| 	finalType plumbing.ObjectType, | ||||
| 	offset int64, | ||||
| 	contentSize int64, | ||||
| 	index idxfile.Index, | ||||
| 	fs billy.Filesystem, | ||||
| 	path string, | ||||
| 	cache cache.Object, | ||||
| ) *FSObject { | ||||
| 	return &FSObject{ | ||||
| 		hash:   hash, | ||||
| 		offset: offset, | ||||
| 		size:   contentSize, | ||||
| 		typ:    finalType, | ||||
| 		index:  index, | ||||
| 		fs:     fs, | ||||
| 		path:   path, | ||||
| 		cache:  cache, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Reader implements the plumbing.EncodedObject interface. | ||||
| func (o *FSObject) Reader() (io.ReadCloser, error) { | ||||
| 	obj, ok := o.cache.Get(o.hash) | ||||
| 	if ok && obj != o { | ||||
| 		reader, err := obj.Reader() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		return reader, nil | ||||
| 	} | ||||
|  | ||||
| 	f, err := o.fs.Open(o.path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	p := NewPackfileWithCache(o.index, nil, f, o.cache) | ||||
| 	r, err := p.getObjectContent(o.offset) | ||||
| 	if err != nil { | ||||
| 		_ = f.Close() | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if err := f.Close(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return r, nil | ||||
| } | ||||
|  | ||||
| // SetSize implements the plumbing.EncodedObject interface. This method | ||||
| // is a noop. | ||||
| func (o *FSObject) SetSize(int64) {} | ||||
|  | ||||
| // SetType implements the plumbing.EncodedObject interface. This method is | ||||
| // a noop. | ||||
| func (o *FSObject) SetType(plumbing.ObjectType) {} | ||||
|  | ||||
| // Hash implements the plumbing.EncodedObject interface. | ||||
| func (o *FSObject) Hash() plumbing.Hash { return o.hash } | ||||
|  | ||||
| // Size implements the plumbing.EncodedObject interface. | ||||
| func (o *FSObject) Size() int64 { return o.size } | ||||
|  | ||||
| // Type implements the plumbing.EncodedObject interface. | ||||
| func (o *FSObject) Type() plumbing.ObjectType { | ||||
| 	return o.typ | ||||
| } | ||||
|  | ||||
| // Writer implements the plumbing.EncodedObject interface. This method always | ||||
| // returns a nil writer. | ||||
| func (o *FSObject) Writer() (io.WriteCloser, error) { | ||||
| 	return nil, nil | ||||
| } | ||||
|  | ||||
| type objectReader struct { | ||||
| 	io.ReadCloser | ||||
| 	f billy.File | ||||
| } | ||||
|  | ||||
| func (r *objectReader) Close() error { | ||||
| 	if err := r.ReadCloser.Close(); err != nil { | ||||
| 		_ = r.f.Close() | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return r.f.Close() | ||||
| } | ||||
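FSObject.Reader above either serves the content from the cache or reopens the pack and decodes at the stored offset; the objectReader wrapper ensures the pack file handle is closed together with the content reader. A hedged helper sketch for draining any such object (readAll is illustrative, not part of go-git's API):

package example

import (
	"io/ioutil"

	"github.com/go-git/go-git/v5/plumbing"
)

// readAll drains any plumbing.EncodedObject (FSObject included) and
// returns its content. Closing the reader is what releases the
// underlying pack file handle via objectReader.Close above.
func readAll(obj plumbing.EncodedObject) ([]byte, error) {
	r, err := obj.Reader()
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}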
164 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go (generated, vendored, new file)
| @@ -0,0 +1,164 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| // ObjectToPack is a representation of an object that is going to be written | ||||
| // into a packfile. | ||||
| type ObjectToPack struct { | ||||
| 	// Object is the main object to pack; it could be any object, including deltas | ||||
| 	Object plumbing.EncodedObject | ||||
| 	// Base is the object that a delta is based on (it could also be another delta). | ||||
| 	// If the main object is not a delta, Base will be nil | ||||
| 	Base *ObjectToPack | ||||
| 	// Original is the object that we can generate by applying the delta to | ||||
| 	// Base, or the same object as Object in the case of a non-delta | ||||
| 	// object. | ||||
| 	Original plumbing.EncodedObject | ||||
| 	// Depth is the number of deltas that must be resolved to obtain Original | ||||
| 	// (delta based on delta based on ...) | ||||
| 	Depth int | ||||
|  | ||||
| 	// offset in the pack once the object has been written, or 0 if it | ||||
| 	// has not been written yet | ||||
| 	Offset int64 | ||||
|  | ||||
| 	// Information from the original object | ||||
| 	resolvedOriginal bool | ||||
| 	originalType     plumbing.ObjectType | ||||
| 	originalSize     int64 | ||||
| 	originalHash     plumbing.Hash | ||||
| } | ||||
|  | ||||
| // newObjectToPack creates a correct ObjectToPack based on a non-delta object | ||||
| func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack { | ||||
| 	return &ObjectToPack{ | ||||
| 		Object:   o, | ||||
| 		Original: o, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on | ||||
| // its base (which could be another delta), the delta target (here called original), | ||||
| // and the delta Object itself | ||||
| func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack { | ||||
| 	return &ObjectToPack{ | ||||
| 		Object:   delta, | ||||
| 		Base:     base, | ||||
| 		Original: original, | ||||
| 		Depth:    base.Depth + 1, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // BackToOriginal converts this ObjectToPack back to a non-deltified object if it was one | ||||
| func (o *ObjectToPack) BackToOriginal() { | ||||
| 	if o.IsDelta() && o.Original != nil { | ||||
| 		o.Object = o.Original | ||||
| 		o.Base = nil | ||||
| 		o.Depth = 0 | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // IsWritten reports whether this ObjectToPack has | ||||
| // already been written into the packfile | ||||
| func (o *ObjectToPack) IsWritten() bool { | ||||
| 	return o.Offset > 1 | ||||
| } | ||||
|  | ||||
| // MarkWantWrite marks this ObjectToPack as WantWrite | ||||
| // to avoid delta chain loops | ||||
| func (o *ObjectToPack) MarkWantWrite() { | ||||
| 	o.Offset = 1 | ||||
| } | ||||
|  | ||||
| // WantWrite reports whether this ObjectToPack was marked via MarkWantWrite before | ||||
| func (o *ObjectToPack) WantWrite() bool { | ||||
| 	return o.Offset == 1 | ||||
| } | ||||
|  | ||||
| // SetOriginal sets Original and saves its size, type and hash. If obj is | ||||
| // nil, Original is set to nil but the previously resolved values are kept | ||||
| func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) { | ||||
| 	o.Original = obj | ||||
| 	o.SaveOriginalMetadata() | ||||
| } | ||||
|  | ||||
| // SaveOriginalMetadata saves the size, type and hash of the Original object | ||||
| func (o *ObjectToPack) SaveOriginalMetadata() { | ||||
| 	if o.Original != nil { | ||||
| 		o.originalSize = o.Original.Size() | ||||
| 		o.originalType = o.Original.Type() | ||||
| 		o.originalHash = o.Original.Hash() | ||||
| 		o.resolvedOriginal = true | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // CleanOriginal sets Original to nil | ||||
| func (o *ObjectToPack) CleanOriginal() { | ||||
| 	o.Original = nil | ||||
| } | ||||
|  | ||||
| func (o *ObjectToPack) Type() plumbing.ObjectType { | ||||
| 	if o.Original != nil { | ||||
| 		return o.Original.Type() | ||||
| 	} | ||||
|  | ||||
| 	if o.resolvedOriginal { | ||||
| 		return o.originalType | ||||
| 	} | ||||
|  | ||||
| 	if o.Base != nil { | ||||
| 		return o.Base.Type() | ||||
| 	} | ||||
|  | ||||
| 	if o.Object != nil { | ||||
| 		return o.Object.Type() | ||||
| 	} | ||||
|  | ||||
| 	panic("cannot get type") | ||||
| } | ||||
|  | ||||
| func (o *ObjectToPack) Hash() plumbing.Hash { | ||||
| 	if o.Original != nil { | ||||
| 		return o.Original.Hash() | ||||
| 	} | ||||
|  | ||||
| 	if o.resolvedOriginal { | ||||
| 		return o.originalHash | ||||
| 	} | ||||
|  | ||||
| 	do, ok := o.Object.(plumbing.DeltaObject) | ||||
| 	if ok { | ||||
| 		return do.ActualHash() | ||||
| 	} | ||||
|  | ||||
| 	panic("cannot get hash") | ||||
| } | ||||
|  | ||||
| func (o *ObjectToPack) Size() int64 { | ||||
| 	if o.Original != nil { | ||||
| 		return o.Original.Size() | ||||
| 	} | ||||
|  | ||||
| 	if o.resolvedOriginal { | ||||
| 		return o.originalSize | ||||
| 	} | ||||
|  | ||||
| 	do, ok := o.Object.(plumbing.DeltaObject) | ||||
| 	if ok { | ||||
| 		return do.ActualSize() | ||||
| 	} | ||||
|  | ||||
| 	panic("cannot get ObjectToPack size") | ||||
| } | ||||
|  | ||||
| func (o *ObjectToPack) IsDelta() bool { | ||||
| 	return o.Base != nil | ||||
| } | ||||
|  | ||||
| func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) { | ||||
| 	o.Object = delta | ||||
| 	o.Base = base | ||||
| 	o.Depth = base.Depth + 1 | ||||
| } | ||||
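The Offset field doubles as a tiny state machine: 0 means not written, 1 is the MarkWantWrite sentinel used to break delta-chain cycles, and anything greater is a real pack offset. The sentinel is safe because the 12-byte pack header (signature, version, count) means no real object can start before offset 12. A minimal demonstration, assuming the exported API shown above:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	o := &packfile.ObjectToPack{}
	fmt.Println(o.IsWritten(), o.WantWrite()) // false false (Offset 0)

	o.MarkWantWrite()
	fmt.Println(o.IsWritten(), o.WantWrite()) // false true (Offset 1)

	o.Offset = 12 // the smallest real offset: right after the 12-byte pack header
	fmt.Println(o.IsWritten(), o.WantWrite()) // true false
}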
562 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go (generated, vendored, new file)
| @@ -0,0 +1,562 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"os" | ||||
|  | ||||
| 	billy "github.com/go-git/go-billy/v5" | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/cache" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/format/idxfile" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/storer" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// ErrInvalidObject is returned by Decode when an invalid object is | ||||
| 	// found in the packfile. | ||||
| 	ErrInvalidObject = NewError("invalid git object") | ||||
| 	// ErrZLib is returned by Decode when there was an error unzipping | ||||
| 	// the packfile contents. | ||||
| 	ErrZLib = NewError("zlib reading error") | ||||
| ) | ||||
|  | ||||
| // When reading small objects from the packfile, it is beneficial to do so | ||||
| // at once to exploit the buffered I/O. In many cases the objects are so | ||||
| // small that they were already loaded into memory when the object header | ||||
| // was read from the packfile. Wrapping them in an FSObject would cause this | ||||
| // buffered data to be thrown away and then re-read later, with the | ||||
| // additional seeking causing reloads from disk. Objects smaller than this | ||||
| // threshold are therefore always read into memory and stored in the cache | ||||
| // instead of being wrapped in an FSObject. | ||||
| const smallObjectThreshold = 16 * 1024 | ||||
|  | ||||
| // Packfile allows retrieving information from inside a packfile. | ||||
| type Packfile struct { | ||||
| 	idxfile.Index | ||||
| 	fs             billy.Filesystem | ||||
| 	file           billy.File | ||||
| 	s              *Scanner | ||||
| 	deltaBaseCache cache.Object | ||||
| 	offsetToType   map[int64]plumbing.ObjectType | ||||
| } | ||||
|  | ||||
| // NewPackfileWithCache creates a new Packfile with the given object cache. | ||||
| // If the filesystem is provided, the packfile will return FSObjects, otherwise | ||||
| // it will return MemoryObjects. | ||||
| func NewPackfileWithCache( | ||||
| 	index idxfile.Index, | ||||
| 	fs billy.Filesystem, | ||||
| 	file billy.File, | ||||
| 	cache cache.Object, | ||||
| ) *Packfile { | ||||
| 	s := NewScanner(file) | ||||
| 	return &Packfile{ | ||||
| 		index, | ||||
| 		fs, | ||||
| 		file, | ||||
| 		s, | ||||
| 		cache, | ||||
| 		make(map[int64]plumbing.ObjectType), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewPackfile returns a packfile representation for the given packfile file | ||||
| // and packfile idx. | ||||
| // If the filesystem is provided, the packfile will return FSObjects, otherwise | ||||
| // it will return MemoryObjects. | ||||
| func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile { | ||||
| 	return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault()) | ||||
| } | ||||
|  | ||||
| // Get retrieves the encoded object in the packfile with the given hash. | ||||
| func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) { | ||||
| 	offset, err := p.FindOffset(h) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return p.objectAtOffset(offset, h) | ||||
| } | ||||
|  | ||||
| // GetByOffset retrieves the encoded object from the packfile at the given | ||||
| // offset. | ||||
| func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { | ||||
| 	hash, err := p.FindHash(o) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return p.objectAtOffset(o, hash) | ||||
| } | ||||
|  | ||||
| // GetSizeByOffset retrieves the size of the encoded object from the | ||||
| // packfile with the given offset. | ||||
| func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) { | ||||
| 	if _, err := p.s.SeekFromStart(o); err != nil { | ||||
| 		if err == io.EOF || isInvalid(err) { | ||||
| 			return 0, plumbing.ErrObjectNotFound | ||||
| 		} | ||||
|  | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	h, err := p.nextObjectHeader() | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	return p.getObjectSize(h) | ||||
| } | ||||
|  | ||||
| func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) { | ||||
| 	h, err := p.s.SeekObjectHeader(offset) | ||||
| 	p.s.pendingObject = nil | ||||
| 	return h, err | ||||
| } | ||||
|  | ||||
| func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { | ||||
| 	h, err := p.s.NextObjectHeader() | ||||
| 	p.s.pendingObject = nil | ||||
| 	return h, err | ||||
| } | ||||
|  | ||||
| func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 { | ||||
| 	delta := buf.Bytes() | ||||
| 	_, delta = decodeLEB128(delta) // skip src size | ||||
| 	sz, _ := decodeLEB128(delta) | ||||
| 	return int64(sz) | ||||
| } | ||||
|  | ||||
| func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) { | ||||
| 	switch h.Type { | ||||
| 	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: | ||||
| 		return h.Length, nil | ||||
| 	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: | ||||
| 		buf := bufPool.Get().(*bytes.Buffer) | ||||
| 		defer bufPool.Put(buf) | ||||
| 		buf.Reset() | ||||
|  | ||||
| 		if _, _, err := p.s.NextObject(buf); err != nil { | ||||
| 			return 0, err | ||||
| 		} | ||||
|  | ||||
| 		return p.getDeltaObjectSize(buf), nil | ||||
| 	default: | ||||
| 		return 0, ErrInvalidObject.AddDetails("type %q", h.Type) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) { | ||||
| 	switch h.Type { | ||||
| 	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: | ||||
| 		return h.Type, nil | ||||
| 	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: | ||||
| 		var offset int64 | ||||
| 		if h.Type == plumbing.REFDeltaObject { | ||||
| 			offset, err = p.FindOffset(h.Reference) | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
| 		} else { | ||||
| 			offset = h.OffsetReference | ||||
| 		} | ||||
|  | ||||
| 		if baseType, ok := p.offsetToType[offset]; ok { | ||||
| 			typ = baseType | ||||
| 		} else { | ||||
| 			h, err = p.objectHeaderAtOffset(offset) | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			typ, err = p.getObjectType(h) | ||||
| 			if err != nil { | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	default: | ||||
| 		err = ErrInvalidObject.AddDetails("type %q", h.Type) | ||||
| 	} | ||||
|  | ||||
| 	p.offsetToType[h.Offset] = typ | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) { | ||||
| 	if obj, ok := p.cacheGet(hash); ok { | ||||
| 		return obj, nil | ||||
| 	} | ||||
|  | ||||
| 	h, err := p.objectHeaderAtOffset(offset) | ||||
| 	if err != nil { | ||||
| 		if err == io.EOF || isInvalid(err) { | ||||
| 			return nil, plumbing.ErrObjectNotFound | ||||
| 		} | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return p.getNextObject(h, hash) | ||||
| } | ||||
|  | ||||
| func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) { | ||||
| 	var err error | ||||
|  | ||||
| 	// If we have no filesystem, we will return a MemoryObject instead | ||||
| 	// of an FSObject. | ||||
| 	if p.fs == nil { | ||||
| 		return p.getNextMemoryObject(h) | ||||
| 	} | ||||
|  | ||||
| 	// If the object is small enough then read it completely into memory now since | ||||
| 	// it is already read from disk into buffer anyway. For delta objects we want | ||||
| 	// to perform the optimization too, but we have to be careful about applying | ||||
| 	// small deltas on big objects. | ||||
| 	var size int64 | ||||
| 	if h.Length <= smallObjectThreshold { | ||||
| 		if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject { | ||||
| 			return p.getNextMemoryObject(h) | ||||
| 		} | ||||
|  | ||||
| 		// For delta objects we read the delta data and apply the small object | ||||
| 		// optimization only if the expanded version of the object still meets | ||||
| 		// the small object threshold condition. | ||||
| 		buf := bufPool.Get().(*bytes.Buffer) | ||||
| 		defer bufPool.Put(buf) | ||||
| 		buf.Reset() | ||||
| 		if _, _, err := p.s.NextObject(buf); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		size = p.getDeltaObjectSize(buf) | ||||
| 		if size <= smallObjectThreshold { | ||||
| 			var obj = new(plumbing.MemoryObject) | ||||
| 			obj.SetSize(size) | ||||
| 			if h.Type == plumbing.REFDeltaObject { | ||||
| 				err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf) | ||||
| 			} else { | ||||
| 				err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf) | ||||
| 			} | ||||
| 			return obj, err | ||||
| 		} | ||||
| 	} else { | ||||
| 		size, err = p.getObjectSize(h) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	typ, err := p.getObjectType(h) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	p.offsetToType[h.Offset] = typ | ||||
|  | ||||
| 	return NewFSObject( | ||||
| 		hash, | ||||
| 		typ, | ||||
| 		h.Offset, | ||||
| 		size, | ||||
| 		p.Index, | ||||
| 		p.fs, | ||||
| 		p.file.Name(), | ||||
| 		p.deltaBaseCache, | ||||
| 	), nil | ||||
| } | ||||
|  | ||||
| func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { | ||||
| 	h, err := p.objectHeaderAtOffset(offset) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// getObjectContent is called from FSObject, so we have to explicitly | ||||
| 	// get a memory object here to avoid a recursive cycle | ||||
| 	obj, err := p.getNextMemoryObject(h) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return obj.Reader() | ||||
| } | ||||
|  | ||||
| func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) { | ||||
| 	var obj = new(plumbing.MemoryObject) | ||||
| 	obj.SetSize(h.Length) | ||||
| 	obj.SetType(h.Type) | ||||
|  | ||||
| 	var err error | ||||
| 	switch h.Type { | ||||
| 	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: | ||||
| 		err = p.fillRegularObjectContent(obj) | ||||
| 	case plumbing.REFDeltaObject: | ||||
| 		err = p.fillREFDeltaObjectContent(obj, h.Reference) | ||||
| 	case plumbing.OFSDeltaObject: | ||||
| 		err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference) | ||||
| 	default: | ||||
| 		err = ErrInvalidObject.AddDetails("type %q", h.Type) | ||||
| 	} | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	p.offsetToType[h.Offset] = obj.Type() | ||||
|  | ||||
| 	return obj, nil | ||||
| } | ||||
|  | ||||
| func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error { | ||||
| 	w, err := obj.Writer() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	_, _, err = p.s.NextObject(w) | ||||
| 	p.cachePut(obj) | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error { | ||||
| 	buf := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(buf) | ||||
| 	buf.Reset() | ||||
| 	_, _, err := p.s.NextObject(buf) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf) | ||||
| } | ||||
|  | ||||
| func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error { | ||||
| 	var err error | ||||
|  | ||||
| 	base, ok := p.cacheGet(ref) | ||||
| 	if !ok { | ||||
| 		base, err = p.Get(ref) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	obj.SetType(base.Type()) | ||||
| 	err = ApplyDelta(obj, base, buf.Bytes()) | ||||
| 	p.cachePut(obj) | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error { | ||||
| 	buf := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(buf) | ||||
| 	buf.Reset() | ||||
| 	_, _, err := p.s.NextObject(buf) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf) | ||||
| } | ||||
|  | ||||
| func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error { | ||||
| 	hash, err := p.FindHash(offset) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	base, err := p.objectAtOffset(offset, hash) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	obj.SetType(base.Type()) | ||||
| 	err = ApplyDelta(obj, base, buf.Bytes()) | ||||
| 	p.cachePut(obj) | ||||
|  | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) { | ||||
| 	if p.deltaBaseCache == nil { | ||||
| 		return nil, false | ||||
| 	} | ||||
|  | ||||
| 	return p.deltaBaseCache.Get(h) | ||||
| } | ||||
|  | ||||
| func (p *Packfile) cachePut(obj plumbing.EncodedObject) { | ||||
| 	if p.deltaBaseCache == nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	p.deltaBaseCache.Put(obj) | ||||
| } | ||||
|  | ||||
| // GetAll returns an iterator with all encoded objects in the packfile. | ||||
| // The returned iterator is not thread-safe; it should be used from the | ||||
| // same goroutine as the Packfile instance. | ||||
| func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) { | ||||
| 	return p.GetByType(plumbing.AnyObject) | ||||
| } | ||||
|  | ||||
| // GetByType returns all the objects of the given type. | ||||
| func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) { | ||||
| 	switch typ { | ||||
| 	case plumbing.AnyObject, | ||||
| 		plumbing.BlobObject, | ||||
| 		plumbing.TreeObject, | ||||
| 		plumbing.CommitObject, | ||||
| 		plumbing.TagObject: | ||||
| 		entries, err := p.EntriesByOffset() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		return &objectIter{ | ||||
| 			// The easiest way to provide an object decoder is just to pass the | ||||
| 			// Packfile instance itself; its cache and offset-to-type map are | ||||
| 			// shared with the iterator, reusing as much decoded state as | ||||
| 			// possible. | ||||
| 			p:    p, | ||||
| 			iter: entries, | ||||
| 			typ:  typ, | ||||
| 		}, nil | ||||
| 	default: | ||||
| 		return nil, plumbing.ErrInvalidType | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // ID returns the ID of the packfile, which is the checksum at the end of it. | ||||
| func (p *Packfile) ID() (plumbing.Hash, error) { | ||||
| 	prev, err := p.file.Seek(-20, io.SeekEnd) | ||||
| 	if err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	var hash plumbing.Hash | ||||
| 	if _, err := io.ReadFull(p.file, hash[:]); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	if _, err := p.file.Seek(prev, io.SeekStart); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	return hash, nil | ||||
| } | ||||
|  | ||||
| // Scanner returns the packfile's Scanner | ||||
| func (p *Packfile) Scanner() *Scanner { | ||||
| 	return p.s | ||||
| } | ||||
|  | ||||
| // Close the packfile and its resources. | ||||
| func (p *Packfile) Close() error { | ||||
| 	closer, ok := p.file.(io.Closer) | ||||
| 	if !ok { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	return closer.Close() | ||||
| } | ||||
|  | ||||
| type objectIter struct { | ||||
| 	p    *Packfile | ||||
| 	typ  plumbing.ObjectType | ||||
| 	iter idxfile.EntryIter | ||||
| } | ||||
|  | ||||
| func (i *objectIter) Next() (plumbing.EncodedObject, error) { | ||||
| 	for { | ||||
| 		e, err := i.iter.Next() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if i.typ != plumbing.AnyObject { | ||||
| 			if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok { | ||||
| 				if typ != i.typ { | ||||
| 					continue | ||||
| 				} | ||||
| 			} else if obj, ok := i.p.cacheGet(e.Hash); ok { | ||||
| 				if obj.Type() != i.typ { | ||||
| 					i.p.offsetToType[int64(e.Offset)] = obj.Type() | ||||
| 					continue | ||||
| 				} | ||||
| 				return obj, nil | ||||
| 			} else { | ||||
| 				h, err := i.p.objectHeaderAtOffset(int64(e.Offset)) | ||||
| 				if err != nil { | ||||
| 					return nil, err | ||||
| 				} | ||||
|  | ||||
| 				if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject { | ||||
| 					typ, err := i.p.getObjectType(h) | ||||
| 					if err != nil { | ||||
| 						return nil, err | ||||
| 					} | ||||
| 					if typ != i.typ { | ||||
| 						i.p.offsetToType[int64(e.Offset)] = typ | ||||
| 						continue | ||||
| 					} | ||||
| 					// getObjectType will seek in the file, so we cannot use getNextObject safely | ||||
| 					return i.p.objectAtOffset(int64(e.Offset), e.Hash) | ||||
| 				} else { | ||||
| 					if h.Type != i.typ { | ||||
| 						i.p.offsetToType[int64(e.Offset)] = h.Type | ||||
| 						continue | ||||
| 					} | ||||
| 					return i.p.getNextObject(h, e.Hash) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		return obj, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error { | ||||
| 	for { | ||||
| 		o, err := i.Next() | ||||
| 		if err != nil { | ||||
| 			if err == io.EOF { | ||||
| 				return nil | ||||
| 			} | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		if err := f(o); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (i *objectIter) Close() { | ||||
| 	i.iter.Close() | ||||
| } | ||||
|  | ||||
| // isInvalid checks whether an error is an os.PathError with an os.ErrInvalid | ||||
| // error inside. It also checks for the Windows error string, which is | ||||
| // different from os.ErrInvalid. | ||||
| func isInvalid(err error) bool { | ||||
| 	pe, ok := err.(*os.PathError) | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	errstr := pe.Err.Error() | ||||
| 	return errstr == errInvalidUnix || errstr == errInvalidWindows | ||||
| } | ||||
|  | ||||
| // errInvalidWindows is the Windows equivalent to os.ErrInvalid | ||||
| const errInvalidWindows = "The parameter is incorrect." | ||||
|  | ||||
| var errInvalidUnix = os.ErrInvalid.Error() | ||||
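Packfile.ID above works because a version 2 packfile ends with the SHA-1 checksum of everything that precedes it, so the ID is simply the trailing 20 bytes. A standalone sketch of the same idea for a pack on disk (the path is illustrative):

package main

import (
	"fmt"
	"io"
	"os"
)

// packID reads the trailing 20-byte SHA-1 checksum of a packfile,
// mirroring what Packfile.ID does on an already open billy.File.
func packID(path string) ([20]byte, error) {
	var id [20]byte
	f, err := os.Open(path)
	if err != nil {
		return id, err
	}
	defer f.Close()

	if _, err := f.Seek(-20, io.SeekEnd); err != nil {
		return id, err
	}
	_, err = io.ReadFull(f, id[:])
	return id, err
}

func main() {
	id, err := packID("example.pack") // hypothetical path
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%x\n", id)
}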
490 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go (generated, vendored, new file)
| @@ -0,0 +1,490 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/cache" | ||||
| 	"github.com/go-git/go-git/v5/plumbing/storer" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// ErrReferenceDeltaNotFound is returned when the reference delta is not | ||||
| 	// found. | ||||
| 	ErrReferenceDeltaNotFound = errors.New("reference delta not found") | ||||
|  | ||||
| 	// ErrNotSeekableSource is returned when the source for the parser is not | ||||
| 	// seekable and a storage was not provided, so it can't be parsed. | ||||
| 	ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided") | ||||
|  | ||||
| 	// ErrDeltaNotCached is returned when the delta could not be found in cache. | ||||
| 	ErrDeltaNotCached = errors.New("delta could not be found in cache") | ||||
| ) | ||||
|  | ||||
| // Observer interface is implemented by index encoders. | ||||
| type Observer interface { | ||||
| 	// OnHeader is called when a new packfile is opened. | ||||
| 	OnHeader(count uint32) error | ||||
| 	// OnInflatedObjectHeader is called for each object header read. | ||||
| 	OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error | ||||
| 	// OnInflatedObjectContent is called for each decoded object. | ||||
| 	OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error | ||||
| 	// OnFooter is called when decoding is done. | ||||
| 	OnFooter(h plumbing.Hash) error | ||||
| } | ||||
|  | ||||
| // Parser decodes a packfile and calls any observers associated with it. It | ||||
| // is used to generate indexes. | ||||
| type Parser struct { | ||||
| 	storage    storer.EncodedObjectStorer | ||||
| 	scanner    *Scanner | ||||
| 	count      uint32 | ||||
| 	oi         []*objectInfo | ||||
| 	oiByHash   map[plumbing.Hash]*objectInfo | ||||
| 	oiByOffset map[int64]*objectInfo | ||||
| 	hashOffset map[plumbing.Hash]int64 | ||||
| 	checksum   plumbing.Hash | ||||
|  | ||||
| 	cache *cache.BufferLRU | ||||
| 	// delta content by offset, only used if source is not seekable | ||||
| 	deltas map[int64][]byte | ||||
|  | ||||
| 	ob []Observer | ||||
| } | ||||
|  | ||||
| // NewParser creates a new Parser. The Scanner source must be seekable. | ||||
| // If it's not, NewParserWithStorage should be used instead. | ||||
| func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) { | ||||
| 	return NewParserWithStorage(scanner, nil, ob...) | ||||
| } | ||||
|  | ||||
| // NewParserWithStorage creates a new Parser. The scanner source must either | ||||
| // be seekable or a storage must be provided. | ||||
| func NewParserWithStorage( | ||||
| 	scanner *Scanner, | ||||
| 	storage storer.EncodedObjectStorer, | ||||
| 	ob ...Observer, | ||||
| ) (*Parser, error) { | ||||
| 	if !scanner.IsSeekable && storage == nil { | ||||
| 		return nil, ErrNotSeekableSource | ||||
| 	} | ||||
|  | ||||
| 	var deltas map[int64][]byte | ||||
| 	if !scanner.IsSeekable { | ||||
| 		deltas = make(map[int64][]byte) | ||||
| 	} | ||||
|  | ||||
| 	return &Parser{ | ||||
| 		storage: storage, | ||||
| 		scanner: scanner, | ||||
| 		ob:      ob, | ||||
| 		count:   0, | ||||
| 		cache:   cache.NewBufferLRUDefault(), | ||||
| 		deltas:  deltas, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) forEachObserver(f func(o Observer) error) error { | ||||
| 	for _, o := range p.ob { | ||||
| 		if err := f(o); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) onHeader(count uint32) error { | ||||
| 	return p.forEachObserver(func(o Observer) error { | ||||
| 		return o.OnHeader(count) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (p *Parser) onInflatedObjectHeader( | ||||
| 	t plumbing.ObjectType, | ||||
| 	objSize int64, | ||||
| 	pos int64, | ||||
| ) error { | ||||
| 	return p.forEachObserver(func(o Observer) error { | ||||
| 		return o.OnInflatedObjectHeader(t, objSize, pos) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (p *Parser) onInflatedObjectContent( | ||||
| 	h plumbing.Hash, | ||||
| 	pos int64, | ||||
| 	crc uint32, | ||||
| 	content []byte, | ||||
| ) error { | ||||
| 	return p.forEachObserver(func(o Observer) error { | ||||
| 		return o.OnInflatedObjectContent(h, pos, crc, content) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (p *Parser) onFooter(h plumbing.Hash) error { | ||||
| 	return p.forEachObserver(func(o Observer) error { | ||||
| 		return o.OnFooter(h) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // Parse starts the decoding phase of the packfile. | ||||
| func (p *Parser) Parse() (plumbing.Hash, error) { | ||||
| 	if err := p.init(); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	if err := p.indexObjects(); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	var err error | ||||
| 	p.checksum, err = p.scanner.Checksum() | ||||
| 	if err != nil && err != io.EOF { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	if err := p.resolveDeltas(); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	if err := p.onFooter(p.checksum); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	return p.checksum, nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) init() error { | ||||
| 	_, c, err := p.scanner.Header() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if err := p.onHeader(c); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	p.count = c | ||||
| 	p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count) | ||||
| 	p.oiByOffset = make(map[int64]*objectInfo, p.count) | ||||
| 	p.oi = make([]*objectInfo, p.count) | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) indexObjects() error { | ||||
| 	buf := new(bytes.Buffer) | ||||
|  | ||||
| 	for i := uint32(0); i < p.count; i++ { | ||||
| 		buf.Reset() | ||||
|  | ||||
| 		oh, err := p.scanner.NextObjectHeader() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		delta := false | ||||
| 		var ota *objectInfo | ||||
| 		switch t := oh.Type; t { | ||||
| 		case plumbing.OFSDeltaObject: | ||||
| 			delta = true | ||||
|  | ||||
| 			parent, ok := p.oiByOffset[oh.OffsetReference] | ||||
| 			if !ok { | ||||
| 				return plumbing.ErrObjectNotFound | ||||
| 			} | ||||
|  | ||||
| 			ota = newDeltaObject(oh.Offset, oh.Length, t, parent) | ||||
| 			parent.Children = append(parent.Children, ota) | ||||
| 		case plumbing.REFDeltaObject: | ||||
| 			delta = true | ||||
| 			parent, ok := p.oiByHash[oh.Reference] | ||||
| 			if !ok { | ||||
| 				// can't find the referenced object in this pack file; | ||||
| 				// this must be a "thin" pack. | ||||
| 				parent = &objectInfo{ // placeholder parent | ||||
| 					SHA1:        oh.Reference, | ||||
| 					ExternalRef: true, // mark as an external reference that must be resolved | ||||
| 					Type:        plumbing.AnyObject, | ||||
| 					DiskType:    plumbing.AnyObject, | ||||
| 				} | ||||
| 				p.oiByHash[oh.Reference] = parent | ||||
| 			} | ||||
| 			ota = newDeltaObject(oh.Offset, oh.Length, t, parent) | ||||
| 			parent.Children = append(parent.Children, ota) | ||||
|  | ||||
| 		default: | ||||
| 			ota = newBaseObject(oh.Offset, oh.Length, t) | ||||
| 		} | ||||
|  | ||||
| 		_, crc, err := p.scanner.NextObject(buf) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		ota.Crc32 = crc | ||||
| 		ota.Length = oh.Length | ||||
|  | ||||
| 		data := buf.Bytes() | ||||
| 		if !delta { | ||||
| 			sha1, err := getSHA1(ota.Type, data) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			ota.SHA1 = sha1 | ||||
| 			p.oiByHash[ota.SHA1] = ota | ||||
| 		} | ||||
|  | ||||
| 		if p.storage != nil && !delta { | ||||
| 			obj := new(plumbing.MemoryObject) | ||||
| 			obj.SetSize(oh.Length) | ||||
| 			obj.SetType(oh.Type) | ||||
| 			if _, err := obj.Write(data); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			if _, err := p.storage.SetEncodedObject(obj); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if delta && !p.scanner.IsSeekable { | ||||
| 			p.deltas[oh.Offset] = make([]byte, len(data)) | ||||
| 			copy(p.deltas[oh.Offset], data) | ||||
| 		} | ||||
|  | ||||
| 		p.oiByOffset[oh.Offset] = ota | ||||
| 		p.oi[i] = ota | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) resolveDeltas() error { | ||||
| 	buf := &bytes.Buffer{} | ||||
| 	for _, obj := range p.oi { | ||||
| 		buf.Reset() | ||||
| 		err := p.get(obj, buf) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		content := buf.Bytes() | ||||
|  | ||||
| 		if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		if !obj.IsDelta() && len(obj.Children) > 0 { | ||||
| 			for _, child := range obj.Children { | ||||
| 				if err := p.resolveObject(ioutil.Discard, child, content); err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 			} | ||||
|  | ||||
| 			// Remove the delta from the cache. | ||||
| 			if obj.DiskType.IsDelta() && !p.scanner.IsSeekable { | ||||
| 				delete(p.deltas, obj.Offset) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) error { | ||||
| 	if !o.ExternalRef { // skip cache check for placeholder parents | ||||
| 		b, ok := p.cache.Get(o.Offset) | ||||
| 		if ok { | ||||
| 			_, err := buf.Write(b) | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// If it's not in the cache and is not a delta, we can try to find it in | ||||
| 	// the storage, if there is one. External refs must enter here. | ||||
| 	if p.storage != nil && !o.Type.IsDelta() { | ||||
| 		e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		o.Type = e.Type() | ||||
|  | ||||
| 		r, err := e.Reader() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		_, err = buf.ReadFrom(io.LimitReader(r, e.Size())) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if o.ExternalRef { | ||||
| 		// we were not able to resolve a ref in a thin pack | ||||
| 		return ErrReferenceDeltaNotFound | ||||
| 	} | ||||
|  | ||||
| 	if o.DiskType.IsDelta() { | ||||
| 		b := bufPool.Get().(*bytes.Buffer) | ||||
| 		defer bufPool.Put(b) | ||||
| 		b.Reset() | ||||
| 		err := p.get(o.Parent, b) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		base := b.Bytes() | ||||
|  | ||||
| 		err = p.resolveObject(buf, o, base) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} else { | ||||
| 		err := p.readData(buf, o) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(o.Children) > 0 { | ||||
| 		data := make([]byte, buf.Len()) | ||||
| 		copy(data, buf.Bytes()) | ||||
| 		p.cache.Put(o.Offset, data) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) resolveObject( | ||||
| 	w io.Writer, | ||||
| 	o *objectInfo, | ||||
| 	base []byte, | ||||
| ) error { | ||||
| 	if !o.DiskType.IsDelta() { | ||||
| 		return nil | ||||
| 	} | ||||
| 	buf := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(buf) | ||||
| 	buf.Reset() | ||||
| 	err := p.readData(buf, o) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	data := buf.Bytes() | ||||
|  | ||||
| 	data, err = applyPatchBase(o, data, base) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if p.storage != nil { | ||||
| 		obj := new(plumbing.MemoryObject) | ||||
| 		obj.SetSize(o.Size()) | ||||
| 		obj.SetType(o.Type) | ||||
| 		if _, err := obj.Write(data); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		if _, err := p.storage.SetEncodedObject(obj); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	_, err = w.Write(data) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (p *Parser) readData(w io.Writer, o *objectInfo) error { | ||||
| 	if !p.scanner.IsSeekable && o.DiskType.IsDelta() { | ||||
| 		data, ok := p.deltas[o.Offset] | ||||
| 		if !ok { | ||||
| 			return ErrDeltaNotCached | ||||
| 		} | ||||
| 		_, err := w.Write(data) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if _, _, err := p.scanner.NextObject(w); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { | ||||
| 	patched, err := PatchDelta(base, data) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if ota.SHA1 == plumbing.ZeroHash { | ||||
| 		ota.Type = ota.Parent.Type | ||||
| 		sha1, err := getSHA1(ota.Type, patched) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		ota.SHA1 = sha1 | ||||
| 		ota.Length = int64(len(patched)) | ||||
| 	} | ||||
|  | ||||
| 	return patched, nil | ||||
| } | ||||
|  | ||||
| func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { | ||||
| 	hasher := plumbing.NewHasher(t, int64(len(data))) | ||||
| 	if _, err := hasher.Write(data); err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	return hasher.Sum(), nil | ||||
| } | ||||
|  | ||||
| type objectInfo struct { | ||||
| 	Offset      int64 | ||||
| 	Length      int64 | ||||
| 	Type        plumbing.ObjectType | ||||
| 	DiskType    plumbing.ObjectType | ||||
| 	ExternalRef bool // indicates this is an external reference in a thin pack file | ||||
|  | ||||
| 	Crc32 uint32 | ||||
|  | ||||
| 	Parent   *objectInfo | ||||
| 	Children []*objectInfo | ||||
| 	SHA1     plumbing.Hash | ||||
| } | ||||
|  | ||||
| func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo { | ||||
| 	return newDeltaObject(offset, length, t, nil) | ||||
| } | ||||
|  | ||||
| func newDeltaObject( | ||||
| 	offset, length int64, | ||||
| 	t plumbing.ObjectType, | ||||
| 	parent *objectInfo, | ||||
| ) *objectInfo { | ||||
| 	obj := &objectInfo{ | ||||
| 		Offset:   offset, | ||||
| 		Length:   length, | ||||
| 		Type:     t, | ||||
| 		DiskType: t, | ||||
| 		Crc32:    0, | ||||
| 		Parent:   parent, | ||||
| 	} | ||||
|  | ||||
| 	return obj | ||||
| } | ||||
|  | ||||
| func (o *objectInfo) IsDelta() bool { | ||||
| 	return o.Type.IsDelta() | ||||
| } | ||||
|  | ||||
| func (o *objectInfo) Size() int64 { | ||||
| 	return o.Length | ||||
| } | ||||
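In practice the Observer is usually an index writer: it listens to the OnHeader, OnInflatedObjectHeader, OnInflatedObjectContent and OnFooter events and builds an idx for the pack. A hedged usage sketch, assuming idxfile.Writer implements Observer as it does in go-git v5 (the pack path is illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	f, err := os.Open("example.pack") // hypothetical path
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer f.Close()

	// idxfile.Writer records every parse event and can hand back the
	// finished in-memory index afterwards.
	w := new(idxfile.Writer)
	parser, err := packfile.NewParser(packfile.NewScanner(f), w)
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	checksum, err := parser.Parse()
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	idx, err := w.Index()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("pack checksum:", checksum, "index built:", idx != nil)
}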
248 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go (generated, vendored, new file)
| @@ -0,0 +1,248 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| ) | ||||
|  | ||||
| // See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h | ||||
| // https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c, | ||||
| // and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js | ||||
| // for details about the delta format. | ||||
|  | ||||
| const deltaSizeMin = 4 | ||||
|  | ||||
| // ApplyDelta writes to target the result of applying the modification deltas in delta to base. | ||||
| func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error { | ||||
| 	r, err := base.Reader() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	w, err := target.Writer() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	buf := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(buf) | ||||
| 	buf.Reset() | ||||
| 	_, err = buf.ReadFrom(r) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	src := buf.Bytes() | ||||
|  | ||||
| 	dst := bufPool.Get().(*bytes.Buffer) | ||||
| 	defer bufPool.Put(dst) | ||||
| 	dst.Reset() | ||||
| 	err = patchDelta(dst, src, delta) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	target.SetSize(int64(dst.Len())) | ||||
|  | ||||
| 	b := byteSlicePool.Get().([]byte) | ||||
| 	_, err = io.CopyBuffer(w, dst, b) | ||||
| 	byteSlicePool.Put(b) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	ErrInvalidDelta = errors.New("invalid delta") | ||||
| 	ErrDeltaCmd     = errors.New("wrong delta command") | ||||
| ) | ||||
|  | ||||
| // PatchDelta returns the result of applying the modification deltas in delta to src. | ||||
| // An error will be returned if delta is corrupted (ErrInvalidDelta) or an action | ||||
| // command is neither copy-from-source nor copy-from-delta (ErrDeltaCmd). | ||||
| func PatchDelta(src, delta []byte) ([]byte, error) { | ||||
| 	b := &bytes.Buffer{} | ||||
| 	if err := patchDelta(b, src, delta); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return b.Bytes(), nil | ||||
| } | ||||
|  | ||||
| func patchDelta(dst *bytes.Buffer, src, delta []byte) error { | ||||
| 	if len(delta) < deltaSizeMin { | ||||
| 		return ErrInvalidDelta | ||||
| 	} | ||||
|  | ||||
| 	srcSz, delta := decodeLEB128(delta) | ||||
| 	if srcSz != uint(len(src)) { | ||||
| 		return ErrInvalidDelta | ||||
| 	} | ||||
|  | ||||
| 	targetSz, delta := decodeLEB128(delta) | ||||
| 	remainingTargetSz := targetSz | ||||
|  | ||||
| 	var cmd byte | ||||
| 	dst.Grow(int(targetSz)) | ||||
| 	for { | ||||
| 		if len(delta) == 0 { | ||||
| 			return ErrInvalidDelta | ||||
| 		} | ||||
|  | ||||
| 		cmd = delta[0] | ||||
| 		delta = delta[1:] | ||||
| 		if isCopyFromSrc(cmd) { | ||||
| 			var offset, sz uint | ||||
| 			var err error | ||||
| 			offset, delta, err = decodeOffset(cmd, delta) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			sz, delta, err = decodeSize(cmd, delta) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			if invalidSize(sz, targetSz) || | ||||
| 				invalidOffsetSize(offset, sz, srcSz) { | ||||
| 				break | ||||
| 			} | ||||
| 			dst.Write(src[offset : offset+sz]) | ||||
| 			remainingTargetSz -= sz | ||||
| 		} else if isCopyFromDelta(cmd) { | ||||
| 			sz := uint(cmd) // cmd is the size itself | ||||
| 			if invalidSize(sz, targetSz) { | ||||
| 				return ErrInvalidDelta | ||||
| 			} | ||||
|  | ||||
| 			if uint(len(delta)) < sz { | ||||
| 				return ErrInvalidDelta | ||||
| 			} | ||||
|  | ||||
| 			dst.Write(delta[0:sz]) | ||||
| 			remainingTargetSz -= sz | ||||
| 			delta = delta[sz:] | ||||
| 		} else { | ||||
| 			return ErrDeltaCmd | ||||
| 		} | ||||
|  | ||||
| 		if remainingTargetSz <= 0 { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Decodes a number encoded as an unsigned LEB128 at the start of some | ||||
| // binary data and returns the decoded number and the rest of the | ||||
| // stream. | ||||
| // | ||||
| // This must be called twice on the delta data buffer, first to get the | ||||
| // expected source buffer size, and again to get the target buffer size. | ||||
| func decodeLEB128(input []byte) (uint, []byte) { | ||||
| 	var num, sz uint | ||||
| 	var b byte | ||||
| 	for { | ||||
| 		b = input[sz] | ||||
| 		num |= (uint(b) & payload) << (sz * 7) // concatenates 7-bit chunks | ||||
| 		sz++ | ||||
|  | ||||
| 		if uint(b)&continuation == 0 || sz == uint(len(input)) { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return num, input[sz:] | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	payload      = 0x7f // 0111 1111 | ||||
| 	continuation = 0x80 // 1000 0000 | ||||
| ) | ||||
|  | ||||
| func isCopyFromSrc(cmd byte) bool { | ||||
| 	return (cmd & 0x80) != 0 | ||||
| } | ||||
|  | ||||
| func isCopyFromDelta(cmd byte) bool { | ||||
| 	return (cmd&0x80) == 0 && cmd != 0 | ||||
| } | ||||
|  | ||||
| func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) { | ||||
| 	var offset uint | ||||
| 	if (cmd & 0x01) != 0 { | ||||
| 		if len(delta) == 0 { | ||||
| 			return 0, nil, ErrInvalidDelta | ||||
| 		} | ||||
| 		offset = uint(delta[0]) | ||||
| 		delta = delta[1:] | ||||
| 	} | ||||
| 	if (cmd & 0x02) != 0 { | ||||
| 		if len(delta) == 0 { | ||||
| 			return 0, nil, ErrInvalidDelta | ||||
| 		} | ||||
| 		offset |= uint(delta[0]) << 8 | ||||
| 		delta = delta[1:] | ||||
| 	} | ||||
| 	if (cmd & 0x04) != 0 { | ||||
| 		if len(delta) == 0 { | ||||
| 			return 0, nil, ErrInvalidDelta | ||||
| 		} | ||||
| 		offset |= uint(delta[0]) << 16 | ||||
| 		delta = delta[1:] | ||||
| 	} | ||||
| 	if (cmd & 0x08) != 0 { | ||||
| 		if len(delta) == 0 { | ||||
| 			return 0, nil, ErrInvalidDelta | ||||
| 		} | ||||
| 		offset |= uint(delta[0]) << 24 | ||||
| 		delta = delta[1:] | ||||
| 	} | ||||
|  | ||||
| 	return offset, delta, nil | ||||
| } | ||||
|  | ||||
| func decodeSize(cmd byte, delta []byte) (uint, []byte, error) { | ||||
| 	var sz uint | ||||
| 	if (cmd & 0x10) != 0 { | ||||
| 		if len(delta) == 0 { | ||||
| 			return 0, nil, ErrInvalidDelta | ||||
| 		} | ||||
| 		sz = uint(delta[0]) | ||||
| 		delta = delta[1:] | ||||
| 	} | ||||
| 	if (cmd & 0x20) != 0 { | ||||
| 		if len(delta) == 0 { | ||||
| 			return 0, nil, ErrInvalidDelta | ||||
| 		} | ||||
| 		sz |= uint(delta[0]) << 8 | ||||
| 		delta = delta[1:] | ||||
| 	} | ||||
| 	if (cmd & 0x40) != 0 { | ||||
| 		if len(delta) == 0 { | ||||
| 			return 0, nil, ErrInvalidDelta | ||||
| 		} | ||||
| 		sz |= uint(delta[0]) << 16 | ||||
| 		delta = delta[1:] | ||||
| 	} | ||||
| 	if sz == 0 { | ||||
| 		sz = 0x10000 | ||||
| 	} | ||||
|  | ||||
| 	return sz, delta, nil | ||||
| } | ||||
|  | ||||
| func invalidSize(sz, targetSz uint) bool { | ||||
| 	return sz > targetSz | ||||
| } | ||||
|  | ||||
| func invalidOffsetSize(offset, sz, srcSz uint) bool { | ||||
| 	return sumOverflows(offset, sz) || | ||||
| 		offset+sz > srcSz | ||||
| } | ||||
|  | ||||
| func sumOverflows(a, b uint) bool { | ||||
| 	return a+b < a | ||||
| } | ||||
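To make the command encoding concrete, here is a hand-assembled delta run through PatchDelta: the two leading LEB128 varints give the source size (11) and target size (12), byte 0x90 is a copy-from-source command (MSB set, size flag 0x10, one size byte of 6, implicit offset 0), and byte 0x06 is a copy-from-delta command inserting the next six literal bytes. A minimal sketch:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	src := []byte("hello world")

	// srcSize=11, targetSize=12, copy src[0:6] ("hello "), insert "gopher".
	delta := append([]byte{0x0b, 0x0c, 0x90, 0x06, 0x06}, []byte("gopher")...)

	out, err := packfile.PatchDelta(src, delta)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%s\n", out) // prints: hello gopher
}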
466 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go (generated, vendored, new file)
| @@ -0,0 +1,466 @@ | ||||
| package packfile | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"compress/zlib" | ||||
| 	"fmt" | ||||
| 	"hash" | ||||
| 	"hash/crc32" | ||||
| 	"io" | ||||
| 	stdioutil "io/ioutil" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/go-git/go-git/v5/plumbing" | ||||
| 	"github.com/go-git/go-git/v5/utils/binary" | ||||
| 	"github.com/go-git/go-git/v5/utils/ioutil" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile | ||||
| 	ErrEmptyPackfile = NewError("empty packfile") | ||||
| 	// ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect. | ||||
| 	ErrBadSignature = NewError("malformed pack file signature") | ||||
| 	// ErrUnsupportedVersion is returned by ReadHeader when the packfile version is | ||||
| 	// different from VersionSupported. | ||||
| 	ErrUnsupportedVersion = NewError("unsupported packfile version") | ||||
| 	// ErrSeekNotSupported is returned if seeking is not supported | ||||
| 	ErrSeekNotSupported = NewError("not seek support") | ||||
| ) | ||||
|  | ||||
| // ObjectHeader contains the information related to the object; this information | ||||
| // is collected from the bytes preceding the object's content. | ||||
| type ObjectHeader struct { | ||||
| 	Type            plumbing.ObjectType | ||||
| 	Offset          int64 | ||||
| 	Length          int64 | ||||
| 	Reference       plumbing.Hash | ||||
| 	OffsetReference int64 | ||||
| } | ||||
|  | ||||
| type Scanner struct { | ||||
| 	r   *scannerReader | ||||
| 	crc hash.Hash32 | ||||
|  | ||||
| 	// pendingObject is used to detect whether an object has been read, or | ||||
| 	// is still waiting to be read | ||||
| 	pendingObject    *ObjectHeader | ||||
| 	version, objects uint32 | ||||
|  | ||||
| 	// IsSeekable says whether this scanner can do Seek or not; to have a | ||||
| 	// seekable Scanner, an r implementing io.Seeker is required | ||||
| 	IsSeekable bool | ||||
| } | ||||
|  | ||||
| // NewScanner returns a new Scanner based on a reader; if the given reader | ||||
| // implements io.ReadSeeker, the Scanner will also be seekable | ||||
| func NewScanner(r io.Reader) *Scanner { | ||||
| 	_, ok := r.(io.ReadSeeker) | ||||
|  | ||||
| 	crc := crc32.NewIEEE() | ||||
| 	return &Scanner{ | ||||
| 		r:          newScannerReader(r, crc), | ||||
| 		crc:        crc, | ||||
| 		IsSeekable: ok, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *Scanner) Reset(r io.Reader) { | ||||
| 	_, ok := r.(io.ReadSeeker) | ||||
|  | ||||
| 	s.r.Reset(r) | ||||
| 	s.crc.Reset() | ||||
| 	s.IsSeekable = ok | ||||
| 	s.pendingObject = nil | ||||
| 	s.version = 0 | ||||
| 	s.objects = 0 | ||||
| } | ||||
|  | ||||
| // Header reads the whole packfile header (signature, version and object count). | ||||
| // It returns the version and the object count and performs checks on the | ||||
| // validity of the signature and the version fields. | ||||
| func (s *Scanner) Header() (version, objects uint32, err error) { | ||||
| 	if s.version != 0 { | ||||
| 		return s.version, s.objects, nil | ||||
| 	} | ||||
|  | ||||
| 	sig, err := s.readSignature() | ||||
| 	if err != nil { | ||||
| 		if err == io.EOF { | ||||
| 			err = ErrEmptyPackfile | ||||
| 		} | ||||
|  | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if !s.isValidSignature(sig) { | ||||
| 		err = ErrBadSignature | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	version, err = s.readVersion() | ||||
| 	s.version = version | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if !s.isSupportedVersion(version) { | ||||
| 		err = ErrUnsupportedVersion.AddDetails("%d", version) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	objects, err = s.readCount() | ||||
| 	s.objects = objects | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // readSignature reads and returns the signature field in the packfile. | ||||
| func (s *Scanner) readSignature() ([]byte, error) { | ||||
| 	var sig = make([]byte, 4) | ||||
| 	if _, err := io.ReadFull(s.r, sig); err != nil { | ||||
| 		return []byte{}, err | ||||
| 	} | ||||
|  | ||||
| 	return sig, nil | ||||
| } | ||||
|  | ||||
| // isValidSignature returns whether sig is a valid packfile signature. | ||||
| func (s *Scanner) isValidSignature(sig []byte) bool { | ||||
| 	return bytes.Equal(sig, signature) | ||||
| } | ||||
|  | ||||
| // readVersion reads and returns the version field of a packfile. | ||||
| func (s *Scanner) readVersion() (uint32, error) { | ||||
| 	return binary.ReadUint32(s.r) | ||||
| } | ||||
|  | ||||
| // isSupportedVersion returns whether version v is supported by the parser. | ||||
| // The current supported version is VersionSupported, defined above. | ||||
| func (s *Scanner) isSupportedVersion(v uint32) bool { | ||||
| 	return v == VersionSupported | ||||
| } | ||||
|  | ||||
| // readCount reads and returns the count of objects field of a packfile. | ||||
| func (s *Scanner) readCount() (uint32, error) { | ||||
| 	return binary.ReadUint32(s.r) | ||||
| } | ||||
|  | ||||
| // SeekObjectHeader seeks to the specified offset and returns the ObjectHeader | ||||
| // of the next object in the reader | ||||
| func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) { | ||||
| 	// if seeking we assume that you are not interested in the header | ||||
| 	if s.version == 0 { | ||||
| 		s.version = VersionSupported | ||||
| 	} | ||||
|  | ||||
| 	if _, err := s.r.Seek(offset, io.SeekStart); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	h, err := s.nextObjectHeader() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	h.Offset = offset | ||||
| 	return h, nil | ||||
| } | ||||
|  | ||||
| // NextObjectHeader returns the ObjectHeader for the next object in the reader | ||||
| func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { | ||||
| 	if err := s.doPending(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	offset, err := s.r.Seek(0, io.SeekCurrent) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	h, err := s.nextObjectHeader() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	h.Offset = offset | ||||
| 	return h, nil | ||||
| } | ||||
|  | ||||
| // nextObjectHeader returns the ObjectHeader for the next object in the reader | ||||
| // without the Offset field | ||||
| func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) { | ||||
| 	s.r.Flush() | ||||
| 	s.crc.Reset() | ||||
|  | ||||
| 	h := &ObjectHeader{} | ||||
| 	s.pendingObject = h | ||||
|  | ||||
| 	var err error | ||||
| 	h.Offset, err = s.r.Seek(0, io.SeekCurrent) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	h.Type, h.Length, err = s.readObjectTypeAndLength() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	switch h.Type { | ||||
| 	case plumbing.OFSDeltaObject: | ||||
| 		no, err := binary.ReadVariableWidthInt(s.r) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		h.OffsetReference = h.Offset - no | ||||
| 	case plumbing.REFDeltaObject: | ||||
| 		var err error | ||||
| 		h.Reference, err = binary.ReadHash(s.r) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return h, nil | ||||
| } | ||||
|  | ||||
| func (s *Scanner) doPending() error { | ||||
| 	if s.version == 0 { | ||||
| 		var err error | ||||
| 		s.version, s.objects, err = s.Header() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return s.discardObjectIfNeeded() | ||||
| } | ||||
|  | ||||
| func (s *Scanner) discardObjectIfNeeded() error { | ||||
| 	if s.pendingObject == nil { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	h := s.pendingObject | ||||
| 	n, _, err := s.NextObject(stdioutil.Discard) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if n != h.Length { | ||||
| 		return fmt.Errorf( | ||||
| 			"error discarding object, discarded %d, expected %d", | ||||
| 			n, h.Length, | ||||
| 		) | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // readObjectTypeAndLength reads and returns the object type and the | ||||
| // length field from an object entry in a packfile. | ||||
| func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) { | ||||
| 	t, c, err := s.readType() | ||||
| 	if err != nil { | ||||
| 		return t, 0, err | ||||
| 	} | ||||
|  | ||||
| 	l, err := s.readLength(c) | ||||
|  | ||||
| 	return t, l, err | ||||
| } | ||||
|  | ||||
| func (s *Scanner) readType() (plumbing.ObjectType, byte, error) { | ||||
| 	var c byte | ||||
| 	var err error | ||||
| 	if c, err = s.r.ReadByte(); err != nil { | ||||
| 		return plumbing.ObjectType(0), 0, err | ||||
| 	} | ||||
|  | ||||
| 	typ := parseType(c) | ||||
|  | ||||
| 	return typ, c, nil | ||||
| } | ||||
|  | ||||
| func parseType(b byte) plumbing.ObjectType { | ||||
| 	return plumbing.ObjectType((b & maskType) >> firstLengthBits) | ||||
| } | ||||
|  | ||||
| // The length is encoded in the last 4 bits of the first byte and in | ||||
| // the last 7 bits of subsequent bytes.  The last byte has a 0 MSB. | ||||
| func (s *Scanner) readLength(first byte) (int64, error) { | ||||
| 	length := int64(first & maskFirstLength) | ||||
|  | ||||
| 	c := first | ||||
| 	shift := firstLengthBits | ||||
| 	var err error | ||||
| 	for c&maskContinue > 0 { | ||||
| 		if c, err = s.r.ReadByte(); err != nil { | ||||
| 			return 0, err | ||||
| 		} | ||||
|  | ||||
| 		length += int64(c&maskLength) << shift | ||||
| 		shift += lengthBits | ||||
| 	} | ||||
|  | ||||
| 	return length, nil | ||||
| } | ||||
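
// The object header parsed by readType and readLength above packs the type
// into bits 4-6 of the first byte and the size into a varint: 4 bits in the
// first byte, then 7 bits per continuation byte while the MSB is set. A
// standalone sketch of that decoding follows; the literal masks are assumed
// to match the package constants (maskType, maskFirstLength, maskContinue,
// maskLength) used above.

package main

import "fmt"

func parseObjectHeader(b []byte) (typ byte, length int64) {
	c := b[0]
	b = b[1:]
	typ = (c & 0x70) >> 4    // object type lives in bits 4-6
	length = int64(c & 0x0f) // low 4 bits of the size
	shift := uint(4)
	for c&0x80 != 0 { // MSB set: one more size byte follows
		c = b[0]
		b = b[1:]
		length += int64(c&0x7f) << shift
		shift += 7
	}
	return typ, length
}

func main() {
	// 0x95 = 1_001_0101: continue bit set, type 1 (commit), size bits 5.
	// 0x0a = 0_0001010: final byte, adds 10<<4 = 160.
	typ, length := parseObjectHeader([]byte{0x95, 0x0a})
	fmt.Println(typ, length) // prints: 1 165
}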
|  | ||||
| // NextObject writes the content of the next object into the writer w and | ||||
| // returns the number of bytes written, the CRC32 of the content and an error, if any | ||||
| func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) { | ||||
| 	s.pendingObject = nil | ||||
| 	written, err = s.copyObject(w) | ||||
|  | ||||
| 	s.r.Flush() | ||||
| 	crc32 = s.crc.Sum32() | ||||
| 	s.crc.Reset() | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // copyObject reads and writes a non-deltified object | ||||
| // from its zlib stream in an object entry in the packfile. | ||||
| func (s *Scanner) copyObject(w io.Writer) (n int64, err error) { | ||||
| 	zr := zlibReaderPool.Get().(io.ReadCloser) | ||||
| 	defer zlibReaderPool.Put(zr) | ||||
|  | ||||
| 	if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil { | ||||
| 		return 0, fmt.Errorf("zlib reset error: %s", err) | ||||
| 	} | ||||
|  | ||||
| 	defer ioutil.CheckClose(zr, &err) | ||||
| 	buf := byteSlicePool.Get().([]byte) | ||||
| 	n, err = io.CopyBuffer(w, zr, buf) | ||||
| 	byteSlicePool.Put(buf) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| var byteSlicePool = sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		return make([]byte, 32*1024) | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| // SeekFromStart sets a new offset from start, returns the old position before | ||||
| // the change. | ||||
| func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) { | ||||
| 	// if seeking we assume that you are not interested in the header | ||||
| 	if s.version == 0 { | ||||
| 		s.version = VersionSupported | ||||
| 	} | ||||
|  | ||||
| 	previous, err = s.r.Seek(0, io.SeekCurrent) | ||||
| 	if err != nil { | ||||
| 		return -1, err | ||||
| 	} | ||||
|  | ||||
| 	_, err = s.r.Seek(offset, io.SeekStart) | ||||
| 	return previous, err | ||||
| } | ||||
|  | ||||
| // Checksum returns the checksum of the packfile | ||||
| func (s *Scanner) Checksum() (plumbing.Hash, error) { | ||||
| 	err := s.discardObjectIfNeeded() | ||||
| 	if err != nil { | ||||
| 		return plumbing.ZeroHash, err | ||||
| 	} | ||||
|  | ||||
| 	return binary.ReadHash(s.r) | ||||
| } | ||||
|  | ||||
| // Close reads the reader until io.EOF | ||||
| func (s *Scanner) Close() error { | ||||
| 	buf := byteSlicePool.Get().([]byte) | ||||
| 	_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf) | ||||
| 	byteSlicePool.Put(buf) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // Flush is a no-op (deprecated) | ||||
| func (s *Scanner) Flush() error { | ||||
| 	return nil | ||||
| } | ||||
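
// Taken together, a typical Scanner consumer reads the header once, then
// alternates NextObjectHeader and NextObject for each announced object.
// A usage sketch (error handling kept minimal; the packfile reader f is
// assumed to be supplied by the caller):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func dumpPackfile(f io.Reader) error {
	s := packfile.NewScanner(f)

	_, objects, err := s.Header() // validates signature and version
	if err != nil {
		return err
	}

	for i := uint32(0); i < objects; i++ {
		h, err := s.NextObjectHeader()
		if err != nil {
			return err
		}

		var buf bytes.Buffer
		n, crc, err := s.NextObject(&buf) // inflates the zlib stream
		if err != nil {
			return err
		}
		fmt.Printf("%v at %d: %d bytes, crc %08x\n", h.Type, h.Offset, n, crc)
	}

	_, err = s.Checksum() // trailing pack checksum
	return err
}

func main() {
	// Example: f, err := os.Open("some.pack"); if err == nil { dumpPackfile(f) }
}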
|  | ||||
| // scannerReader has the following characteristics: | ||||
| // - Provides an io.ReadSeeker impl for bufio.Reader, when the underlying | ||||
| //   reader supports it. | ||||
| // - Keeps track of the current read position, for when the underlying reader | ||||
| //   isn't an io.ReadSeeker, but we still want to know the current offset. | ||||
| // - Writes to the hash writer what it reads, with the aid of a smaller buffer. | ||||
| //   The buffer helps avoid a performance penalty for performing small writes | ||||
| //   to the crc32 hash writer. | ||||
| type scannerReader struct { | ||||
| 	reader io.Reader | ||||
| 	crc    io.Writer | ||||
| 	rbuf   *bufio.Reader | ||||
| 	wbuf   *bufio.Writer | ||||
| 	offset int64 | ||||
| } | ||||
|  | ||||
| func newScannerReader(r io.Reader, h io.Writer) *scannerReader { | ||||
| 	sr := &scannerReader{ | ||||
| 		rbuf: bufio.NewReader(nil), | ||||
| 		wbuf: bufio.NewWriterSize(nil, 64), | ||||
| 		crc:  h, | ||||
| 	} | ||||
| 	sr.Reset(r) | ||||
|  | ||||
| 	return sr | ||||
| } | ||||
|  | ||||
| func (r *scannerReader) Reset(reader io.Reader) { | ||||
| 	r.reader = reader | ||||
| 	r.rbuf.Reset(r.reader) | ||||
| 	r.wbuf.Reset(r.crc) | ||||
|  | ||||
| 	r.offset = 0 | ||||
| 	if seeker, ok := r.reader.(io.ReadSeeker); ok { | ||||
| 		r.offset, _ = seeker.Seek(0, io.SeekCurrent) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *scannerReader) Read(p []byte) (n int, err error) { | ||||
| 	n, err = r.rbuf.Read(p) | ||||
|  | ||||
| 	r.offset += int64(n) | ||||
| 	if _, err := r.wbuf.Write(p[:n]); err != nil { | ||||
| 		return n, err | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (r *scannerReader) ReadByte() (b byte, err error) { | ||||
| 	b, err = r.rbuf.ReadByte() | ||||
| 	if err == nil { | ||||
| 		r.offset++ | ||||
| 		return b, r.wbuf.WriteByte(b) | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func (r *scannerReader) Flush() error { | ||||
| 	return r.wbuf.Flush() | ||||
| } | ||||
|  | ||||
| // Seek seeks to a location. If the underlying reader is not an io.ReadSeeker, | ||||
| // then only whence=io.SeekCurrent is supported, any other operation fails. | ||||
| func (r *scannerReader) Seek(offset int64, whence int) (int64, error) { | ||||
| 	var err error | ||||
|  | ||||
| 	if seeker, ok := r.reader.(io.ReadSeeker); !ok { | ||||
| 		if whence != io.SeekCurrent || offset != 0 { | ||||
| 			return -1, ErrSeekNotSupported | ||||
| 		} | ||||
| 	} else { | ||||
| 		if whence == io.SeekCurrent && offset == 0 { | ||||
| 			return r.offset, nil | ||||
| 		} | ||||
|  | ||||
| 		r.offset, err = seeker.Seek(offset, whence) | ||||
| 		r.rbuf.Reset(r.reader) | ||||
| 	} | ||||
|  | ||||
| 	return r.offset, err | ||||
| } | ||||
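
scannerReader's tee-into-crc behaviour is conceptually the unbuffered io.TeeReader pattern from the standard library, plus offset tracking and a 64-byte write buffer to batch the small writes into the hash. A minimal sketch of the plain pattern, for comparison:

package main

import (
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	crc := crc32.NewIEEE()
	// Every byte read through r is mirrored into crc, as scannerReader
	// does, but without buffering or position bookkeeping.
	r := io.TeeReader(strings.NewReader("some pack bytes"), crc)

	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		panic(err)
	}
	fmt.Printf("crc32: %08x\n", crc.Sum32())
}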
							
								
								
									
122 vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go generated vendored Normal file
							| @@ -0,0 +1,122 @@ | ||||
| // Package pktline implements reading payloads from pkt-lines and encoding | ||||
| // pkt-lines from payloads. | ||||
| package pktline | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| // An Encoder writes pkt-lines to an output stream. | ||||
| type Encoder struct { | ||||
| 	w io.Writer | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	// MaxPayloadSize is the maximum payload size of a pkt-line in bytes. | ||||
| 	MaxPayloadSize = 65516 | ||||
|  | ||||
| 	// For compatibility with the canonical Git implementation, accept longer pkt-lines | ||||
| 	OversizePayloadMax = 65520 | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	// FlushPkt are the contents of a flush-pkt pkt-line. | ||||
| 	FlushPkt = []byte{'0', '0', '0', '0'} | ||||
| 	// Flush is the payload to use with the Encode method to encode a flush-pkt. | ||||
| 	Flush = []byte{} | ||||
| 	// FlushString is the payload to use with the EncodeString method to encode a flush-pkt. | ||||
| 	FlushString = "" | ||||
| 	// ErrPayloadTooLong is returned by the Encode methods when any of the | ||||
| 	// provided payloads is bigger than MaxPayloadSize. | ||||
| 	ErrPayloadTooLong = errors.New("payload is too long") | ||||
| ) | ||||
|  | ||||
| // NewEncoder returns a new encoder that writes to w. | ||||
| func NewEncoder(w io.Writer) *Encoder { | ||||
| 	return &Encoder{ | ||||
| 		w: w, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Flush encodes a flush-pkt to the output stream. | ||||
| func (e *Encoder) Flush() error { | ||||
| 	_, err := e.w.Write(FlushPkt) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // Encode encodes a pkt-line with the specified payload and writes it to | ||||
| // the output stream.  If several payloads are specified, each of them | ||||
| // is streamed in its own pkt-line. | ||||
| func (e *Encoder) Encode(payloads ...[]byte) error { | ||||
| 	for _, p := range payloads { | ||||
| 		if err := e.encodeLine(p); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (e *Encoder) encodeLine(p []byte) error { | ||||
| 	if len(p) > MaxPayloadSize { | ||||
| 		return ErrPayloadTooLong | ||||
| 	} | ||||
|  | ||||
| 	if bytes.Equal(p, Flush) { | ||||
| 		return e.Flush() | ||||
| 	} | ||||
|  | ||||
| 	n := len(p) + 4 | ||||
| 	if _, err := e.w.Write(asciiHex16(n)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	_, err := e.w.Write(p) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // Returns the hexadecimal ascii representation of the 16 least | ||||
| // significant bits of n.  The length of the returned slice will always | ||||
| // be 4.  Example: if n is 1234 (0x4d2), the return value will be | ||||
| // []byte{'0', '4', 'd', '2'}. | ||||
| func asciiHex16(n int) []byte { | ||||
| 	var ret [4]byte | ||||
| 	ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) | ||||
| 	ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) | ||||
| 	ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) | ||||
| 	ret[3] = byteToASCIIHex(byte(n & 0x000f)) | ||||
|  | ||||
| 	return ret[:] | ||||
| } | ||||
|  | ||||
| // turns a byte into its hexadecimal ascii representation.  Example: | ||||
| // from 11 (0xb) to 'b'. | ||||
| func byteToASCIIHex(n byte) byte { | ||||
| 	if n < 10 { | ||||
| 		return '0' + n | ||||
| 	} | ||||
|  | ||||
| 	return 'a' - 10 + n | ||||
| } | ||||
|  | ||||
| // EncodeString works like Encode, but payloads are specified as strings. | ||||
| func (e *Encoder) EncodeString(payloads ...string) error { | ||||
| 	for _, p := range payloads { | ||||
| 		if err := e.Encode([]byte(p)); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Encodef encodes a single pkt-line with the payload formatted according | ||||
| // to the given format specifier.  The rest of the arguments are used in | ||||
| // the format string. | ||||
| func (e *Encoder) Encodef(format string, a ...interface{}) error { | ||||
| 	return e.EncodeString( | ||||
| 		fmt.Sprintf(format, a...), | ||||
| 	) | ||||
| } | ||||
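
In terms of bytes on the wire, a payload of n bytes is framed by the 4-digit hex length n+4, and the empty payload maps to the flush-pkt "0000". A short usage sketch of the Encoder:

package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/pktline"
)

func main() {
	var buf bytes.Buffer
	e := pktline.NewEncoder(&buf)

	// "hello\n" has 6 bytes, so its pkt-len is 6+4 = 10 = 0x000a.
	if err := e.EncodeString("hello\n"); err != nil {
		panic(err)
	}
	if err := e.Flush(); err != nil { // emits the flush-pkt
		panic(err)
	}

	fmt.Printf("%q\n", buf.String()) // "000ahello\n0000"
}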
							
								
								
									
134 vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go generated vendored Normal file
							| @@ -0,0 +1,134 @@ | ||||
| package pktline | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	lenSize = 4 | ||||
| ) | ||||
|  | ||||
| // ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. | ||||
| var ErrInvalidPktLen = errors.New("invalid pkt-len found") | ||||
|  | ||||
| // Scanner provides a convenient interface for reading the payloads of a | ||||
| // series of pkt-lines.  It takes an io.Reader providing the source, | ||||
| // which then can be tokenized through repeated calls to the Scan | ||||
| // method. | ||||
| // | ||||
| // After each Scan call, the Bytes method will return the payload of the | ||||
| // corresponding pkt-line on a shared buffer, which will be 65516 bytes | ||||
| // or smaller.  Flush pkt-lines are represented by empty byte slices. | ||||
| // | ||||
| // Scanning stops at EOF or the first I/O error. | ||||
| type Scanner struct { | ||||
| 	r       io.Reader     // The reader provided by the client | ||||
| 	err     error         // Sticky error | ||||
| 	payload []byte        // Last pkt-payload | ||||
| 	len     [lenSize]byte // Last pkt-len | ||||
| } | ||||
|  | ||||
| // NewScanner returns a new Scanner to read from r. | ||||
| func NewScanner(r io.Reader) *Scanner { | ||||
| 	return &Scanner{ | ||||
| 		r: r, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Err returns the first error encountered by the Scanner. | ||||
| func (s *Scanner) Err() error { | ||||
| 	return s.err | ||||
| } | ||||
|  | ||||
| // Scan advances the Scanner to the next pkt-line, whose payload will | ||||
| // then be available through the Bytes method.  Scanning stops at EOF | ||||
| // or the first I/O error.  After Scan returns false, the Err method | ||||
| // will return any error that occurred during scanning, except that if | ||||
| // it was io.EOF, Err will return nil. | ||||
| func (s *Scanner) Scan() bool { | ||||
| 	var l int | ||||
| 	l, s.err = s.readPayloadLen() | ||||
| 	if s.err == io.EOF { | ||||
| 		s.err = nil | ||||
| 		return false | ||||
| 	} | ||||
| 	if s.err != nil { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	if cap(s.payload) < l { | ||||
| 		s.payload = make([]byte, 0, l) | ||||
| 	} | ||||
|  | ||||
| 	if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	s.payload = s.payload[:l] | ||||
|  | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // Bytes returns the most recent payload generated by a call to Scan. | ||||
| // The underlying array may point to data that will be overwritten by a | ||||
| // subsequent call to Scan. It does no allocation. | ||||
| func (s *Scanner) Bytes() []byte { | ||||
| 	return s.payload | ||||
| } | ||||
|  | ||||
| // readPayloadLen returns the payload length by reading the | ||||
| // pkt-len and subtracting the pkt-len size. | ||||
| func (s *Scanner) readPayloadLen() (int, error) { | ||||
| 	if _, err := io.ReadFull(s.r, s.len[:]); err != nil { | ||||
| 		if err == io.ErrUnexpectedEOF { | ||||
| 			return 0, ErrInvalidPktLen | ||||
| 		} | ||||
|  | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	n, err := hexDecode(s.len) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | ||||
| 	switch { | ||||
| 	case n == 0: | ||||
| 		return 0, nil | ||||
| 	case n <= lenSize: | ||||
| 		return 0, ErrInvalidPktLen | ||||
| 	case n > OversizePayloadMax+lenSize: | ||||
| 		return 0, ErrInvalidPktLen | ||||
| 	default: | ||||
| 		return n - lenSize, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Turns the hexadecimal representation of a number in a byte slice into | ||||
| // a number.  This function substitutes for strconv.ParseUint(string(buf), 16, | ||||
| // 16) and/or hex.Decode, to avoid generating new strings, thus helping the | ||||
| // GC. | ||||
| func hexDecode(buf [lenSize]byte) (int, error) { | ||||
| 	var ret int | ||||
| 	for i := 0; i < lenSize; i++ { | ||||
| 		n, err := asciiHexToByte(buf[i]) | ||||
| 		if err != nil { | ||||
| 			return 0, ErrInvalidPktLen | ||||
| 		} | ||||
| 		ret = 16*ret + int(n) | ||||
| 	} | ||||
| 	return ret, nil | ||||
| } | ||||
|  | ||||
| // turns the hexadecimal ascii representation of a byte into its | ||||
| // numerical value.  Example: from 'b' to 11 (0xb). | ||||
| func asciiHexToByte(b byte) (byte, error) { | ||||
| 	switch { | ||||
| 	case b >= '0' && b <= '9': | ||||
| 		return b - '0', nil | ||||
| 	case b >= 'a' && b <= 'f': | ||||
| 		return b - 'a' + 10, nil | ||||
| 	default: | ||||
| 		return 0, ErrInvalidPktLen | ||||
| 	} | ||||
| } | ||||
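
Reading the same stream back: Scan surfaces each payload via Bytes, and a flush-pkt comes out as an empty slice, as the Scanner doc comment above notes. A usage sketch consuming the Encoder example's output:

package main

import (
	"fmt"
	"strings"

	"github.com/go-git/go-git/v5/plumbing/format/pktline"
)

func main() {
	// One pkt-line followed by a flush-pkt.
	s := pktline.NewScanner(strings.NewReader("000ahello\n0000"))

	for s.Scan() {
		fmt.Printf("payload: %q\n", s.Bytes())
	}
	if err := s.Err(); err != nil {
		panic(err)
	}
	// Output:
	// payload: "hello\n"
	// payload: ""
}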
							
								
								
									
73 vendor/github.com/go-git/go-git/v5/plumbing/hash.go generated vendored Normal file
							| @@ -0,0 +1,73 @@ | ||||
| package plumbing | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"crypto/sha1" | ||||
| 	"encoding/hex" | ||||
| 	"hash" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
| // Hash represents SHA-1 hashed content | ||||
| type Hash [20]byte | ||||
|  | ||||
| // ZeroHash is a Hash with value zero | ||||
| var ZeroHash Hash | ||||
|  | ||||
| // ComputeHash computes the hash for a given ObjectType and content | ||||
| func ComputeHash(t ObjectType, content []byte) Hash { | ||||
| 	h := NewHasher(t, int64(len(content))) | ||||
| 	h.Write(content) | ||||
| 	return h.Sum() | ||||
| } | ||||
|  | ||||
| // NewHash returns a new Hash from a hexadecimal hash representation | ||||
| func NewHash(s string) Hash { | ||||
| 	b, _ := hex.DecodeString(s) | ||||
|  | ||||
| 	var h Hash | ||||
| 	copy(h[:], b) | ||||
|  | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| func (h Hash) IsZero() bool { | ||||
| 	var empty Hash | ||||
| 	return h == empty | ||||
| } | ||||
|  | ||||
| func (h Hash) String() string { | ||||
| 	return hex.EncodeToString(h[:]) | ||||
| } | ||||
|  | ||||
| type Hasher struct { | ||||
| 	hash.Hash | ||||
| } | ||||
|  | ||||
| func NewHasher(t ObjectType, size int64) Hasher { | ||||
| 	h := Hasher{sha1.New()} | ||||
| 	h.Write(t.Bytes()) | ||||
| 	h.Write([]byte(" ")) | ||||
| 	h.Write([]byte(strconv.FormatInt(size, 10))) | ||||
| 	h.Write([]byte{0}) | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| func (h Hasher) Sum() (hash Hash) { | ||||
| 	copy(hash[:], h.Hash.Sum(nil)) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // HashesSort sorts a slice of Hashes in increasing order. | ||||
| func HashesSort(a []Hash) { | ||||
| 	sort.Sort(HashSlice(a)) | ||||
| } | ||||
|  | ||||
| // HashSlice attaches the methods of sort.Interface to []Hash, sorting in | ||||
| // increasing order. | ||||
| type HashSlice []Hash | ||||
|  | ||||
| func (p HashSlice) Len() int           { return len(p) } | ||||
| func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 } | ||||
| func (p HashSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] } | ||||
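
ComputeHash reproduces git's object hashing: SHA-1 over the header "<type> <size>\0" followed by the content. A quick sketch; the expected digest in the comment is what `git hash-object --stdin` should print for a 5-byte "hello" blob, stated here as an assumption to verify locally:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Hashes sha1("blob 5\x00hello"), the same preimage git itself uses.
	h := plumbing.ComputeHash(plumbing.BlobObject, []byte("hello"))
	fmt.Println(h)
	// Expected to match `printf hello | git hash-object --stdin`
	// (b6fc4c620b67d95f953a5c1c1230aaab5db5a1b0, verify locally).
}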
Some files were not shown because too many files have changed in this diff.