chore(deps): upgrade dependencies

Upgrade all dependencies to newest versions.
Author: Christopher Allen Lane
Date:   2023-12-13 08:29:02 -05:00
parent 0d9c92c8c0
commit 95a4e31b6c
769 changed files with 28936 additions and 12954 deletions

vendor/github.com/go-git/gcfg/.gitignore generated vendored Normal file

@ -0,0 +1 @@
coverage.out

vendor/github.com/go-git/gcfg/Makefile generated vendored Normal file

@ -0,0 +1,17 @@
# General
WORKDIR = $(PWD)
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test
# Coverage
COVERAGE_REPORT = coverage.out
COVERAGE_MODE = count
test:
$(GOTEST) ./...
test-coverage:
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...


@ -1,7 +0,0 @@
// +build !go1.2
package gcfg
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}


@ -1,9 +0,0 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler


@ -3,16 +3,16 @@ package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"gopkg.in/warnings.v0"
"github.com/go-git/gcfg/scanner"
"github.com/go-git/gcfg/token"
"gopkg.in/warnings.v0"
)
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'}
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b', '\n': '\n'}
// no error: invalid literals should be caught by scanner
func unquote(s string) string {
@ -224,7 +224,7 @@ func readInto(config interface{}, fset *token.FileSet, file *token.File,
//
// If callback returns an error, ReadWithCallback terminates with an error too.
func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
src, err := ioutil.ReadAll(reader)
src, err := io.ReadAll(reader)
if err != nil {
return err
}
@ -239,7 +239,7 @@ func ReadWithCallback(reader io.Reader, callback func(string, string, string, st
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
src, err := ioutil.ReadAll(reader)
src, err := io.ReadAll(reader)
if err != nil {
return err
}
@ -263,7 +263,7 @@ func ReadFileInto(config interface{}, filename string) error {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
src, err := io.ReadAll(f)
if err != nil {
return err
}
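These hunks swap the deprecated `ioutil.ReadAll` for `io.ReadAll`; the exported reading API is unchanged. For context, a minimal sketch of how `ReadInto` is typically driven (the config struct and section names below are illustrative assumptions, not taken from this diff):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-git/gcfg"
)

// Config mirrors a gcfg/INI-style document: the [core] section maps to
// the Core field, and each variable maps to a field of the same name.
// These names are illustrative, not part of this commit.
type Config struct {
	Core struct {
		Bare     bool
		Worktree string
	}
}

func main() {
	src := "[core]\nbare = false\nworktree = /tmp/wt\n"

	var cfg Config
	if err := gcfg.ReadInto(&cfg, strings.NewReader(src)); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Core.Bare, cfg.Core.Worktree)
}
```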


@ -8,7 +8,6 @@
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
@ -16,9 +15,7 @@ import (
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"github.com/go-git/gcfg/token"
)
@ -26,13 +23,11 @@ import (
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
// immutable state
file *token.File // source file handle
@ -54,7 +49,6 @@ type Scanner struct {
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
if s.rdOffset < len(s.src) {
s.offset = s.rdOffset
@ -87,7 +81,6 @@ func (s *Scanner) next() {
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
@ -108,7 +101,6 @@ const (
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
@ -163,12 +155,13 @@ func (s *Scanner) scanIdentifier() string {
return string(s.src[offs:s.offset])
}
// val indicates whether we are scanning a value (vs a header)
func (s *Scanner) scanEscape(val bool) {
offs := s.offset
ch := s.ch
s.next() // always make progress
switch ch {
case '\\', '"':
case '\\', '"', '\n':
// ok
case 'n', 't', 'b':
if val {
@ -289,7 +282,6 @@ func (s *Scanner) skipWhitespace() {
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
s.skipWhitespace()


@ -2,6 +2,7 @@ package gcfg
import (
"bytes"
"encoding"
"encoding/gob"
"fmt"
"math/big"
@ -10,8 +11,9 @@ import (
"unicode"
"unicode/utf8"
"github.com/go-git/gcfg/types"
"gopkg.in/warnings.v0"
"github.com/go-git/gcfg/types"
)
type tag struct {
@ -65,7 +67,7 @@ var setters = []setter{
}
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
dtu, ok := d.(textUnmarshaler)
dtu, ok := d.(encoding.TextUnmarshaler)
if !ok {
return errUnsupportedType
}
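With the go1.2 build-tag shims deleted, the setter now asserts directly against the standard `encoding.TextUnmarshaler` interface, so any field type implementing it can be populated from a config value. A hedged sketch (the `Level` type is invented for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-git/gcfg"
)

// Level implements encoding.TextUnmarshaler, which is what
// textUnmarshalerSetter now checks for directly.
type Level int

func (l *Level) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

type Config struct {
	Log struct {
		Level Level
	}
}

func main() {
	var cfg Config
	err := gcfg.ReadInto(&cfg, strings.NewReader("[log]\nlevel = info\n"))
	fmt.Println(cfg.Log.Level, err) // 1 <nil>
}
```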

vendor/github.com/go-git/go-billy/v5/Makefile generated vendored Normal file

@ -0,0 +1,11 @@
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test
.PHONY: test
test:
$(GOTEST) -race ./...
test-coverage:
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...


@ -310,14 +310,14 @@ func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File
flag: flag,
}
if isAppend(flag) {
new.position = int64(new.content.Len())
}
if isTruncate(flag) {
new.content.Truncate()
}
if isAppend(flag) {
new.position = int64(new.content.Len())
}
return new
}
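The reorder above matters when a handle is duplicated with both `O_TRUNC` and `O_APPEND` set: truncation must happen first so the append position is computed from the truncated content, not the stale length. A sketch of the observable behaviour through the public memfs API (file name illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	_ = util.WriteFile(fs, "f.txt", []byte("old contents"), 0o644)

	// Re-opening an existing file goes through Duplicate. With
	// O_TRUNC|O_APPEND the position is now 0 (the end of the truncated
	// content) instead of the pre-truncation length.
	f, err := fs.OpenFile("f.txt", os.O_RDWR|os.O_TRUNC|os.O_APPEND, 0o644)
	if err != nil {
		panic(err)
	}
	_, _ = f.Write([]byte("new"))
	_ = f.Close()

	b, _ := util.ReadFile(fs, "f.txt")
	fmt.Printf("%s\n", b) // new
}
```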


@ -6,6 +6,7 @@ import (
"io"
"os"
"path/filepath"
"sync"
)
type storage struct {
@ -174,6 +175,8 @@ func clean(path string) string {
type content struct {
name string
bytes []byte
m sync.RWMutex
}
func (c *content) WriteAt(p []byte, off int64) (int, error) {
@ -185,6 +188,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
}
}
c.m.Lock()
prev := len(c.bytes)
diff := int(off) - prev
@ -196,6 +200,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
if len(c.bytes) < prev {
c.bytes = c.bytes[:prev]
}
c.m.Unlock()
return len(p), nil
}
@ -209,8 +214,10 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
}
c.m.RLock()
size := int64(len(c.bytes))
if off >= size {
c.m.RUnlock()
return 0, io.EOF
}
@ -220,10 +227,12 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
btr := c.bytes[off : off+l]
n = copy(b, btr)
if len(btr) < len(b) {
err = io.EOF
}
n = copy(b, btr)
c.m.RUnlock()
return
}
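The new `sync.RWMutex` serialises access to the shared byte slice, so concurrent readers and a writer on the same in-memory file no longer race. A sketch of the access pattern this guards (two handles over one underlying content buffer):

```go
package main

import (
	"os"
	"sync"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	_ = util.WriteFile(fs, "f.txt", make([]byte, 1024), 0o644)

	// Both handles share the same content struct; the RWMutex added
	// above keeps ReadAt and WriteAt from racing on it.
	r, _ := fs.Open("f.txt")
	w, _ := fs.OpenFile("f.txt", os.O_WRONLY, 0o644)

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		buf := make([]byte, 64)
		for i := 0; i < 1000; i++ {
			_, _ = r.ReadAt(buf, 0)
		}
	}()
	go func() {
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			_, _ = w.Write([]byte("data"))
		}
	}()
	wg.Wait()
}
```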


@ -1,140 +1,123 @@
//go:build !js
// +build !js
// Package osfs provides a billy filesystem for the OS.
package osfs // import "github.com/go-git/go-billy/v5/osfs"
package osfs
import (
"io/ioutil"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/helper/chroot"
)
const (
defaultDirectoryMode = 0755
defaultCreateMode = 0666
defaultDirectoryMode = 0o755
defaultCreateMode = 0o666
)
// Default Filesystem representing the root of the os filesystem.
var Default = &OS{}
// OS is a filesystem based on the os filesystem.
type OS struct{}
var Default = &ChrootOS{}
// New returns a new OS filesystem.
func New(baseDir string) billy.Filesystem {
return chroot.New(Default, baseDir)
// By default paths are deduplicated, but still enforced
// under baseDir. For more info refer to WithDeduplicatePath.
func New(baseDir string, opts ...Option) billy.Filesystem {
o := &options{
deduplicatePath: true,
}
for _, opt := range opts {
opt(o)
}
if o.Type == BoundOSFS {
return newBoundOS(baseDir, o.deduplicatePath)
}
return newChrootOS(baseDir)
}
func (fs *OS) Create(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
// WithBoundOS returns the option of using a Bound filesystem OS.
func WithBoundOS() Option {
return func(o *options) {
o.Type = BoundOSFS
}
}
func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
if flag&os.O_CREATE != 0 {
if err := fs.createDir(filename); err != nil {
// WithChrootOS returns the option of using a Chroot filesystem OS.
func WithChrootOS() Option {
return func(o *options) {
o.Type = ChrootOSFS
}
}
// WithDeduplicatePath toggles the deduplication of the base dir in the path.
// This occurs when absolute links are being used.
// Assuming base dir /base/dir and an absolute symlink /base/dir/target:
//
// With DeduplicatePath (default): /base/dir/target
// Without DeduplicatePath: /base/dir/base/dir/target
//
// This option is only used by the BoundOS OS type.
func WithDeduplicatePath(enabled bool) Option {
return func(o *options) {
o.deduplicatePath = enabled
}
}
type options struct {
Type
deduplicatePath bool
}
type Type int
const (
ChrootOSFS Type = iota
BoundOSFS
)
func readDir(dir string) ([]os.FileInfo, error) {
entries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
infos := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
fi, err := entry.Info()
if err != nil {
return nil, err
}
infos = append(infos, fi)
}
f, err := os.OpenFile(filename, flag, perm)
if err != nil {
return nil, err
}
return &file{File: f}, err
return infos, nil
}
func (fs *OS) createDir(fullpath string) error {
dir := filepath.Dir(fullpath)
if dir != "." {
if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
return err
}
}
return nil
}
func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) {
l, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
var s = make([]os.FileInfo, len(l))
for i, f := range l {
s[i] = f
}
return s, nil
}
func (fs *OS) Rename(from, to string) error {
if err := fs.createDir(to); err != nil {
return err
}
return rename(from, to)
}
func (fs *OS) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, defaultDirectoryMode)
}
func (fs *OS) Open(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
func (fs *OS) Stat(filename string) (os.FileInfo, error) {
return os.Stat(filename)
}
func (fs *OS) Remove(filename string) error {
return os.Remove(filename)
}
func (fs *OS) TempFile(dir, prefix string) (billy.File, error) {
if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
return nil, err
}
f, err := ioutil.TempFile(dir, prefix)
func tempFile(dir, prefix string) (billy.File, error) {
f, err := os.CreateTemp(dir, prefix)
if err != nil {
return nil, err
}
return &file{File: f}, nil
}
func (fs *OS) Join(elem ...string) string {
return filepath.Join(elem...)
}
func (fs *OS) RemoveAll(path string) error {
return os.RemoveAll(filepath.Clean(path))
}
func (fs *OS) Lstat(filename string) (os.FileInfo, error) {
return os.Lstat(filepath.Clean(filename))
}
func (fs *OS) Symlink(target, link string) error {
if err := fs.createDir(link); err != nil {
return err
func openFile(fn string, flag int, perm os.FileMode, createDir func(string) error) (billy.File, error) {
if flag&os.O_CREATE != 0 {
if createDir == nil {
return nil, fmt.Errorf("createDir func cannot be nil if file needs to be opened in create mode")
}
if err := createDir(fn); err != nil {
return nil, err
}
}
return os.Symlink(target, link)
}
func (fs *OS) Readlink(link string) (string, error) {
return os.Readlink(link)
}
// Capabilities implements the Capable interface.
func (fs *OS) Capabilities() billy.Capability {
return billy.DefaultCapabilities
f, err := os.OpenFile(fn, flag, perm)
if err != nil {
return nil, err
}
return &file{File: f}, err
}
// file is a wrapper for an os.File which adds support for file locking.
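Taken together, `New` now chooses a backend at construction time instead of always returning the chroot implementation. A usage sketch built only from the options defined above (paths illustrative):

```go
package main

import (
	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	// Default: the legacy chroot-style filesystem, as before this upgrade.
	legacy := osfs.New("/tmp/base")

	// Opt in to the new bound filesystem, optionally disabling the
	// base-dir deduplication described at WithDeduplicatePath.
	bound := osfs.New("/tmp/base",
		osfs.WithBoundOS(),
		osfs.WithDeduplicatePath(false),
	)

	_, _ = legacy, bound
}
```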

vendor/github.com/go-git/go-billy/v5/osfs/os_bound.go generated vendored Normal file

@ -0,0 +1,261 @@
//go:build !js
// +build !js
/*
Copyright 2022 The Flux authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osfs
import (
"fmt"
"os"
"path/filepath"
"strings"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/go-git/go-billy/v5"
)
// BoundOS is a fs implementation based on the OS filesystem which is bound to
// a base dir.
// Prefer this fs implementation over ChrootOS.
//
// Behaviours of note:
// 1. Read and write operations can only be directed to files which descend
// from the base dir.
// 2. Symlinks don't have their targets modified, and therefore can point
// to locations outside the base dir or to non-existent paths.
// 3. Readlink and Lstat ensure that the link file is located within the base
// dir, evaluating any symlinks that file or base dir may contain.
type BoundOS struct {
baseDir string
deduplicatePath bool
}
func newBoundOS(d string, deduplicatePath bool) billy.Filesystem {
return &BoundOS{baseDir: d, deduplicatePath: deduplicatePath}
}
func (fs *BoundOS) Create(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
}
func (fs *BoundOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
fn, err := fs.abs(filename)
if err != nil {
return nil, err
}
return openFile(fn, flag, perm, fs.createDir)
}
func (fs *BoundOS) ReadDir(path string) ([]os.FileInfo, error) {
dir, err := fs.abs(path)
if err != nil {
return nil, err
}
return readDir(dir)
}
func (fs *BoundOS) Rename(from, to string) error {
f, err := fs.abs(from)
if err != nil {
return err
}
t, err := fs.abs(to)
if err != nil {
return err
}
// MkdirAll for target name.
if err := fs.createDir(t); err != nil {
return err
}
return os.Rename(f, t)
}
func (fs *BoundOS) MkdirAll(path string, perm os.FileMode) error {
dir, err := fs.abs(path)
if err != nil {
return err
}
return os.MkdirAll(dir, perm)
}
func (fs *BoundOS) Open(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
func (fs *BoundOS) Stat(filename string) (os.FileInfo, error) {
filename, err := fs.abs(filename)
if err != nil {
return nil, err
}
return os.Stat(filename)
}
func (fs *BoundOS) Remove(filename string) error {
fn, err := fs.abs(filename)
if err != nil {
return err
}
return os.Remove(fn)
}
// TempFile creates a temporary file. If dir is empty, the file
// will be created within the OS Temporary dir. If dir is provided
// it must descend from the current base dir.
func (fs *BoundOS) TempFile(dir, prefix string) (billy.File, error) {
if dir != "" {
var err error
dir, err = fs.abs(dir)
if err != nil {
return nil, err
}
}
return tempFile(dir, prefix)
}
func (fs *BoundOS) Join(elem ...string) string {
return filepath.Join(elem...)
}
func (fs *BoundOS) RemoveAll(path string) error {
dir, err := fs.abs(path)
if err != nil {
return err
}
return os.RemoveAll(dir)
}
func (fs *BoundOS) Symlink(target, link string) error {
ln, err := fs.abs(link)
if err != nil {
return err
}
// MkdirAll for containing dir.
if err := fs.createDir(ln); err != nil {
return err
}
return os.Symlink(target, ln)
}
func (fs *BoundOS) Lstat(filename string) (os.FileInfo, error) {
filename = filepath.Clean(filename)
if !filepath.IsAbs(filename) {
filename = filepath.Join(fs.baseDir, filename)
}
if ok, err := fs.insideBaseDirEval(filename); !ok {
return nil, err
}
return os.Lstat(filename)
}
func (fs *BoundOS) Readlink(link string) (string, error) {
if !filepath.IsAbs(link) {
link = filepath.Clean(filepath.Join(fs.baseDir, link))
}
if ok, err := fs.insideBaseDirEval(link); !ok {
return "", err
}
return os.Readlink(link)
}
// Chroot returns a new OS filesystem, with the base dir set to the
// result of joining the provided path with the underlying base dir.
func (fs *BoundOS) Chroot(path string) (billy.Filesystem, error) {
joined, err := securejoin.SecureJoin(fs.baseDir, path)
if err != nil {
return nil, err
}
return New(joined), nil
}
// Root returns the current base dir of the billy.Filesystem.
// This is required in order for this implementation to be a drop-in
// replacement for other upstream implementations (e.g. memory and osfs).
func (fs *BoundOS) Root() string {
return fs.baseDir
}
func (fs *BoundOS) createDir(fullpath string) error {
dir := filepath.Dir(fullpath)
if dir != "." {
if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
return err
}
}
return nil
}
// abs transforms filename to an absolute path, taking into account the base dir.
// Relative paths won't be allowed to ascend the base dir, so `../file` will become
// `/working-dir/file`.
//
// Note that if filename is a symlink, the returned address will be the target of the
// symlink.
func (fs *BoundOS) abs(filename string) (string, error) {
if filename == fs.baseDir {
filename = string(filepath.Separator)
}
path, err := securejoin.SecureJoin(fs.baseDir, filename)
if err != nil {
return "", nil
}
if fs.deduplicatePath {
vol := filepath.VolumeName(fs.baseDir)
dup := filepath.Join(fs.baseDir, fs.baseDir[len(vol):])
if strings.HasPrefix(path, dup+string(filepath.Separator)) {
return fs.abs(path[len(dup):])
}
}
return path, nil
}
// insideBaseDir checks whether filename is located within
// the fs.baseDir.
func (fs *BoundOS) insideBaseDir(filename string) (bool, error) {
if filename == fs.baseDir {
return true, nil
}
if !strings.HasPrefix(filename, fs.baseDir+string(filepath.Separator)) {
return false, fmt.Errorf("path outside base dir")
}
return true, nil
}
// insideBaseDirEval checks whether filename is contained within
// a dir that is within the fs.baseDir, by first evaluating any symlinks
// that either filename or fs.baseDir may contain.
func (fs *BoundOS) insideBaseDirEval(filename string) (bool, error) {
dir, err := filepath.EvalSymlinks(filepath.Dir(filename))
if dir == "" || os.IsNotExist(err) {
dir = filepath.Dir(filename)
}
wd, err := filepath.EvalSymlinks(fs.baseDir)
if wd == "" || os.IsNotExist(err) {
wd = fs.baseDir
}
if filename != wd && dir != wd && !strings.HasPrefix(dir, wd+string(filepath.Separator)) {
return false, fmt.Errorf("path outside base dir")
}
return true, nil
}
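The net effect of `abs` plus the `insideBaseDir*` checks is that every operation stays under `baseDir`, with `..` traversal neutralised by `securejoin.SecureJoin`. A sketch of the observable behaviour (paths illustrative; note the resolved name stays inside the base dir):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	fs := osfs.New("/tmp/base", osfs.WithBoundOS())

	// "../escape" cannot ascend past the base dir: SecureJoin pins the
	// path to /tmp/base/escape before the file is created.
	f, err := fs.Create("../escape")
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name()) // /tmp/base/escape, not /tmp/escape
	_ = f.Close()
}
```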

vendor/github.com/go-git/go-billy/v5/osfs/os_chroot.go generated vendored Normal file

@ -0,0 +1,112 @@
//go:build !js
// +build !js
package osfs
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/helper/chroot"
)
// ChrootOS is a legacy filesystem based on a "soft chroot" of the os filesystem.
// Although this is still the default os filesystem, consider using BoundOS instead.
//
// Behaviours of note:
// 1. A "soft chroot" translates the base dir to "/" for the purposes of the
// fs abstraction.
// 2. Symlinks targets may be modified to be kept within the chroot bounds.
// 3. Some file modes do not pass through the fs abstraction.
// 4. The combination of 1 and 2 may cause go-git to think that a Git repository
// is dirty, when in fact it isn't.
type ChrootOS struct{}
func newChrootOS(baseDir string) billy.Filesystem {
return chroot.New(&ChrootOS{}, baseDir)
}
func (fs *ChrootOS) Create(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
}
func (fs *ChrootOS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
return openFile(filename, flag, perm, fs.createDir)
}
func (fs *ChrootOS) createDir(fullpath string) error {
dir := filepath.Dir(fullpath)
if dir != "." {
if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
return err
}
}
return nil
}
func (fs *ChrootOS) ReadDir(dir string) ([]os.FileInfo, error) {
return readDir(dir)
}
func (fs *ChrootOS) Rename(from, to string) error {
if err := fs.createDir(to); err != nil {
return err
}
return rename(from, to)
}
func (fs *ChrootOS) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, defaultDirectoryMode)
}
func (fs *ChrootOS) Open(filename string) (billy.File, error) {
return fs.OpenFile(filename, os.O_RDONLY, 0)
}
func (fs *ChrootOS) Stat(filename string) (os.FileInfo, error) {
return os.Stat(filename)
}
func (fs *ChrootOS) Remove(filename string) error {
return os.Remove(filename)
}
func (fs *ChrootOS) TempFile(dir, prefix string) (billy.File, error) {
if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
return nil, err
}
return tempFile(dir, prefix)
}
func (fs *ChrootOS) Join(elem ...string) string {
return filepath.Join(elem...)
}
func (fs *ChrootOS) RemoveAll(path string) error {
return os.RemoveAll(filepath.Clean(path))
}
func (fs *ChrootOS) Lstat(filename string) (os.FileInfo, error) {
return os.Lstat(filepath.Clean(filename))
}
func (fs *ChrootOS) Symlink(target, link string) error {
if err := fs.createDir(link); err != nil {
return err
}
return os.Symlink(target, link)
}
func (fs *ChrootOS) Readlink(link string) (string, error) {
return os.Readlink(link)
}
// Capabilities implements the Capable interface.
func (fs *ChrootOS) Capabilities() billy.Capability {
return billy.DefaultCapabilities
}


@ -1,3 +1,4 @@
//go:build js
// +build js
package osfs
@ -16,6 +17,9 @@ var globalMemFs = memfs.New()
var Default = memfs.New()
// New returns a new OS filesystem.
func New(baseDir string) billy.Filesystem {
func New(baseDir string, _ ...Option) billy.Filesystem {
return chroot.New(Default, Default.Join("/", baseDir))
}
type options struct {
}


@ -0,0 +1,3 @@
package osfs
type Option func(*options)


@ -1,3 +1,4 @@
//go:build plan9
// +build plan9
package osfs
@ -83,3 +84,8 @@ func dirwstat(name string, d *syscall.Dir) error {
}
return nil
}
func umask(new int) func() {
return func() {
}
}


@ -1,9 +1,11 @@
//go:build !plan9 && !windows && !js
// +build !plan9,!windows,!js
package osfs
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
@ -25,3 +27,12 @@ func (f *file) Unlock() error {
func rename(from, to string) error {
return os.Rename(from, to)
}
// umask sets umask to a new value, and returns a func which allows the
// caller to reset it back to what it was originally.
func umask(new int) func() {
old := syscall.Umask(new)
return func() {
syscall.Umask(old)
}
}
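A short note on intended use: the returned closure restores the previous mask, so an in-package caller can scope a temporary umask change to a single operation. A hypothetical sketch (this helper is not part of the diff):

```go
// Hypothetical in-package (osfs) caller: clear the process umask so the
// requested mode is applied verbatim, then restore the old mask on return.
func mkdirExactMode(path string, mode os.FileMode) error {
	reset := umask(0)
	defer reset()
	return os.MkdirAll(path, mode)
}
```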


@ -1,3 +1,4 @@
//go:build windows
// +build windows
package osfs
@ -10,15 +11,6 @@ import (
"golang.org/x/sys/windows"
)
type fileInfo struct {
os.FileInfo
name string
}
func (fi *fileInfo) Name() string {
return fi.name
}
var (
kernel32DLL = windows.NewLazySystemDLL("kernel32.dll")
lockFileExProc = kernel32DLL.NewProc("LockFileEx")
@ -59,3 +51,8 @@ func (f *file) Unlock() error {
func rename(from, to string) error {
return os.Rename(from, to)
}
func umask(new int) func() {
return func() {
}
}

vendor/github.com/go-git/go-billy/v5/util/walk.go generated vendored Normal file

@ -0,0 +1,72 @@
package util
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
)
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
if !info.IsDir() {
return walkFn(path, info, nil)
}
names, err := readdirnames(fs, path)
err1 := walkFn(path, info, err)
// If err != nil, walk can't walk into this directory.
// err1 != nil means walkFn wants walk to skip this directory or stop walking.
// Therefore, if one of err and err1 isn't nil, walk will return.
if err != nil || err1 != nil {
// The caller's behavior is controlled by the return value, which is decided
// by walkFn. walkFn may ignore err and return nil.
// If walkFn returns SkipDir, it will be handled by the caller.
// So walk should return whatever walkFn returns.
return err1
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := fs.Lstat(filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(fs, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
// Walk walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by fn: see the WalkFunc documentation for
// details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires Walk to read an entire directory into memory before proceeding
// to walk that directory. Walk does not follow symbolic links.
//
// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500
func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
info, err := fs.Lstat(root)
if err != nil {
err = walkFn(root, nil, err)
} else {
err = walk(fs, root, info, walkFn)
}
if err == filepath.SkipDir {
return nil
}
return err
}
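A usage sketch of the new `Walk` helper over an in-memory filesystem (file names illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	_ = util.WriteFile(fs, "a/b.txt", []byte("hi"), 0o644)
	_ = util.WriteFile(fs, "a/c/d.txt", []byte("hi"), 0o644)

	// Visits every file and directory under "/" in lexical order.
	err := util.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```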


@ -2,3 +2,6 @@ coverage.out
*~
coverage.txt
profile.out
.tmp/
.git-dist/
.vscode


@ -1,111 +1,233 @@
Supported Capabilities
======================
# Supported Features
Here is a non-comprehensive table of git commands and features whose equivalent
is supported by go-git.
Here is a non-comprehensive table of git commands and features and their
compatibility status with go-git.
| Feature | Status | Notes |
|---------------------------------------|--------|-------|
| **config** |
| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. |
| **getting and creating repositories** |
| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. |
| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. |
| **basic snapshotting** |
| add | ✔ | Plain add is supported. Any other flags aren't supported |
| status | ✔ |
| commit | ✔ |
| reset | ✔ |
| rm | ✔ |
| mv | ✔ |
| **branching and merging** |
| branch | ✔ |
| checkout | ✔ | Basic usages of checkout are supported. |
| merge | ✖ |
| mergetool | ✖ |
| stash | ✖ |
| tag | |
| **sharing and updating projects** |
| fetch | ✔ |
| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. |
| push | ✔ |
| remote | ✔ |
| submodule | ✔ |
| **inspection and comparison** |
| show | ✔ |
| log | ✔ |
| shortlog | (see log) |
| describe | |
| **patching** |
| apply | ✖ |
| cherry-pick | ✖ |
| diff | ✔ | Patch object with UnifiedDiff output representation |
| rebase | ✖ |
| revert | ✖ |
| **debugging** |
| bisect | ✖ |
| blame | ✔ |
| grep | ✔ |
| **email** ||
| am | ✖ |
| apply | ✖ |
| format-patch | ✖ |
| send-email | ✖ |
| request-pull | ✖ |
| **external systems** |
| svn | |
| fast-import | ✖ |
| **administration** |
| clean | ✔ |
| gc | ✖ |
| fsck | ✖ |
| reflog | |
| filter-branch | ✖ |
| instaweb | |
| archive | |
| bundle | ✖ |
| prune | ✖ |
| repack | |
| **server admin** |
| daemon | |
| update-server-info | |
| **advanced** |
| notes | ✖ |
| replace | ✖ |
| worktree | |
| annotate | (see blame) |
| **gpg** |
| git-verify-commit | ✔ |
| git-verify-tag | ✔ |
| **plumbing commands** |
| cat-file | ✔ |
| check-ignore | |
| commit-tree | |
| count-objects | |
| diff-index | |
| for-each-ref | |
| hash-object | ✔ |
| ls-files | ✔ |
| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. |
| read-tree | |
| rev-list | ✔ |
| rev-parse | |
| show-ref | ✔ |
| symbolic-ref | |
| update-index | |
| update-ref | |
| verify-pack | |
| write-tree | |
| **protocols** |
| http(s):// (dumb) | |
| http(s):// (smart) | |
| git:// | |
| ssh:// | |
| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. |
| custom | |
| **other features** |
| gitignore | |
| gitattributes | |
| index version | |
| packfile version | |
| push-certs | ✖ |
## Getting and creating repositories
| Feature | Sub-feature | Status | Notes | Examples |
| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `init` | | ✅ | | |
| `init` | `--bare` | ✅ | | |
| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/>`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |
## Basic snapshotting
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ |
| `add` | | ✅ | Plain add is supported; no other flags are supported. | |
| `status` | | ✅ | | |
| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
| `reset` | | ✅ | | |
| `rm` | | ✅ | | |
| `mv` | | ✅ | | |
## Branching and merging
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
| `merge` | | ❌ | | |
| `mergetool` | | ❌ | | |
| `stash` | | ❌ | | |
| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
## Sharing and updating projects
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ |
| `fetch` | | ✅ | | |
| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
| `push` | | ✅ | | - [push](_examples/push/main.go) |
| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
| `submodule` | deinit | ❌ | | |
## Inspection and comparison
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | --------- | ----- | ------------------------------ |
| `show` | | ✅ | | |
| `log` | | ✅ | | - [log](_examples/log/main.go) |
| `shortlog` | | (see log) | | |
| `describe` | | ❌ | | |
## Patching
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- |
| `apply` | | ❌ | | |
| `cherry-pick` | | ❌ | | |
| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
| `rebase` | | ❌ | | |
| `revert` | | ❌ | | |
## Debugging
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ----- | ---------------------------------- |
| `bisect` | | ❌ | | |
| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
| `grep` | | ✅ | | |
## Email
| Feature | Sub-feature | Status | Notes | Examples |
| -------------- | ----------- | ------ | ----- | -------- |
| `am` | | ❌ | | |
| `apply` | | ❌ | | |
| `format-patch` | | ❌ | | |
| `send-email` | | ❌ | | |
| `request-pull` | | ❌ | | |
## External systems
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ----- | -------- |
| `svn` | | ❌ | | |
| `fast-import` | | ❌ | | |
| `lfs` | | ❌ | | |
## Administration
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ----------- | ------ | ----- | -------- |
| `clean` | | ✅ | | |
| `gc` | | ❌ | | |
| `fsck` | | ❌ | | |
| `reflog` | | ❌ | | |
| `filter-branch` | | ❌ | | |
| `instaweb` | | ❌ | | |
| `archive` | | ❌ | | |
| `bundle` | | ❌ | | |
| `prune` | | ❌ | | |
| `repack` | | ❌ | | |
## Server admin
| Feature | Sub-feature | Status | Notes | Examples |
| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
| `daemon` | | ❌ | | |
| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) |
## Advanced
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | ----------- | ----- | -------- |
| `notes` | | ❌ | | |
| `replace` | | ❌ | | |
| `worktree` | | ❌ | | |
| `annotate` | | (see blame) | | |
## GPG
| Feature | Sub-feature | Status | Notes | Examples |
| ------------------- | ----------- | ------ | ----- | -------- |
| `git-verify-commit` | | ✅ | | |
| `git-verify-tag` | | ✅ | | |
## Plumbing commands
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- |
| `cat-file` | | ✅ | | |
| `check-ignore` | | ❌ | | |
| `commit-tree` | | ❌ | | |
| `count-objects` | | ❌ | | |
| `diff-index` | | ❌ | | |
| `for-each-ref` | | ✅ | | |
| `hash-object` | | ✅ | | |
| `ls-files` | | ✅ | | |
| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
| `merge-base` | `--independent` <br/> `--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
| `merge-base` | `--fork-point` <br/> `--octopus` | ❌ | | |
| `read-tree` | | ❌ | | |
| `rev-list` | | ✅ | | |
| `rev-parse` | | ❌ | | |
| `show-ref` | | ✅ | | |
| `symbolic-ref` | | ✅ | | |
| `update-index` | | ❌ | | |
| `update-ref` | | ❌ | | |
| `verify-pack` | | ❌ | | |
| `write-tree` | | ❌ | | |
## Indexes and Git Protocols
| Feature | Version | Status | Notes |
| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- |
| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| cruft packs | | ❌ | |
## Capabilities
| Feature | Status | Notes |
| ------------------------------ | ------------ | ----- |
| `multi_ack` | ❌ | |
| `multi_ack_detailed` | ❌ | |
| `no-done` | ❌ | |
| `thin-pack` | ❌ | |
| `side-band` | ⚠️ (partial) | |
| `side-band-64k` | ⚠️ (partial) | |
| `ofs-delta` | ✅ | |
| `agent` | ✅ | |
| `object-format` | ❌ | |
| `symref` | ✅ | |
| `shallow` | ✅ | |
| `deepen-since` | ✅ | |
| `deepen-not` | ❌ | |
| `deepen-relative` | ❌ | |
| `no-progress` | ✅ | |
| `include-tag` | ✅ | |
| `report-status` | ✅ | |
| `report-status-v2` | ❌ | |
| `delete-refs` | ✅ | |
| `quiet` | ❌ | |
| `atomic` | ✅ | |
| `push-options` | ✅ | |
| `allow-tip-sha1-in-want` | ✅ | |
| `allow-reachable-sha1-in-want` | ❌ | |
| `push-cert=<nonce>` | ❌ | |
| `filter` | ❌ | |
| `session-id=<session id>` | ❌ | |
## Transport Schemes
| Scheme | Status | Notes | Examples |
| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- |
| `http(s)://` (dumb) | ❌ | | |
| `http(s)://` (smart) | ✅ | | |
| `git://` | ✅ | | |
| `ssh://` | ✅ | | |
| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | |
| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
## SHA256
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ |
| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
| `pull` | | ❌ | | |
| `fetch` | | ❌ | | |
| `push` | | ❌ | | |
## Other features
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- |
| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
| `config` | `--global` <br/> `--system` | ✅ | Read-only. | |
| `gitignore` | | ✅ | | |
| `gitattributes` | | ✅ | | |
| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |

vendor/github.com/go-git/go-git/v5/EXTENDING.md generated vendored Normal file

@ -0,0 +1,78 @@
# Extending go-git
`go-git` was built in a highly extensible manner, which enables some of its functionality to be changed or extended without modifying its codebase. Here are the key extensibility features:
## Dot Git Storers
Dot git storers are the components responsible for storing the Git internal files, including objects and references.
The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer stores all the data in memory, and its use looks like this:
```go
r, err := git.Init(memory.NewStorage(), nil)
```
The `filesystem` storer stores the data in the OS filesystem, and can be used as follows:
```go
r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo")), nil)
```
New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16).
## Filesystem
Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations will take place against the specific filesystem implementation. Initialising a repository in memory can be done as follows:
```go
fs := memfs.New()
r, err := git.Init(memory.NewStorage(), fs)
```
The same operation can be done against the OS filesystem:
```go
fs := osfs.New("/tmp/foo")
r, err := git.Init(memory.NewStorage(), fs)
```
New filesystems (e.g. cloud-based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52).
## Transport Schemes
Git supports various transport schemes, including `http`, `https`, `ssh`, `git`, `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them.
The built-in implementations can be replaced by calling `client.InstallProtocol`.
An example of changing the built-in `https` implementation to skip TLS could look like this:
```go
customClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
client.InstallProtocol("https", githttp.NewClient(customClient))
```
Some internal implementations enable code reuse among the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`).
## Cache
Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17).
Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by providing an alternative implementation of the `cache.Object` interface.
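For instance, the default LRU object cache can be constructed explicitly and handed to the filesystem storer (a sketch; the repository path is illustrative):

```go
fs := osfs.New("/tmp/foo/.git")
// Swap cache.NewObjectLRUDefault() for any custom cache.Object
// implementation to change the caching behaviour.
storer := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
r, err := git.Open(storer, osfs.New("/tmp/foo"))
```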
## Hash
`go-git` uses Go's `crypto.Hash` to identify hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/sha256` for SHA256.
The default hash functions can be changed by calling `hash.RegisterHash`.
```go
func init() {
hash.RegisterHash(crypto.SHA1, sha1.New)
}
```
New `SHA1` or `SHA256` hash functions that satisfy the standard `hash.Hash` interface can be registered by calling `hash.RegisterHash`.


@ -27,7 +27,13 @@ build-git:
test:
@echo "running against `git version`"; \
$(GOTEST) ./...
$(GOTEST) -race ./...
TEMP_REPO := $(shell mktemp)
test-sha256:
$(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO)
cd $(TEMP_REPO) && git fsck
rm -rf $(TEMP_REPO)
test-coverage:
@echo "running against `git version`"; \
@ -35,4 +41,13 @@ test-coverage:
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
clean:
rm -rf $(GIT_DIST_PATH)
rm -rf $(GIT_DIST_PATH)
fuzz:
@go test -fuzz=FuzzParser $(PWD)/internal/revision
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config
@go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile
@go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object
@go test -fuzz=FuzzDecode $(PWD)/plumbing/object
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp
@go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport

vendor/github.com/go-git/go-git/v5/SECURITY.md generated vendored Normal file

@ -0,0 +1,38 @@
# go-git Security Policy
The purpose of this security policy is to outline `go-git`'s process
for reporting, handling and disclosing security-sensitive information.
## Supported Versions
The project follows a version support policy where only the latest minor
release is actively supported. Therefore, only issues that impact the latest
minor release will be fixed. Users are encouraged to upgrade to the latest
minor/patch release to benefit from the most up-to-date features, bug fixes,
and security enhancements.
The supported versions policy applies to both the `go-git` library and its
associated repositories within the `go-git` org.
## Reporting Security Issues
Please report any security vulnerabilities or potential weaknesses in `go-git`
privately via go-git-security@googlegroups.com. Do not publicly disclose the
details of the vulnerability until a fix has been implemented and released.
During the process the project maintainers will investigate the report, so please
provide detailed information, including steps to reproduce, affected versions, and any mitigations if known.
The project maintainers will acknowledge the receipt of the report and work with
the reporter to validate and address the issue.
Please note that `go-git` does not have any bounty programs, and therefore does
not provide financial compensation for disclosures.
## Security Disclosure Process
The project maintainers will make every effort to promptly address security issues.
Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures.
All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.


@ -2,16 +2,18 @@ package git
import (
"bytes"
"container/heap"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/utils/diff"
"github.com/sergi/go-diff/diffmatchpatch"
)
// BlameResult represents the result of a Blame operation.
@ -29,53 +31,26 @@ type BlameResult struct {
func Blame(c *object.Commit, path string) (*BlameResult, error) {
// The file to blame is identified by the input arguments:
// commit and path. commit is a Commit object obtained from a Repository. Path
// represents a path to a specific file contained into the repository.
// represents a path to a specific file contained in the repository.
//
// Blaming a file is a two step process:
// Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified.
//
// 1. Create a linear history of the commits affecting a file. We use
// revlist.New for that.
// When a diff is found we cannot immediately assume it came from that commit, as it may have come from one
// of its parents. The algorithm first tries to resolve those diffs against the parents; only if the change
// cannot be found in any parent is it assigned to the commit itself.
//
// 2. Then build a graph with a node for every line in every file in
// the history of the file.
// When encountering two parents that have made the same change to a file, the algorithm chooses the parent
// that was merged into the current branch first (this is determined by the order of the parents inside the commit).
//
// Each node is assigned a commit: Start by the nodes in the first
// commit. Assign that commit as the creator of all its lines.
//
// Then jump to the nodes in the next commit, and calculate the diff
// between the two files. Newly created lines get
// assigned the new commit as its origin. Modified lines also get
// this new commit. Untouched lines retain the old commit.
//
// All this work is done in the assignOrigin function which holds all
// the internal relevant data in a "blame" struct, that is not
// exported.
//
// TODO: ways to improve the efficiency of this function:
// 1. Improve revlist
// 2. Improve how to traverse the history (example a backward traversal will
// be much more efficient)
//
// TODO: ways to improve the function in general:
// 1. Add memoization between revlist and assign.
// 2. It is using much more memory than needed, see the TODOs below.
// This currently works on a line-by-line basis; if performance becomes an issue it could be changed to work
// with hunks rather than lines. Then when encountering diff hunks it would need to split them where necessary.
b := new(blame)
b.fRev = c
b.path = path
b.q = new(priorityQueue)
// get all the file revisions
if err := b.fillRevs(); err != nil {
return nil, err
}
// calculate the line tracking graph and fill in
// file contents in data.
if err := b.fillGraphAndData(); err != nil {
return nil, err
}
file, err := b.fRev.File(b.path)
file, err := b.fRev.File(path)
if err != nil {
return nil, err
}
@ -83,13 +58,59 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
if err != nil {
return nil, err
}
finalLength := len(finalLines)
// Each node (line) holds the commit where it was introduced or
// last modified. To achieve that we use the FORWARD algorithm
// described in Zimmermann, et al. "Mining Version Archives for
// Co-changed Lines", in proceedings of the Mining Software
// Repositories workshop, Shanghai, May 22-23, 2006.
lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1))
needsMap := make([]lineMap, finalLength)
for i := range needsMap {
needsMap[i] = lineMap{i, i, nil, -1}
}
contents, err := file.Contents()
if err != nil {
return nil, err
}
b.q.Push(&queueItem{
nil,
nil,
c,
path,
contents,
needsMap,
0,
false,
0,
})
items := make([]*queueItem, 0)
for {
items = items[:0]
for {
if b.q.Len() == 0 {
return nil, errors.New("invalid state: no items left on the blame queue")
}
item := b.q.Pop()
items = append(items, item)
next := b.q.Peek()
if next == nil || next.Hash != item.Commit.Hash {
break
}
}
finished, err := b.addBlames(items)
if err != nil {
return nil, err
}
if finished {
break
}
}
if err != nil {
return nil, err
}
b.lineToCommit = make([]*object.Commit, finalLength)
for i := range needsMap {
b.lineToCommit[i] = needsMap[i].Commit
}
lines, err := newLines(finalLines, b.lineToCommit)
if err != nil {
return nil, err
}
@ -105,6 +126,8 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
type Line struct {
// Author is the email address of the last author that modified the line.
Author string
// AuthorName is the name of the last author that modified the line.
AuthorName string
// Text is the original text of the line.
Text string
// Date is when the original text of the line was introduced
@ -113,31 +136,21 @@ type Line struct {
Hash plumbing.Hash
}
func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line {
return &Line{
Author: author,
Text: text,
Hash: hash,
Date: date,
Author: author,
AuthorName: authorName,
Text: text,
Hash: hash,
Date: date,
}
}
func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
lcontents := len(contents)
lcommits := len(commits)
if lcontents != lcommits {
if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
contents = append(contents, "\n")
} else {
return nil, errors.New("contents and commits have different length")
}
}
result := make([]*Line, 0, lcontents)
result := make([]*Line, 0, len(contents))
for i := range contents {
result = append(result, newLine(
commits[i].Author.Email, contents[i],
commits[i].Author.Email, commits[i].Author.Name, contents[i],
commits[i].Author.When, commits[i].Hash,
))
}
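The exported entry point is unchanged by this rewrite; a minimal usage sketch (repository path illustrative):

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/tmp/repo")
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Blame resolves the last-modifying commit for every line of the
	// file, walking parents as described in the comments above.
	result, err := git.Blame(commit, "README.md")
	if err != nil {
		panic(err)
	}
	fmt.Print(result.String())
}
```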
@ -152,151 +165,426 @@ type blame struct {
path string
// the commit of the final revision of the file to blame
fRev *object.Commit
// the chain of revisions affecting the the file to blame
revs []*object.Commit
// the contents of the file across all its revisions
data []string
// the graph of the lines in the file across all the revisions
graph [][]*object.Commit
// resolved lines
lineToCommit []*object.Commit
// queue of commits that need resolving
q *priorityQueue
}
// calculate the history of a file "path", starting from commit "from", sorted by commit date.
func (b *blame) fillRevs() error {
var err error
b.revs, err = references(b.fRev, b.path)
return err
type lineMap struct {
Orig, Cur int
Commit *object.Commit
FromParentNo int
}
// build graph of a file from its revision history
func (b *blame) fillGraphAndData() error {
//TODO: not all commits are needed, only the current rev and the prev
b.graph = make([][]*object.Commit, len(b.revs))
b.data = make([]string, len(b.revs)) // file contents in all the revisions
// for every revision of the file, starting with the first
// one...
for i, rev := range b.revs {
func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
curItem := curItems[0]
// Simple optimisation to merge paths; there is potential to go a bit further here and check for any
// duplicates, not only whether they are all the same.
if len(curItems) == 1 {
curItems = nil
} else if curItem.IdenticalToChild {
allSame := true
lenCurItems := len(curItems)
lowestParentNo := curItem.ParentNo
for i := 1; i < lenCurItems; i++ {
if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child {
allSame = false
break
}
lowestParentNo = min(lowestParentNo, curItems[i].ParentNo)
}
if allSame {
curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1
curItems = nil // free the memory
curItem.ParentNo = lowestParentNo
// Now check if we can remove the parent completely
for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 {
oldChild := curItem.Child
curItem.Child = oldChild.Child
curItem.ParentNo = oldChild.ParentNo
}
}
}
// if we have more than 1 item for this commit, create a single needsMap
if len(curItems) > 1 {
curItem.MergedChildren = make([]childToNeedsMap, len(curItems))
for i, c := range curItems {
curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo}
}
newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap))
newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...)
for i := 1; i < len(curItems); i++ {
cur := curItems[i].NeedsMap
n := 0 // position in newNeedsMap
c := 0 // position in current list
for c < len(cur) {
if n == len(newNeedsMap) {
newNeedsMap = append(newNeedsMap, cur[c:]...)
break
} else if newNeedsMap[n].Cur == cur[c].Cur {
n++
c++
} else if newNeedsMap[n].Cur < cur[c].Cur {
n++
} else {
newNeedsMap = append(newNeedsMap, cur[c])
newPos := len(newNeedsMap) - 1
for newPos > n {
newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1]
newPos--
}
}
}
}
curItem.NeedsMap = newNeedsMap
curItem.IdenticalToChild = false
curItem.Child = nil
curItems = nil // free the memory
}
parents, err := parentsContainingPath(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
anyPushed := false
for parentNo, prev := range parents {
currentHash, err := blobHash(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
prevHash, err := blobHash(prev.Path, prev.Commit)
if err != nil {
return false, err
}
if currentHash == prevHash {
if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild {
// commit that has 1 parent and 1 child and is the same as both, bypass it completely
b.q.Push(&queueItem{
Child: curItem.Child,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item
IdenticalToChild: true,
ParentNo: curItem.ParentNo,
})
} else {
b.q.Push(&queueItem{
Child: curItem,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy
IdenticalToChild: true,
ParentNo: parentNo,
})
curItem.numParentsNeedResolving++
}
anyPushed = true
continue
}
// get the contents of the file
file, err := rev.File(b.path)
file, err := prev.Commit.File(prev.Path)
if err != nil {
return nil
return false, err
}
b.data[i], err = file.Contents()
prevContents, err := file.Contents()
if err != nil {
return err
return false, err
}
nLines := countLines(b.data[i])
// create a node for each line
b.graph[i] = make([]*object.Commit, nLines)
// assign a commit to each node
// if this is the first revision, then the node is assigned to
// this first commit.
if i == 0 {
for j := 0; j < nLines; j++ {
b.graph[i][j] = b.revs[i]
hunks := diff.Do(prevContents, curItem.Contents)
prevl := -1
curl := -1
need := 0
getFromParent := make([]lineMap, 0)
out:
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch {
case hunks[h].Type == diffmatchpatch.DiffEqual:
prevl++
curl++
if curl == curItem.NeedsMap[need].Cur {
// add to needs
getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1})
// move to next need
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case hunks[h].Type == diffmatchpatch.DiffInsert:
curl++
if curl == curItem.NeedsMap[need].Cur {
// the line we want is added, it may have been added here (or by another parent), skip it for now
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case hunks[h].Type == diffmatchpatch.DiffDelete:
prevl += hLines
continue out
default:
return false, errors.New("invalid state: invalid hunk Type")
}
}
} else {
// if this is not the first commit, then assign to the old
// commit or to the new one, depending on what the diff
// says.
b.assignOrigin(i, i-1)
}
if len(getFromParent) > 0 {
b.q.Push(&queueItem{
curItem,
nil,
prev.Commit,
prev.Path,
prevContents,
getFromParent,
0,
false,
parentNo,
})
curItem.numParentsNeedResolving++
anyPushed = true
}
}
return nil
}
// sliceGraph returns a slice of commits (one per line) for a particular
// revision of a file (0=first revision).
func (b *blame) sliceGraph(i int) []*object.Commit {
fVs := b.graph[i]
result := make([]*object.Commit, 0, len(fVs))
for _, v := range fVs {
c := *v
result = append(result, &c)
curItem.Contents = "" // no longer needed, free the memory
if !anyPushed {
return finishNeeds(curItem)
}
return result
return false, nil
}
// Assigns origin to vertexes in current (c) rev from data in its previous (p)
// revision
func (b *blame) assignOrigin(c, p int) {
// assign origin based on diff info
hunks := diff.Do(b.data[p], b.data[c])
sl := -1 // source line
dl := -1 // destination line
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch {
case hunks[h].Type == 0:
sl++
dl++
b.graph[c][dl] = b.graph[p][sl]
case hunks[h].Type == 1:
dl++
b.graph[c][dl] = b.revs[c]
case hunks[h].Type == -1:
sl++
default:
panic("unreachable")
func finishNeeds(curItem *queueItem) (bool, error) {
// any needs left in the needsMap must have come from this revision
for i := range curItem.NeedsMap {
if curItem.NeedsMap[i].Commit == nil {
curItem.NeedsMap[i].Commit = curItem.Commit
curItem.NeedsMap[i].FromParentNo = -1
}
}
if curItem.Child == nil && curItem.MergedChildren == nil {
return true, nil
}
if curItem.MergedChildren == nil {
return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo)
}
for _, ctn := range curItem.MergedChildren {
m := 0 // position in merged needs map
p := 0 // position in parent needs map
for p < len(ctn.NeedsMap) {
if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur {
ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit
m++
p++
} else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur {
p++
} else {
m++
}
}
finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) {
if identicalToChild {
for i := range child.NeedsMap {
l := &child.NeedsMap[i]
if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig {
return false, errors.New("needsMap isn't the same? Why not??")
}
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
} else {
i := 0
out:
for j := range child.NeedsMap {
l := &child.NeedsMap[j]
for needsMap[i].Orig < l.Cur {
i++
if i == len(needsMap) {
break out
}
}
if l.Cur == needsMap[i].Orig {
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
}
}
child.numParentsNeedResolving--
if child.numParentsNeedResolving == 0 {
finished, err := finishNeeds(child)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
// GoString prints the results of a Blame using git-blame's style.
func (b *blame) GoString() string {
// String prints the results of a Blame using git-blame's style.
func (b BlameResult) String() string {
var buf bytes.Buffer
file, err := b.fRev.File(b.path)
if err != nil {
panic("PrettyPrint: internal error in repo.Data")
}
contents, err := file.Contents()
if err != nil {
panic("PrettyPrint: internal error in repo.Data")
}
lines := strings.Split(contents, "\n")
// max line number length
mlnl := len(strconv.Itoa(len(lines)))
mlnl := len(strconv.Itoa(len(b.Lines)))
// max author length
mal := b.maxAuthorLength()
format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n",
mal, mlnl)
format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl)
fVs := b.graph[len(b.graph)-1]
for ln, v := range fVs {
fmt.Fprintf(&buf, format, v.Hash.String()[:8],
prettyPrintAuthor(fVs[ln]), ln+1, lines[ln])
for ln := range b.Lines {
_, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8],
b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text)
}
return buf.String()
}
// utility function to pretty print the author.
func prettyPrintAuthor(c *object.Commit) string {
return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02"))
}
// utility function to calculate the number of runes needed
// to print the longest author name in the blame of a file.
func (b *blame) maxAuthorLength() int {
memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
fVs := b.graph[len(b.graph)-1]
func (b BlameResult) maxAuthorLength() int {
m := 0
for ln := range fVs {
if _, ok := memo[fVs[ln].Hash]; ok {
continue
}
memo[fVs[ln].Hash] = struct{}{}
m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
for ln := range b.Lines {
m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
}
return m
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
type childToNeedsMap struct {
Child *queueItem
NeedsMap []lineMap
IdenticalToChild bool
ParentNo int
}
type queueItem struct {
Child *queueItem
MergedChildren []childToNeedsMap
Commit *object.Commit
path string
Contents string
NeedsMap []lineMap
numParentsNeedResolving int
IdenticalToChild bool
ParentNo int
}
type priorityQueueImp []*queueItem
func (pq *priorityQueueImp) Len() int { return len(*pq) }
func (pq *priorityQueueImp) Less(i, j int) bool {
return !(*pq)[i].Commit.Less((*pq)[j].Commit)
}
func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
func (pq *priorityQueueImp) Push(x any) { *pq = append(*pq, x.(*queueItem)) }
func (pq *priorityQueueImp) Pop() any {
n := len(*pq)
ret := (*pq)[n-1]
(*pq)[n-1] = nil // avoid memory leak
*pq = (*pq)[0 : n-1]
return ret
}
func (pq *priorityQueueImp) Peek() *object.Commit {
if len(*pq) == 0 {
return nil
}
return (*pq)[0].Commit
}
type priorityQueue priorityQueueImp
func (pq *priorityQueue) Init() { heap.Init((*priorityQueueImp)(pq)) }
func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }
func (pq *priorityQueue) Push(c *queueItem) {
heap.Push((*priorityQueueImp)(pq), c)
}
func (pq *priorityQueue) Pop() *queueItem {
return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
}
func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
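Less above negates Commit.Less, turning container/heap's min-heap into a max-heap: Pop returns the item with the most recent commit first. A minimal in-package sketch (older and newer are assumed *object.Commit values, newer carrying the later committer date):
	var pq priorityQueue
	pq.Init()
	pq.Push(&queueItem{Commit: older})
	pq.Push(&queueItem{Commit: newer})
	top := pq.Pop() // top.Commit == newer: the most recent commit pops first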
type parentCommit struct {
Commit *object.Commit
Path string
}
func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) {
// TODO: benchmark this method making git.object.Commit.parent public instead of using
// an iterator
var result []parentCommit
iter := c.Parents()
for {
parent, err := iter.Next()
if err == io.EOF {
return result, nil
}
if err != nil {
return nil, err
}
if _, err := parent.File(path); err == nil {
result = append(result, parentCommit{parent, path})
} else {
// look for renames
patch, err := parent.Patch(c)
if err != nil {
return nil, err
} else if patch != nil {
for _, fp := range patch.FilePatches() {
from, to := fp.Files()
if from != nil && to != nil && to.Path() == path {
result = append(result, parentCommit{parent, from.Path()})
break
}
}
}
}
}
}
func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) {
file, err := commit.File(path)
if err != nil {
return plumbing.ZeroHash, err
}
return file.Hash, nil
}

View File

@ -2,6 +2,7 @@ package config
import (
"errors"
"strings"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
@ -26,6 +27,12 @@ type Branch struct {
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
// Description explains what the branch is for.
// Multi-line explanations may be used.
//
// Original git command to edit:
// git branch --edit-description
Description string
raw *format.Subsection
}
@ -47,7 +54,7 @@ func (b *Branch) Validate() error {
return errBranchInvalidRebase
}
return nil
return plumbing.NewBranchReferenceName(b.Name).Validate()
}
func (b *Branch) marshal() *format.Subsection {
@ -75,9 +82,27 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(rebaseKey, b.Rebase)
}
if b.Description == "" {
b.raw.RemoveOption(descriptionKey)
} else {
desc := quoteDescription(b.Description)
b.raw.SetOption(descriptionKey, desc)
}
return b.raw
}
// hack to trigger conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
//
// The current Encoder implementation uses Go's %q format if the value contains a backslash,
// which is not consistent with the reference git implementation:
// git just replaces newline characters with \n, while the Encoder prints them directly.
// Until the value quoting is fixed, we escape the description value by replacing newline characters with \n.
func quoteDescription(desc string) string {
return strings.ReplaceAll(desc, "\n", `\n`)
}
func (b *Branch) unmarshal(s *format.Subsection) error {
b.raw = s
@ -85,6 +110,14 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
return b.Validate()
}
// hack to enable conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
// see quoteDescription for details.
func unquoteDescription(desc string) string {
return strings.ReplaceAll(desc, `\n`, "\n")
}
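A standalone sketch of the same newline-escaping round trip. It mirrors quoteDescription/unquoteDescription above, which are unexported, so it reimplements the two ReplaceAll calls rather than calling the package:

package main

import (
	"fmt"
	"strings"
)

func main() {
	desc := "first line\nsecond line"
	quoted := strings.ReplaceAll(desc, "\n", `\n`) // what gets written to .git/config
	fmt.Println(quoted)                            // first line\nsecond line
	unquoted := strings.ReplaceAll(quoted, `\n`, "\n") // what unmarshal reads back
	fmt.Println(unquoted == desc)                  // true
}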

View File

@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -14,8 +13,8 @@ import (
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/mitchellh/go-homedir"
)
const (
@ -60,12 +59,14 @@ type Config struct {
// CommentChar is the character indicating the start of a
// comment for commands like commit and tag
CommentChar string
// RepositoryFormatVersion identifies the repository format and layout version.
RepositoryFormatVersion format.RepositoryFormatVersion
}
User struct {
// Name is the personal name of the author and the commiter of a commit.
// Name is the personal name of the author and the committer of a commit.
Name string
// Email is the email of the author and the commiter of a commit.
// Email is the email of the author and the committer of a commit.
Email string
}
@ -77,9 +78,9 @@ type Config struct {
}
Committer struct {
// Name is the personal name of the commiter of a commit.
// Name is the personal name of the committer of a commit.
Name string
// Email is the email of the the commiter of a commit.
// Email is the email of the committer of a commit.
Email string
}
@ -97,6 +98,17 @@ type Config struct {
DefaultBranch string
}
Extensions struct {
// ObjectFormat specifies the hash algorithm to use. The
// acceptable values are sha1 and sha256. If not specified,
// sha1 is assumed. It is an error to specify this key unless
// core.repositoryFormatVersion is 1.
//
// This setting must not be changed after repository initialization
// (e.g. clone or init).
ObjectFormat format.ObjectFormat
}
// Remotes list of repository remotes, the key of the map is the name
// of the remote, should equal to RemoteConfig.Name.
Remotes map[string]*RemoteConfig
@ -132,7 +144,7 @@ func NewConfig() *Config {
// ReadConfig reads a config file from a io.Reader.
func ReadConfig(r io.Reader) (*Config, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@ -146,11 +158,11 @@ func ReadConfig(r io.Reader) (*Config, error) {
}
// LoadConfig loads a config file from a given scope. The returned Config,
// contains exclusively information fom the given scope. If couldn't find a
// config file to the given scope, a empty one is returned.
// contains exclusively information from the given scope. If it couldn't find a
// config file to the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer")
}
files, err := Paths(scope)
@ -185,7 +197,7 @@ func Paths(scope Scope) ([]string, error) {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := homedir.Dir()
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@ -227,27 +239,32 @@ func (c *Config) Validate() error {
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
defaultBranchKey = "defaultBranch"
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
extensionsSection = "extensions"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
repositoryFormatVersionKey = "repositoryformatversion"
objectFormat = "objectformat"
mirrorKey = "mirror"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
@ -391,6 +408,7 @@ func (c *Config) unmarshalInit() {
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalExtensions()
c.marshalUser()
c.marshalPack()
c.marshalRemotes()
@ -410,12 +428,24 @@ func (c *Config) Marshal() ([]byte, error) {
func (c *Config) marshalCore() {
s := c.Raw.Section(coreSection)
s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
if string(c.Core.RepositoryFormatVersion) != "" {
s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion))
}
if c.Core.Worktree != "" {
s.SetOption(worktreeKey, c.Core.Worktree)
}
}
func (c *Config) marshalExtensions() {
// Extensions are only supported on Version 1, therefore
// ignore them otherwise.
if c.Core.RepositoryFormatVersion == format.Version_1 {
s := c.Raw.Section(extensionsSection)
s.SetOption(objectFormat, string(c.Extensions.ObjectFormat))
}
}
func (c *Config) marshalUser() {
s := c.Raw.Section(userSection)
if c.User.Name != "" {
@ -549,6 +579,8 @@ type RemoteConfig struct {
// URLs the URLs of a remote repository. It must be non-empty. Fetch will
// always use the first URL, while push will use all of them.
URLs []string
// Mirror indicates that the repository is a mirror of remote.
Mirror bool
// insteadOfRulesApplied indicates whether the URLs have been modified
insteadOfRulesApplied bool
@ -583,7 +615,7 @@ func (c *RemoteConfig) Validate() error {
c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
}
return nil
return plumbing.NewRemoteHEADReferenceName(c.Name).Validate()
}
func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
@ -602,6 +634,7 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.Fetch = fetch
c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
return nil
}
@ -634,6 +667,10 @@ func (c *RemoteConfig) marshal() *format.Subsection {
c.raw.SetOption(fetchKey, values...)
}
if c.Mirror {
c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror))
}
return c.raw
}

View File

@ -64,7 +64,7 @@ func (s RefSpec) IsExactSHA1() bool {
return plumbing.IsHash(s.Src())
}
// Src return the src side.
// Src returns the src side.
func (s RefSpec) Src() string {
spec := string(s)

View File

@ -0,0 +1,29 @@
package path_util
import (
"os"
"os/user"
"strings"
)
func ReplaceTildeWithHome(path string) (string, error) {
if strings.HasPrefix(path, "~") {
firstSlash := strings.Index(path, "/")
if firstSlash == 1 {
home, err := os.UserHomeDir()
if err != nil {
return path, err
}
return strings.Replace(path, "~", home, 1), nil
} else if firstSlash > 1 {
username := path[1:firstSlash]
userAccount, err := user.Lookup(username)
if err != nil {
return path, err
}
return strings.Replace(path, path[:firstSlash], userAccount.HomeDir, 1), nil
}
}
return path, nil
}
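A usage sketch for ReplaceTildeWithHome. Note that the package is internal to go-git, so external modules cannot import it; this is illustrative only, and the output depends on the running user's home directory:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/internal/path_util"
)

func main() {
	// With HOME=/home/alice this prints /home/alice/.gitignore_global.
	p, err := path_util.ReplaceTildeWithHome("~/.gitignore_global")
	if err != nil {
		fmt.Println("home lookup failed:", err)
		return
	}
	fmt.Println(p)
}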

View File

@ -322,6 +322,8 @@ func (p *Parser) parseAt() (Revisioner, error) {
}
return AtDate{t}, nil
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in @{<data>} structure`}
default:
date += lit
}
@ -424,6 +426,8 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) {
p.unscan()
case tok != slash && start:
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in ^{<data>} structure`}
case tok != cbrace:
p.unscan()
re += lit

View File

@ -5,8 +5,10 @@ import (
)
var (
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`)
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
// Ref: https://github.com/git/git/blob/master/Documentation/urls.txt#L37
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`)
)
// MatchesScheme returns true if the given string matches a URL-like

View File

@ -60,7 +60,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
// Fetch the object.
obj, err := object.GetObject(p.Storer, hash)
if err != nil {
return fmt.Errorf("Getting object %s failed: %v", hash, err)
return fmt.Errorf("getting object %s failed: %v", hash, err)
}
// Walk all children depending on object type.
switch obj := obj.(type) {
@ -98,7 +98,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
return p.walkObjectTree(obj.Target)
default:
// Error out on unhandled object types.
return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)
return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
}
return nil
}

View File

@ -10,6 +10,7 @@ import (
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
"github.com/go-git/go-git/v5/plumbing/transport"
@ -45,6 +46,14 @@ type CloneOptions struct {
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
SingleBranch bool
// Mirror clones the repository as a mirror.
//
// Compared to a bare clone, mirror not only maps local branches of the
// source to local branches of the target, it maps all refs (including
// remote-tracking branches, notes etc.) and sets up a refspec configuration
// such that all these refs are overwritten by a git remote update in the
// target repository.
Mirror bool
// No checkout of HEAD after clone if true.
NoCheckout bool
// Limit fetching to the specified number of commits.
@ -53,6 +62,9 @@ type CloneOptions struct {
// within, using their default settings. This option is ignored if the
// cloned repository does not have a worktree.
RecurseSubmodules SubmoduleRescursivity
// ShallowSubmodules limits the cloning of submodules to a depth of 1.
// It matches the git command --shallow-submodules.
ShallowSubmodules bool
// Progress is where the human readable information sent by the server is
// stored. If nil, nothing is stored, and the capability (if supported)
// no-progress is sent to the server to avoid sending this information.
@ -64,6 +76,17 @@ type CloneOptions struct {
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// When the repository to clone is on the local machine, instead of
// using hard links, automatically set up .git/objects/info/alternates
// to share the objects with the source repository.
// The resulting repository starts out without any object of its own.
// NOTE: this is a possibly dangerous operation; do not use it unless
// you understand what it does.
//
// [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared
Shared bool
}
// Validate validates the fields and sets the default values.
@ -91,6 +114,8 @@ func (o *CloneOptions) Validate() error {
type PullOptions struct {
// Name of the remote to be pulled. If empty, uses the default.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// Remote branch to clone. If empty, uses HEAD.
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
@ -113,6 +138,8 @@ type PullOptions struct {
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// Validate validates the fields and sets the default values.
@ -147,7 +174,9 @@ const (
type FetchOptions struct {
// Name of the remote to fetch from. Defaults to origin.
RemoteName string
RefSpecs []config.RefSpec
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
RefSpecs []config.RefSpec
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
@ -167,6 +196,8 @@ type FetchOptions struct {
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// Validate validates the fields and sets the default values.
@ -192,8 +223,16 @@ func (o *FetchOptions) Validate() error {
type PushOptions struct {
// RemoteName is the name of the remote to be pushed to.
RemoteName string
// RefSpecs specify what destination ref to update with what source
// object. A refspec with empty src can be used to delete a reference.
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// RefSpecs specify what destination ref to update with what source object.
//
// The format of a <refspec> parameter is an optional plus +, followed by
// the source object <src>, followed by a colon :, followed by the destination ref <dst>.
// The <src> is often the name of the branch you would want to push, but it can be a SHA-1.
// The <dst> tells which ref on the remote side is updated with this push.
//
// A refspec with empty src can be used to delete a reference.
RefSpecs []config.RefSpec
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
@ -206,13 +245,37 @@ type PushOptions struct {
// Force allows the push to update a remote branch even when the local
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// RequireRemoteRefs only allows a remote ref to be updated if its current
// value is the one specified here.
RequireRemoteRefs []config.RefSpec
// FollowTags will send any annotated tags with a commit target reachable from
// the refs already being pushed
FollowTags bool
// ForceWithLease allows a force push as long as the remote ref adheres to a "lease"
ForceWithLease *ForceWithLease
// PushOptions sets options to be transferred to the server during push.
Options map[string]string
// Atomic makes the push atomic: either all refs are updated, or on error, none are.
Atomic bool
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// ForceWithLease sets fields on the lease
// If neither RefName nor Hash is set, ForceWithLease protects
// all refs in the refspec by ensuring the ref of the remote in the local repository
// matches the one in the ref advertisement.
type ForceWithLease struct {
// RefName, when set, will protect the ref by ensuring it matches the
// hash in the ref advertisement.
RefName plumbing.ReferenceName
// Hash is the expected object id of RefName. The push will be rejected unless this
// matches the corresponding object id of RefName in the refs advertisement.
Hash plumbing.Hash
}
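A sketch of a force-with-lease push using these options; the remote name, branch, and refspec are assumptions for illustration:

package main

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	// Force-push main, but only while the remote ref still matches the
	// hash seen in the ref advertisement (the "lease").
	err = repo.Push(&git.PushOptions{
		RemoteName: "origin",
		RefSpecs:   []config.RefSpec{"+refs/heads/main:refs/heads/main"},
		ForceWithLease: &git.ForceWithLease{
			RefName: plumbing.NewBranchReferenceName("main"),
		},
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		panic(err)
	}
}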
// Validate validates the fields and sets the default values.
@ -249,6 +312,9 @@ type SubmoduleUpdateOptions struct {
RecurseSubmodules SubmoduleRescursivity
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
}
var (
@ -274,6 +340,8 @@ type CheckoutOptions struct {
// target branch. Force and Keep are mutually exclusive, should not be both
// set to true.
Keep bool
// SparseCheckoutDirectories
SparseCheckoutDirectories []string
}
// Validate validates the fields and sets the default values.
@ -366,7 +434,7 @@ type LogOptions struct {
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
// this field is kept for compatility, it can be replaced with PathFilter
// this field is kept for compatibility, it can be replaced with PathFilter
FileName *string
// Filter commits based on the path of files that are updated
@ -422,6 +490,10 @@ type CommitOptions struct {
// All automatically stage files that have been modified and deleted, but
// new files you have not told Git about are not affected.
All bool
// AllowEmptyCommits enables empty commits to be created. An empty commit
// is when no changes to the tree were made, but a new commit message is
// provided. The default behavior is false, which results in ErrEmptyCommit.
AllowEmptyCommits bool
// Author is the author's signature of the commit. If Author is empty, the
// Name and Email are read from the config, and time.Now is used as When.
Author *object.Signature
@ -435,10 +507,21 @@ type CommitOptions struct {
// commit will not be signed. The private key must be present and already
// decrypted.
SignKey *openpgp.Entity
// Amend will create a new commit object and replace the commit that HEAD currently
// points to. Cannot be used with All or Parents.
Amend bool
}
// Validate validates the fields and sets the default values.
func (o *CommitOptions) Validate(r *Repository) error {
if o.All && o.Amend {
return errors.New("all and amend cannot be used together")
}
if o.Amend && len(o.Parents) > 0 {
return errors.New("parents cannot be used with amend")
}
if o.Author == nil {
if err := o.loadConfigAuthorAndCommitter(r); err != nil {
return err
@ -571,12 +654,35 @@ func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// PeelingOption defines how peeled objects are handled during a
// remote list.
PeelingOption PeelingOption
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// Timeout specifies the timeout in seconds for list operations
Timeout int
}
// PeelingOption represents the different ways to handle peeled references.
//
// Peeled references represent the underlying object of an annotated
// (or signed) tag. Refer to upstream documentation for more info:
// https://github.com/git/git/blob/master/Documentation/technical/reftable.txt
type PeelingOption uint8
const (
// IgnorePeeled ignores all peeled reference names. This is the default behavior.
IgnorePeeled PeelingOption = 0
// OnlyPeeled returns only peeled reference names.
OnlyPeeled PeelingOption = 1
// AppendPeeled appends peeled reference names to the reference list.
AppendPeeled PeelingOption = 2
)
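A sketch of listing remote refs with peeled tag entries appended; "origin" is an assumption:

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	remote, err := repo.Remote("origin")
	if err != nil {
		panic(err)
	}
	// AppendPeeled keeps the annotated tag refs and appends "<tag>^{}"
	// entries that point directly at the tagged commits.
	refs, err := remote.List(&git.ListOptions{PeelingOption: git.AppendPeeled})
	if err != nil {
		panic(err)
	}
	for _, r := range refs {
		fmt.Println(r.Name(), r.Hash())
	}
}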
// CleanOptions describes how a clean should be performed.
type CleanOptions struct {
Dir bool
@ -601,7 +707,13 @@ var (
)
// Validate validates the fields and sets the default values.
//
// TODO: deprecate in favor of Validate(r *Repository) in v6.
func (o *GrepOptions) Validate(w *Worktree) error {
return o.validate(w.r)
}
func (o *GrepOptions) validate(r *Repository) error {
if !o.CommitHash.IsZero() && o.ReferenceName != "" {
return ErrHashOrReference
}
@ -609,7 +721,7 @@ func (o *GrepOptions) Validate(w *Worktree) error {
// If none of CommitHash and ReferenceName are provided, set commit hash of
// the repository's head.
if o.CommitHash.IsZero() && o.ReferenceName == "" {
ref, err := w.r.Head()
ref, err := r.Head()
if err != nil {
return err
}
@ -632,3 +744,13 @@ type PlainOpenOptions struct {
// Validate validates the fields and sets the default values.
func (o *PlainOpenOptions) Validate() error { return nil }
type PlainInitOptions struct {
InitOptions
// Determines if the repository will have a worktree (non-bare) or not (bare).
Bare bool
ObjectFormat formatcfg.ObjectFormat
}
// Validate validates the fields and sets the default values.
func (o *PlainInitOptions) Validate() error { return nil }

35
vendor/github.com/go-git/go-git/v5/oss-fuzz.sh generated vendored Normal file
View File

@ -0,0 +1,35 @@
#!/bin/bash -eu
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
go mod download
go get github.com/AdamKorcz/go-118-fuzz-build/testing
if [ "$SANITIZER" != "coverage" ]; then
sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go
sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go
sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go
sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go
fi
compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser
compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config
compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode
compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp
compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint

View File

@ -133,7 +133,7 @@ func (m FileMode) IsMalformed() bool {
m != Submodule
}
// String returns the FileMode as a string in the standatd git format,
// String returns the FileMode as a string in the standard git format,
// that is, an octal number padded with zeros to 7 digits. Malformed
// modes are printed in that same format, for easier debugging.
//

View File

@ -11,6 +11,10 @@ type Encoder struct {
w io.Writer
}
var (
subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
)
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w}
@ -48,8 +52,7 @@ func (e *Encoder) encodeSection(s *Section) error {
}
func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
//TODO: escape
if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil {
return err
}
@ -58,12 +61,14 @@ func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
func (e *Encoder) encodeOptions(opts Options) error {
for _, o := range opts {
pattern := "\t%s = %s\n"
if strings.Contains(o.Value, "\\") {
pattern = "\t%s = %q\n"
var value string
if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") {
value = `"`+valueReplacer.Replace(o.Value)+`"`
} else {
value = o.Value
}
if err := e.printf(pattern, o.Key, o.Value); err != nil {
if err := e.printf("\t%s = %s\n", o.Key, value); err != nil {
return err
}
}
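A sketch of the resulting quoting behavior through the public encoder API (assuming the package's New/Section/Subsection/SetOption helpers):

package main

import (
	"bytes"
	"fmt"

	format "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := format.New()
	cfg.Section("branch").Subsection("main").SetOption("description", "line one\nline two")
	var buf bytes.Buffer
	if err := format.NewEncoder(&buf).Encode(cfg); err != nil {
		panic(err)
	}
	// The embedded newline triggers quoting:
	//	description = "line one\nline two"
	fmt.Print(buf.String())
}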

View File

@ -0,0 +1,53 @@
package config
// RepositoryFormatVersion represents the repository format version,
// as defined at:
//
// https://git-scm.com/docs/repository-version
type RepositoryFormatVersion string
const (
// Version_0 is the format defined by the initial version of git,
// including but not limited to the format of the repository
// directory, the repository configuration file, and the object
// and ref storage.
//
// Specifying the complete behavior of git is beyond the scope
// of this document.
Version_0 = "0"
// Version_1 is identical to version 0, with the following exceptions:
//
// 1. When reading the core.repositoryformatversion variable, a git
// implementation which supports version 1 MUST also read any
// configuration keys found in the extensions section of the
// configuration file.
//
// 2. If a version-1 repository specifies any extensions.* keys that
// the running git has not implemented, the operation MUST NOT proceed.
// Similarly, if the value of any known key is not understood by the
// implementation, the operation MUST NOT proceed.
//
// Note that if no extensions are specified in the config file, then
// core.repositoryformatversion SHOULD be set to 0 (setting it to 1 provides
// no benefit, and makes the repository incompatible with older
// implementations of git).
Version_1 = "1"
// DefaultRepositoryFormatVersion holds the default repository format version.
DefaultRepositoryFormatVersion = Version_0
)
// ObjectFormat defines the object format.
type ObjectFormat string
const (
// SHA1 represents the object format used for SHA1.
SHA1 ObjectFormat = "sha1"
// SHA256 represents the object format used for SHA256.
SHA256 ObjectFormat = "sha256"
// DefaultObjectFormat holds the default object format.
DefaultObjectFormat = SHA1
)
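A sketch tying the two knobs together: a version-1 config whose extensions section selects SHA256 (per marshalExtensions above, Marshal only emits extensions when the version is 1):

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
	format "github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := config.NewConfig()
	cfg.Core.RepositoryFormatVersion = format.Version_1
	cfg.Extensions.ObjectFormat = format.SHA256
	out, err := cfg.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // includes an [extensions] section with objectformat = sha256
}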

View File

@ -103,7 +103,7 @@ func (s *Section) RemoveSubsection(name string) *Section {
return s
}
// Option return the value for the specified key. Empty string is returned if
// Option returns the value for the specified key. Empty string is returned if
// key does not exist.
func (s *Section) Option(key string) string {
return s.Options.Get(key)

View File

@ -9,7 +9,7 @@ import (
type Operation int
const (
// Equal item represents a equals diff.
// Equal item represents an equals diff.
Equal Operation = iota
// Add item represents an insert diff.
Add
@ -26,15 +26,15 @@ type Patch interface {
Message() string
}
// FilePatch represents the necessary steps to transform one file to another.
// FilePatch represents the necessary steps to transform one file into another.
type FilePatch interface {
// IsBinary returns true if this patch is representing a binary file.
IsBinary() bool
// Files returns the from and to Files, with all the necessary metadata to
// Files returns the from and to Files, with all the necessary metadata
// about them. If the patch creates a new file, "from" will be nil.
// If the patch deletes a file, "to" will be nil.
Files() (from, to File)
// Chunks returns a slice of ordered changes to transform "from" File to
// Chunks returns a slice of ordered changes to transform "from" File into
// "to" File. If the file is a binary one, Chunks will be empty.
Chunks() []Chunk
}
@ -49,7 +49,7 @@ type File interface {
Path() string
}
// Chunk represents a portion of a file transformation to another.
// Chunk represents a portion of a file transformation into another.
type Chunk interface {
// Content contains the portion of the file.
Content() string

View File

@ -3,27 +3,32 @@ package gitignore
import (
"bufio"
"bytes"
"io/ioutil"
"io"
"os"
"strings"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-git/v5/internal/path_util"
"github.com/go-git/go-git/v5/plumbing/format/config"
gioutil "github.com/go-git/go-git/v5/utils/ioutil"
)
const (
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
infoExcludeFile = gitDir + "/info/exclude"
)
// readIgnoreFile reads a specific git ignore file.
func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
ignoreFile, _ = path_util.ReplaceTildeWithHome(ignoreFile)
f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
if err == nil {
defer f.Close()
@ -42,10 +47,14 @@ func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps [
return
}
// ReadPatterns reads gitignore patterns recursively traversing through the directory
// structure. The result is in the ascending order of priority (last higher).
// ReadPatterns reads the .git/info/exclude and then the gitignore patterns
// recursively traversing through the directory structure. The result is in
// the ascending order of priority (last higher).
func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
ps, _ = readIgnoreFile(fs, path, gitignoreFile)
ps, _ = readIgnoreFile(fs, path, infoExcludeFile)
subps, _ := readIgnoreFile(fs, path, gitignoreFile)
ps = append(ps, subps...)
var fis []os.FileInfo
fis, err = fs.ReadDir(fs.Join(path...))
@ -81,7 +90,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
defer gioutil.CheckClose(f, &err)
b, err := ioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
return
}

View File

@ -39,6 +39,8 @@ type pattern struct {
// ParsePattern parses a gitignore pattern string into the Pattern structure.
func ParsePattern(p string, domain []string) Pattern {
// storing domain, copy it to ensure it isn't changed externally
domain = append([]string(nil), domain...)
res := pattern{domain: domain}
if strings.HasPrefix(p, inclusionPrefix) {

View File

@ -6,20 +6,21 @@ import (
"errors"
"io"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
ErrUnsupportedVersion = errors.New("Unsupported version")
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
ErrMalformedIdxFile = errors.New("malformed IDX file")
)
const (
fanout = 256
objectIDLength = 20
objectIDLength = hash.Size
)
// Decoder reads and decodes idx files from an input stream.

View File

@ -1,10 +1,9 @@
package idxfile
import (
"crypto/sha1"
"hash"
"io"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@ -16,7 +15,7 @@ type Encoder struct {
// NewEncoder returns a new stream encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
@ -133,10 +132,10 @@ func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
return 0, err
}
copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20])
copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:hash.Size])
if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
return 0, err
}
return 40, nil
return hash.HexSize, nil
}

View File

@ -8,6 +8,7 @@ import (
encbin "encoding/binary"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
)
const (
@ -53,8 +54,8 @@ type MemoryIndex struct {
Offset32 [][]byte
CRC32 [][]byte
Offset64 []byte
PackfileChecksum [20]byte
IdxChecksum [20]byte
PackfileChecksum [hash.Size]byte
IdxChecksum [hash.Size]byte
offsetHash map[int64]plumbing.Hash
offsetHashIsFull bool

View File

@ -84,11 +84,8 @@ func (w *Writer) OnFooter(h plumbing.Hash) error {
w.checksum = h
w.finished = true
_, err := w.createIndex()
if err != nil {
return err
}
return nil
return err
}
// createIndex returns a filled MemoryIndex with the information filled by
@ -139,15 +136,23 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
offset := o.Offset
if offset > math.MaxInt32 {
offset = w.addOffset64(offset)
var err error
offset, err = w.addOffset64(offset)
if err != nil {
return nil, err
}
}
buf.Truncate(0)
binary.WriteUint32(buf, uint32(offset))
if err := binary.WriteUint32(buf, uint32(offset)); err != nil {
return nil, err
}
idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
buf.Truncate(0)
binary.WriteUint32(buf, o.CRC32)
if err := binary.WriteUint32(buf, o.CRC32); err != nil {
return nil, err
}
idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
}
@ -161,15 +166,17 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
return idx, nil
}
func (w *Writer) addOffset64(pos uint64) uint64 {
func (w *Writer) addOffset64(pos uint64) (uint64, error) {
buf := new(bytes.Buffer)
binary.WriteUint64(buf, pos)
w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
if err := binary.WriteUint64(buf, pos); err != nil {
return 0, err
}
w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
index := uint64(w.offset64 | (1 << 31))
w.offset64++
return index
return index, nil
}
func (o objects) Len() int {

View File

@ -3,15 +3,14 @@ package index
import (
"bufio"
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"io/ioutil"
"strconv"
"time"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@ -49,7 +48,7 @@ type Decoder struct {
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
return &Decoder{
r: io.TeeReader(r, h),
hash: h,
@ -202,7 +201,7 @@ func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
entrySize := read + len(e.Name)
padLen := 8 - entrySize%8
_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))
_, err := io.CopyN(io.Discard, d.r, int64(padLen))
return err
}

View File

@ -2,19 +2,18 @@ package index
import (
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"sort"
"time"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
var (
// EncodeVersionSupported is the range of supported index versions
EncodeVersionSupported uint32 = 2
EncodeVersionSupported uint32 = 3
// ErrInvalidTimestamp is returned by Encode if a Index with a Entry with
// negative timestamp values
@ -29,16 +28,16 @@ type Encoder struct {
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
// Encode writes the Index to the stream of the encoder.
func (e *Encoder) Encode(idx *Index) error {
// TODO: support versions v3 and v4
// TODO: support v4
// TODO: support extensions
if idx.Version != EncodeVersionSupported {
if idx.Version > EncodeVersionSupported {
return ErrUnsupportedVersion
}
@ -68,8 +67,12 @@ func (e *Encoder) encodeEntries(idx *Index) error {
if err := e.encodeEntry(entry); err != nil {
return err
}
entryLength := entryHeaderLength
if entry.IntentToAdd || entry.SkipWorktree {
entryLength += 2
}
wrote := entryHeaderLength + len(entry.Name)
wrote := entryLength + len(entry.Name)
if err := e.padEntry(wrote); err != nil {
return err
}
@ -79,10 +82,6 @@ func (e *Encoder) encodeEntries(idx *Index) error {
}
func (e *Encoder) encodeEntry(entry *Entry) error {
if entry.IntentToAdd || entry.SkipWorktree {
return ErrUnsupportedVersion
}
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
if err != nil {
return err
@ -110,9 +109,25 @@ func (e *Encoder) encodeEntry(entry *Entry) error {
entry.GID,
entry.Size,
entry.Hash[:],
flags,
}
flagsFlow := []interface{}{flags}
if entry.IntentToAdd || entry.SkipWorktree {
var extendedFlags uint16
if entry.IntentToAdd {
extendedFlags |= intentToAddMask
}
if entry.SkipWorktree {
extendedFlags |= skipWorkTreeMask
}
flagsFlow = []interface{}{flags | entryExtended, extendedFlags}
}
flow = append(flow, flagsFlow...)
if err := binary.Write(e.w, flow...); err != nil {
return err
}

View File

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/go-git/go-git/v5/plumbing"
@ -211,3 +212,20 @@ type EndOfIndexEntry struct {
// their contents).
Hash plumbing.Hash
}
// SkipUnless applies patterns in the form of A, A/B, A/B/C
// to the index to prevent non-matching files from being checked out.
func (i *Index) SkipUnless(patterns []string) {
for _, e := range i.Entries {
var include bool
for _, pattern := range patterns {
if strings.HasPrefix(e.Name, pattern) {
include = true
break
}
}
if !include {
e.SkipWorktree = true
}
}
}
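A sketch of the intended consumer: a sparse checkout via CheckoutOptions.SparseCheckoutDirectories, which (assuming the worktree wires the option through to Index.SkipUnless) marks every non-matching entry SkipWorktree:

package main

import (
	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	// Only entries whose names start with "docs" or "cmd/app" are
	// checked out; everything else keeps SkipWorktree set.
	err = wt.Checkout(&git.CheckoutOptions{
		SparseCheckoutDirectories: []string{"docs", "cmd/app"},
	})
	if err != nil {
		panic(err)
	}
}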

View File

@ -1,13 +1,13 @@
package objfile
import (
"compress/zlib"
"errors"
"io"
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -20,20 +20,22 @@ var (
// Reader implements io.ReadCloser. Close should be called when finished with
// the Reader. Close will not close the underlying io.Reader.
type Reader struct {
multi io.Reader
zlib io.ReadCloser
hasher plumbing.Hasher
multi io.Reader
zlib io.Reader
zlibref sync.ZLibReader
hasher plumbing.Hasher
}
// NewReader returns a new Reader reading from r.
func NewReader(r io.Reader) (*Reader, error) {
zlib, err := zlib.NewReader(r)
zlib, err := sync.GetZlibReader(r)
if err != nil {
return nil, packfile.ErrZLib.AddDetails(err.Error())
}
return &Reader{
zlib: zlib,
zlib: zlib.Reader,
zlibref: zlib,
}, nil
}
@ -110,5 +112,6 @@ func (r *Reader) Hash() plumbing.Hash {
// Close releases any resources consumed by the Reader. Calling Close does not
// close the wrapped io.Reader originally passed to NewReader.
func (r *Reader) Close() error {
return r.zlib.Close()
sync.PutZlibReader(r.zlibref)
return nil
}

View File

@ -7,6 +7,7 @@ import (
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -18,9 +19,9 @@ var (
// not close the underlying io.Writer.
type Writer struct {
raw io.Writer
zlib io.WriteCloser
hasher plumbing.Hasher
multi io.Writer
zlib *zlib.Writer
closed bool
pending int64 // number of unwritten bytes
@ -31,9 +32,10 @@ type Writer struct {
// The returned Writer implements io.WriteCloser. Close should be called when
// finished with the Writer. Close will not close the underlying io.Writer.
func NewWriter(w io.Writer) *Writer {
zlib := sync.GetZlibWriter(w)
return &Writer{
raw: w,
zlib: zlib.NewWriter(w),
zlib: zlib,
}
}
@ -100,6 +102,7 @@ func (w *Writer) Hash() plumbing.Hash {
// Calling Close does not close the wrapped io.Writer originally passed to
// NewWriter.
func (w *Writer) Close() error {
defer sync.PutZlibWriter(w.zlib)
if err := w.zlib.Close(); err != nil {
return err
}

View File

@ -1,10 +1,7 @@
package packfile
import (
"bytes"
"compress/zlib"
"io"
"sync"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -61,18 +58,3 @@ func WritePackfileToObjectStorage(
return err
}
var bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
var zlibReaderPool = sync.Pool{
New: func() interface{} {
r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
return r
},
}

View File

@ -5,6 +5,7 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
@ -16,8 +17,11 @@ const (
s = 16
// https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
// Max size of a copy operation (64KB)
// Max size of a copy operation (64KB).
maxCopySize = 64 * 1024
// Min size of a copy operation.
minCopySize = 4
)
// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object,
@ -43,18 +47,16 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbin
defer ioutil.CheckClose(tr, &err)
bb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(bb)
bb.Reset()
bb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(bb)
_, err = bb.ReadFrom(br)
if err != nil {
return nil, err
}
tb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(tb)
tb.Reset()
tb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(tb)
_, err = tb.ReadFrom(tr)
if err != nil {
@ -80,9 +82,8 @@ func DiffDelta(src, tgt []byte) []byte {
}
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@ -90,9 +91,8 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
index.init(src)
}
ibuf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(ibuf)
ibuf.Reset()
ibuf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(ibuf)
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)

View File

@ -2,11 +2,11 @@ package packfile
import (
"compress/zlib"
"crypto/sha1"
"fmt"
"io"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -28,7 +28,7 @@ type Encoder struct {
// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
h := plumbing.Hasher{
Hash: sha1.New(),
Hash: hash.New(hash.CryptoType),
}
mw := io.MultiWriter(w, h)
ow := newOffsetWriter(mw)
@ -131,11 +131,7 @@ func (e *Encoder) entry(o *ObjectToPack) (err error) {
defer ioutil.CheckClose(or, &err)
_, err = io.Copy(e.zw, or)
if err != nil {
return err
}
return nil
return err
}
func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {

View File

@ -7,19 +7,20 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
// FSObject is an object from the packfile on the filesystem.
type FSObject struct {
hash plumbing.Hash
h *ObjectHeader
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
hash plumbing.Hash
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
largeObjectThreshold int64
}
// NewFSObject creates a new filesystem object.
@ -32,16 +33,18 @@ func NewFSObject(
fs billy.Filesystem,
path string,
cache cache.Object,
largeObjectThreshold int64,
) *FSObject {
return &FSObject{
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
largeObjectThreshold: largeObjectThreshold,
}
}
@ -62,7 +65,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) {
return nil, err
}
p := NewPackfileWithCache(o.index, nil, f, o.cache)
p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
// We have a big object
h, err := p.objectHeaderAtOffset(o.offset)
if err != nil {
return nil, err
}
r, err := p.getReaderDirect(h)
if err != nil {
_ = f.Close()
return nil, err
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
r, err := p.getObjectContent(o.offset)
if err != nil {
_ = f.Close()
@ -100,17 +117,3 @@ func (o *FSObject) Type() plumbing.ObjectType {
func (o *FSObject) Writer() (io.WriteCloser, error) {
return nil, nil
}
type objectReader struct {
io.ReadCloser
f billy.File
}
func (r *objectReader) Close() error {
if err := r.ReadCloser.Close(); err != nil {
_ = r.f.Close()
return err
}
return r.f.Close()
}

View File

@ -2,6 +2,7 @@ package packfile
import (
"bytes"
"fmt"
"io"
"os"
@ -11,6 +12,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
largeObjectThreshold int64
}
// NewPackfileWithCache creates a new Packfile with the given object cache.
@ -50,6 +53,7 @@ func NewPackfileWithCache(
fs billy.Filesystem,
file billy.File,
cache cache.Object,
largeObjectThreshold int64,
) *Packfile {
s := NewScanner(file)
return &Packfile{
@ -59,6 +63,7 @@ func NewPackfileWithCache(
s,
cache,
make(map[int64]plumbing.ObjectType),
largeObjectThreshold,
}
}
@ -66,8 +71,8 @@ func NewPackfileWithCache(
// and packfile idx.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
}
// Get retrieves the encoded object in the packfile with the given hash.
@ -133,9 +138,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
@ -222,9 +226,9 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
// For delta objects we read the delta data and apply the small object
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
@ -263,6 +267,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
p.fs,
p.file.Name(),
p.deltaBaseCache,
p.largeObjectThreshold,
), nil
}
@ -282,6 +287,49 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
func asyncReader(p *Packfile) (io.ReadCloser, error) {
reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
zr, err := sync.GetZlibReader(reader)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return asyncReader(p)
case plumbing.REFDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readREFDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
case plumbing.OFSDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readOFSDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
default:
return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
}
}
func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
@ -323,9 +371,9 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err err
}
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -334,6 +382,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
var err error
base, ok := p.cacheGet(h.Reference)
if !ok {
base, err = p.Get(h.Reference)
if err != nil {
return nil, err
}
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
var err error
@ -353,9 +415,9 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -364,6 +426,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
hash, err := p.FindHash(h.OffsetReference)
if err != nil {
return nil, err
}
base, err := p.objectAtOffset(h.OffsetReference, hash)
if err != nil {
return nil, err
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
if err != nil {

View File

@ -3,13 +3,14 @@ package packfile
import (
"bytes"
"errors"
"fmt"
"io"
stdioutil "io/ioutil"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -46,7 +47,6 @@ type Parser struct {
oi []*objectInfo
oiByHash map[plumbing.Hash]*objectInfo
oiByOffset map[int64]*objectInfo
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash
cache *cache.BufferLRU
@ -175,12 +175,25 @@ func (p *Parser) init() error {
return nil
}
type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error
type lazyObjectWriter interface {
// LazyWriter enables an object to be lazily written.
// It returns:
// - w: a writer to receive the object's content.
// - lwh: a func to write the object header.
// - err: any error from the initial writer creation process.
//
// Note that if the object header is not written BEFORE the writer
// is used, this will result in an invalid object.
LazyWriter() (w io.WriteCloser, lwh objectHeaderWriter, err error)
}
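A minimal sketch (not from this commit; the fileStorage name is illustrative) of a storage shaped like the lazyObjectWriter interface above, streaming objects straight to a temp file. Note the parser matches this via an unexported type assertion, so this only mirrors the shape:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/go-git/go-git/v5/plumbing"
)

// Mirrors the unexported objectHeaderWriter type defined above.
type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error

// fileStorage is a hypothetical storage that streams objects to disk
// instead of buffering them in memory.
type fileStorage struct{}

// LazyWriter returns a content writer plus a header writer; per the doc
// comment above, the header must be written before any content.
func (fileStorage) LazyWriter() (io.WriteCloser, objectHeaderWriter, error) {
	f, err := os.CreateTemp("", "obj-*")
	if err != nil {
		return nil, nil, err
	}
	wh := func(typ plumbing.ObjectType, sz int64) error {
		// Loose-object style header: "<type> <size>\x00".
		_, err := fmt.Fprintf(f, "%s %d\x00", typ, sz)
		return err
	}
	return f, wh, nil
}

func main() {
	var s fileStorage
	w, wh, err := s.LazyWriter()
	if err != nil {
		panic(err)
	}
	defer w.Close()
	_ = wh(plumbing.BlobObject, 5) // header first...
	w.Write([]byte("hello"))       // ...then content
}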
func (p *Parser) indexObjects() error {
buf := new(bytes.Buffer)
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for i := uint32(0); i < p.count; i++ {
buf.Reset()
oh, err := p.scanner.NextObjectHeader()
if err != nil {
return err
@ -220,39 +233,76 @@ func (p *Parser) indexObjects() error {
ota = newBaseObject(oh.Offset, oh.Length, t)
}
_, crc, err := p.scanner.NextObject(buf)
hasher := plumbing.NewHasher(oh.Type, oh.Length)
writers := []io.Writer{hasher}
var obj *plumbing.MemoryObject
// Lazy writing is only available for non-delta objects.
if p.storage != nil && !delta {
// When a storage is set and supports lazy writing,
// use that instead of creating a memory object.
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, lwh, err := low.LazyWriter()
if err != nil {
return err
}
if err = lwh(oh.Type, oh.Length); err != nil {
return err
}
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
writers = append(writers, obj)
}
}
if delta && !p.scanner.IsSeekable {
buf.Reset()
buf.Grow(int(oh.Length))
writers = append(writers, buf)
}
mw := io.MultiWriter(writers...)
_, crc, err := p.scanner.NextObject(mw)
if err != nil {
return err
}
// Non-delta objects need to be added to the storage. This
// is only required when lazy writing is not supported.
if obj != nil {
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
ota.Crc32 = crc
ota.Length = oh.Length
data := buf.Bytes()
if !delta {
sha1, err := getSHA1(ota.Type, data)
if err != nil {
return err
sha1 := hasher.Sum()
// Move children of placeholder parent into actual parent, in case this
// was a non-external delta reference.
if placeholder, ok := p.oiByHash[sha1]; ok {
ota.Children = placeholder.Children
for _, c := range ota.Children {
c.Parent = ota
}
}
ota.SHA1 = sha1
p.oiByHash[ota.SHA1] = ota
}
if p.storage != nil && !delta {
obj := new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
if _, err := obj.Write(data); err != nil {
return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
if delta && !p.scanner.IsSeekable {
data := buf.Bytes()
p.deltas[oh.Offset] = make([]byte, len(data))
copy(p.deltas[oh.Offset], data)
}
@ -265,28 +315,37 @@ func (p *Parser) indexObjects() error {
}
func (p *Parser) resolveDeltas() error {
buf := &bytes.Buffer{}
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for _, obj := range p.oi {
buf.Reset()
buf.Grow(int(obj.Length))
err := p.get(obj, buf)
if err != nil {
return err
}
content := buf.Bytes()
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
}
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, nil); err != nil {
return err
}
if !obj.IsDelta() && len(obj.Children) > 0 {
// Dealing with an io.ReaderAt object means we can
// create it once and reuse it across all children.
r := bytes.NewReader(buf.Bytes())
for _, child := range obj.Children {
if err := p.resolveObject(stdioutil.Discard, child, content); err != nil {
// Even though we are discarding the output, we still need to read it
// so that the scanner can advance to the next object and the SHA1 can be
// calculated.
if err := p.resolveObject(io.Discard, child, r); err != nil {
return err
}
p.resolveExternalRef(child)
}
// Remove the delta from the cache.
@ -299,6 +358,16 @@ func (p *Parser) resolveDeltas() error {
return nil
}
func (p *Parser) resolveExternalRef(o *objectInfo) {
if ref, ok := p.oiByHash[o.SHA1]; ok && ref.ExternalRef {
p.oiByHash[o.SHA1] = o
o.Children = ref.Children
for _, c := range o.Children {
c.Parent = o
}
}
}
func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
if !o.ExternalRef { // skip cache check for placeholder parents
b, ok := p.cache.Get(o.Offset)
@ -336,16 +405,15 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
if o.DiskType.IsDelta() {
b := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(b)
b.Reset()
b := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(b)
buf.Grow(int(o.Length))
err := p.get(o.Parent, b)
if err != nil {
return err
}
base := b.Bytes()
err = p.resolveObject(buf, o, base)
err = p.resolveObject(buf, o, bytes.NewReader(b.Bytes()))
if err != nil {
return err
}
@ -356,6 +424,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
}
// If the scanner is seekable, caching this data into
// memory by offset seems wasteful.
// There is a trade-off to be considered here in terms
// of execution time vs memory consumption.
//
// TODO: improve seekable execution time, so that we can
// skip this cache.
if len(o.Children) > 0 {
data := make([]byte, buf.Len())
copy(data, buf.Bytes())
@ -364,41 +439,75 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
return nil
}
// resolveObject resolves an object from base, using information
// provided by o.
//
// This call has the side-effect of changing field values
// from the object info o:
// - Type: OFSDeltaObject may become the target type (e.g. Blob).
// - Size: The size may be updated with the target size.
// - Hash: Zero hashes will be calculated as part of the object
// resolution. This is why the process can't be skipped even when w
// is io.Discard.
//
// base must be an io.ReaderAt, which is a requirement from
// patchDeltaStream. The main reason is that applying a delta object
// may require seeking back and forth within base, which io.Reader
// does not support.
func (p *Parser) resolveObject(
w io.Writer,
o *objectInfo,
base []byte,
base io.ReaderAt,
) error {
if !o.DiskType.IsDelta() {
return nil
}
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
err := p.readData(buf, o)
if err != nil {
return err
}
data := buf.Bytes()
data, err = applyPatchBase(o, data, base)
writers := []io.Writer{w}
var obj *plumbing.MemoryObject
var lwh objectHeaderWriter
if p.storage != nil {
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, wh, err := low.LazyWriter()
if err != nil {
return err
}
lwh = wh
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
ow, err := obj.Writer()
if err != nil {
return err
}
writers = append(writers, ow)
}
}
mw := io.MultiWriter(writers...)
err = applyPatchBase(o, base, buf, mw, lwh)
if err != nil {
return err
}
if p.storage != nil {
obj := new(plumbing.MemoryObject)
obj.SetSize(o.Size())
if obj != nil {
obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil {
return err
}
obj.SetSize(o.Size()) // Size here is correct as it was populated by applyPatchBase.
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
_, err = w.Write(data)
return err
}
@ -422,24 +531,31 @@ func (p *Parser) readData(w io.Writer, o *objectInfo) error {
return nil
}
func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
patched, err := PatchDelta(base, data)
// applyPatchBase applies the patch to target.
//
// Note that ota will be updated based on the description in resolveObject.
func applyPatchBase(ota *objectInfo, base io.ReaderAt, delta io.Reader, target io.Writer, wh objectHeaderWriter) error {
if target == nil {
return fmt.Errorf("cannot apply patch against nil target")
}
typ := ota.Type
if ota.SHA1 == plumbing.ZeroHash {
typ = ota.Parent.Type
}
sz, h, err := patchDeltaWriter(target, base, delta, typ, wh)
if err != nil {
return nil, err
return err
}
if ota.SHA1 == plumbing.ZeroHash {
ota.Type = ota.Parent.Type
sha1, err := getSHA1(ota.Type, patched)
if err != nil {
return nil, err
}
ota.SHA1 = sha1
ota.Length = int64(len(patched))
ota.Type = typ
ota.Length = int64(sz)
ota.SHA1 = h
}
return patched, nil
return nil
}
func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {

View File

@ -1,12 +1,16 @@
package packfile
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
@ -14,7 +18,33 @@ import (
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for details about the delta format.
const deltaSizeMin = 4
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
type offset struct {
mask byte
shift uint
}
var offsets = []offset{
{mask: 0x01, shift: 0},
{mask: 0x02, shift: 8},
{mask: 0x04, shift: 16},
{mask: 0x08, shift: 24},
}
var sizes = []offset{
{mask: 0x10, shift: 0},
{mask: 0x20, shift: 8},
{mask: 0x40, shift: 16},
}
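As a worked example of the mask tables above (a standalone sketch, not code from this change): command byte 0x91 has the continuation bit set (copy from source) plus masks 0x01 and 0x10, so exactly one offset byte and one size byte follow in the delta stream:

package main

import "fmt"

func main() {
	// cmd = 1001_0001: bit 7 => copy-from-source; mask 0x01 => one
	// offset byte follows; mask 0x10 => one size byte follows.
	cmd := byte(0x91)
	data := []byte{0x0A, 0x05} // offset byte 10, then size byte 5

	var offset, size uint
	i := 0
	for _, o := range []struct {
		mask  byte
		shift uint
	}{{0x01, 0}, {0x02, 8}, {0x04, 16}, {0x08, 24}} {
		if cmd&o.mask != 0 {
			offset |= uint(data[i]) << o.shift
			i++
		}
	}
	for _, s := range []struct {
		mask  byte
		shift uint
	}{{0x10, 0}, {0x20, 8}, {0x40, 16}} {
		if cmd&s.mask != 0 {
			size |= uint(data[i]) << s.shift
			i++
		}
	}
	fmt.Println(offset, size) // 10 5
}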
// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
@ -32,18 +62,16 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
defer ioutil.CheckClose(w, &err)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, err = buf.ReadFrom(r)
if err != nil {
return err
}
src := buf.Bytes()
dst := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(dst)
dst.Reset()
dst := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(dst)
err = patchDelta(dst, src, delta)
if err != nil {
return err
@ -51,17 +79,12 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
target.SetSize(int64(dst.Len()))
b := byteSlicePool.Get().([]byte)
_, err = io.CopyBuffer(w, dst, b)
byteSlicePool.Put(b)
b := sync.GetByteSlice()
_, err = io.CopyBuffer(w, dst, *b)
sync.PutByteSlice(b)
return err
}
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
// PatchDelta returns the result of applying the modification deltas in delta to src.
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
// is not copy from source or copy from delta (ErrDeltaCmd).
@ -73,8 +96,137 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
return b.Bytes(), nil
}
func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
if srcSz != uint(base.Size()) {
return nil, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
remainingTargetSz := targetSz
dstRd, dstWr := io.Pipe()
go func() {
baseRd, err := base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
defer baseRd.Close()
baseBuf := bufio.NewReader(baseRd)
basePos := uint(0)
for {
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
switch {
case isCopyFromSrc(cmd):
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
_ = dstWr.Close()
return
}
discard := offset - basePos
if basePos > offset {
_ = baseRd.Close()
baseRd, err = base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
baseBuf.Reset(baseRd)
discard = offset
}
for discard > math.MaxInt32 {
n, err := baseBuf.Discard(math.MaxInt32)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
for discard > 0 {
n, err := baseBuf.Discard(int(discard))
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
basePos += sz
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
default:
_ = dstWr.CloseWithError(ErrDeltaCmd)
return
}
if remainingTargetSz <= 0 {
_ = dstWr.Close()
return
}
}
}()
return dstRd, nil
}
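A usage sketch for ReaderFromDelta, assuming these exported names from the packfile package, with a hand-assembled delta: LEB128 source size 11, target size 5, then one copy-from-delta command (cmd byte 0x05) carrying five literal bytes:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	base := &plumbing.MemoryObject{}
	base.SetType(plumbing.BlobObject)
	base.Write([]byte("hello world")) // 11 bytes, matching srcSz below

	// srcSz=0x0B (11), targetSz=0x05 (5), then cmd 0x05: insert
	// five literal bytes from the delta stream.
	delta := []byte{0x0B, 0x05, 0x05, 'g', 'r', 'e', 'a', 't'}

	rc, err := packfile.ReaderFromDelta(base, bytes.NewReader(delta))
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	out, _ := io.ReadAll(rc)
	fmt.Printf("%s\n", out) // great
}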
func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
if len(delta) < minCopySize {
return ErrInvalidDelta
}
@ -95,7 +247,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
cmd = delta[0]
delta = delta[1:]
if isCopyFromSrc(cmd) {
switch {
case isCopyFromSrc(cmd):
var offset, sz uint
var err error
offset, delta, err = decodeOffset(cmd, delta)
@ -114,7 +268,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
}
dst.Write(src[offset : offset+sz])
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return ErrInvalidDelta
@ -127,7 +282,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
dst.Write(delta[0:sz])
remainingTargetSz -= sz
delta = delta[sz:]
} else {
default:
return ErrDeltaCmd
}
@ -139,6 +295,107 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
return nil
}
func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader,
typ plumbing.ObjectType, writeHeader objectHeaderWriter) (uint, plumbing.Hash, error) {
deltaBuf := bufio.NewReaderSize(delta, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
if r, ok := base.(*bytes.Reader); ok && srcSz != uint(r.Size()) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
// If the header still needs to be written, the caller will provide
// an objectHeaderWriter. This seems to be the case when
// dealing with thin-packs.
if writeHeader != nil {
err = writeHeader(typ, int64(targetSz))
if err != nil {
return 0, plumbing.ZeroHash, fmt.Errorf("could not lazy write header: %w", err)
}
}
remainingTargetSz := targetSz
hasher := plumbing.NewHasher(typ, int64(targetSz))
mw := io.MultiWriter(dst, hasher)
bufp := sync.GetByteSlice()
defer sync.PutByteSlice(bufp)
sr := io.NewSectionReader(base, int64(0), int64(srcSz))
// Keep both the io.LimitedReader types, so we can reset N.
baselr := io.LimitReader(sr, 0).(*io.LimitedReader)
deltalr := io.LimitReader(deltaBuf, 0).(*io.LimitedReader)
for {
buf := *bufp
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
if err != nil {
return 0, plumbing.ZeroHash, err
}
if isCopyFromSrc(cmd) {
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
return 0, plumbing.ZeroHash, err
}
baselr.N = int64(sz)
if _, err := io.CopyBuffer(mw, baselr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
deltalr.N = int64(sz)
if _, err := io.CopyBuffer(mw, deltalr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else {
return 0, plumbing.ZeroHash, ErrDeltaCmd
}
if remainingTargetSz <= 0 {
break
}
}
return targetSz, hasher.Sum(), nil
}
// Decodes a number encoded as an unsigned LEB128 at the start of some
// binary data and returns the decoded number and the rest of the
// stream.
@ -161,78 +418,95 @@ func decodeLEB128(input []byte) (uint, []byte) {
return num, input[sz:]
}
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
var num, sz uint
for {
b, err := input.ReadByte()
if err != nil {
return 0, err
}
num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
sz++
if uint(b)&continuation == 0 {
break
}
}
return num, nil
}
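For intuition, a standalone sketch of the same unsigned LEB128 decoding, using the classic three-byte example 0xE5 0x8E 0x26 whose 7-bit groups 0x65, 0x0E, 0x26 assemble little-endian-first into 624485:

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// decodeULEB128 mirrors decodeLEB128ByteReader above.
func decodeULEB128(r *bufio.Reader) (uint, error) {
	const payload, continuation = 0x7f, 0x80
	var num, sz uint
	for {
		b, err := r.ReadByte()
		if err != nil {
			return 0, err
		}
		num |= (uint(b) & payload) << (sz * 7) // concat 7-bit chunks
		sz++
		if uint(b)&continuation == 0 {
			return num, nil
		}
	}
}

func main() {
	r := bufio.NewReader(bytes.NewReader([]byte{0xE5, 0x8E, 0x26}))
	n, _ := decodeULEB128(r)
	fmt.Println(n) // 624485 = 0x65 | 0x0E<<7 | 0x26<<14
}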
func isCopyFromSrc(cmd byte) bool {
return (cmd & 0x80) != 0
return (cmd & continuation) != 0
}
func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
return (cmd&continuation) == 0 && cmd != 0
}
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint
for _, o := range offsets {
if (cmd & o.mask) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << o.shift
}
}
return offset, nil
}
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
for _, o := range offsets {
if (cmd & o.mask) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << o.shift
delta = delta[1:]
}
offset = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x02) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x04) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 16
delta = delta[1:]
}
if (cmd & 0x08) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 24
delta = delta[1:]
}
return offset, delta, nil
}
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint
for _, s := range sizes {
if (cmd & s.mask) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << s.shift
}
}
if sz == 0 {
sz = maxCopySize
}
return sz, nil
}
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
for _, s := range sizes {
if (cmd & s.mask) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << s.shift
delta = delta[1:]
}
sz = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x20) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x40) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 16
delta = delta[1:]
}
if sz == 0 {
sz = 0x10000
sz = maxCopySize
}
return sz, delta, nil

View File

@ -3,17 +3,15 @@ package packfile
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"hash"
"hash/crc32"
"io"
stdioutil "io/ioutil"
"sync"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -114,7 +112,7 @@ func (s *Scanner) Header() (version, objects uint32, err error) {
return
}
// readSignature reads an returns the signature field in the packfile.
// readSignature reads and returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
if _, err := io.ReadFull(s.r, sig); err != nil {
@ -243,7 +241,7 @@ func (s *Scanner) discardObjectIfNeeded() error {
}
h := s.pendingObject
n, _, err := s.NextObject(stdioutil.Discard)
n, _, err := s.NextObject(io.Discard)
if err != nil {
return err
}
@ -320,29 +318,38 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
return
}
// ReadObject returns a reader for the object content and an error
func (s *Scanner) ReadObject() (io.ReadCloser, error) {
s.pendingObject = nil
zr, err := sync.GetZlibReader(s.r)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
// copyObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
zr := zlibReaderPool.Get().(io.ReadCloser)
defer zlibReaderPool.Put(zr)
zr, err := sync.GetZlibReader(s.r)
defer sync.PutZlibReader(zr)
if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
if err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
defer ioutil.CheckClose(zr.Reader, &err)
buf := sync.GetByteSlice()
n, err = io.CopyBuffer(w, zr.Reader, *buf)
sync.PutByteSlice(buf)
return
}
var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
@ -372,9 +379,10 @@ func (s *Scanner) Checksum() (plumbing.Hash, error) {
// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
buf := sync.GetByteSlice()
_, err := io.CopyBuffer(io.Discard, s.r, *buf)
sync.PutByteSlice(buf)
return err
}
@ -384,13 +392,13 @@ func (s *Scanner) Flush() error {
}
// scannerReader has the following characteristics:
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penality for performing small writes
// to the crc32 hash writer.
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penalty for performing small writes
// to the crc32 hash writer.
type scannerReader struct {
reader io.Reader
crc io.Writer

View File

@ -7,6 +7,8 @@ import (
"errors"
"fmt"
"io"
"github.com/go-git/go-git/v5/utils/trace"
)
// An Encoder writes pkt-lines to an output stream.
@ -43,6 +45,7 @@ func NewEncoder(w io.Writer) *Encoder {
// Flush encodes a flush-pkt to the output stream.
func (e *Encoder) Flush() error {
defer trace.Packet.Print("packet: > 0000")
_, err := e.w.Write(FlushPkt)
return err
}
@ -70,6 +73,7 @@ func (e *Encoder) encodeLine(p []byte) error {
}
n := len(p) + 4
defer trace.Packet.Printf("packet: > %04x %s", n, p)
if _, err := e.w.Write(asciiHex16(n)); err != nil {
return err
}

View File

@ -0,0 +1,51 @@
package pktline
import (
"bytes"
"errors"
"io"
"strings"
)
var (
// ErrInvalidErrorLine is returned by Decode when the packet line is not an
// error line.
ErrInvalidErrorLine = errors.New("expected an error-line")
errPrefix = []byte("ERR ")
)
// ErrorLine is a packet line that contains an error message.
// Once this packet is sent by client or server, the data transfer process is
// terminated.
// See https://git-scm.com/docs/pack-protocol#_pkt_line_format
type ErrorLine struct {
Text string
}
// Error implements the error interface.
func (e *ErrorLine) Error() string {
return e.Text
}
// Encode encodes the ErrorLine into a packet line.
func (e *ErrorLine) Encode(w io.Writer) error {
p := NewEncoder(w)
return p.Encodef("%s%s\n", string(errPrefix), e.Text)
}
// Decode decodes a packet line into an ErrorLine.
func (e *ErrorLine) Decode(r io.Reader) error {
s := NewScanner(r)
if !s.Scan() {
return s.Err()
}
line := s.Bytes()
if !bytes.HasPrefix(line, errPrefix) {
return ErrInvalidErrorLine
}
e.Text = strings.TrimSpace(string(line[4:]))
return nil
}
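Assuming ErrorLine and the scanner change further below land together, a sketch of how an ERR packet is produced and then surfaced as an error by the scanner rather than as a normal payload:

package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/pktline"
)

func main() {
	var buf bytes.Buffer
	_ = (&pktline.ErrorLine{Text: "access denied"}).Encode(&buf)
	fmt.Printf("%q\n", buf.String()) // "0016ERR access denied\n"

	// The scanner detects the ERR prefix and stops with an *ErrorLine.
	s := pktline.NewScanner(&buf)
	fmt.Println(s.Scan()) // false
	var el *pktline.ErrorLine
	if errors.As(s.Err(), &el) {
		fmt.Println(el.Text) // access denied
	}
}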

View File

@ -1,8 +1,12 @@
package pktline
import (
"bytes"
"errors"
"io"
"strings"
"github.com/go-git/go-git/v5/utils/trace"
)
const (
@ -65,6 +69,14 @@ func (s *Scanner) Scan() bool {
return false
}
s.payload = s.payload[:l]
trace.Packet.Printf("packet: < %04x %s", l, s.payload)
if bytes.HasPrefix(s.payload, errPrefix) {
s.err = &ErrorLine{
Text: strings.TrimSpace(string(s.payload[4:])),
}
return false
}
return true
}

View File

@ -2,15 +2,15 @@ package plumbing
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"hash"
"sort"
"strconv"
"github.com/go-git/go-git/v5/plumbing/hash"
)
// Hash SHA1 hashed content
type Hash [20]byte
type Hash [hash.Size]byte
// ZeroHash is Hash with value zero
var ZeroHash Hash
@ -46,7 +46,7 @@ type Hasher struct {
}
func NewHasher(t ObjectType, size int64) Hasher {
h := Hasher{sha1.New()}
h := Hasher{hash.New(hash.CryptoType)}
h.Write(t.Bytes())
h.Write([]byte(" "))
h.Write([]byte(strconv.FormatInt(size, 10)))
@ -74,10 +74,11 @@ func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// IsHash returns true if the given string is a valid hash.
func IsHash(s string) bool {
if len(s) != 40 {
switch len(s) {
case hash.HexSize:
_, err := hex.DecodeString(s)
return err == nil
default:
return false
}
_, err := hex.DecodeString(s)
return err == nil
}
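With the hunk above, IsHash keys off hash.HexSize rather than a hard-coded 40; a quick sketch under the default SHA1 build:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	fmt.Println(plumbing.IsHash("9dea2395f5403188298c1dabe8bdafe562c491e3")) // true: 40 hex chars
	fmt.Println(plumbing.IsHash("9dea2395"))                                 // false: wrong length
	fmt.Println(plumbing.IsHash("zz..")) // false: wrong length and not hexadecimal
}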

View File

@ -0,0 +1,60 @@
// Package hash provides a way of managing the
// underlying hash implementations used across go-git.
package hash
import (
"crypto"
"fmt"
"hash"
"github.com/pjbgf/sha1cd"
)
// algos is a map of hash algorithms.
var algos = map[crypto.Hash]func() hash.Hash{}
func init() {
reset()
}
// reset resets the default algos value. Can be used after running tests
// that register new algorithms, to avoid side effects.
func reset() {
algos[crypto.SHA1] = sha1cd.New
algos[crypto.SHA256] = crypto.SHA256.New
}
// RegisterHash allows the hash algorithm used to be overridden.
// This ensures that the hash selection for go-git is explicit when
// overriding the default value.
func RegisterHash(h crypto.Hash, f func() hash.Hash) error {
if f == nil {
return fmt.Errorf("cannot register hash: f is nil")
}
switch h {
case crypto.SHA1:
algos[h] = f
case crypto.SHA256:
algos[h] = f
default:
return fmt.Errorf("unsupported hash function: %v", h)
}
return nil
}
// Hash is the same as hash.Hash. This allows consumers
// to avoid having to import this package alongside "hash".
type Hash interface {
hash.Hash
}
// New returns a new Hash for the given hash function.
// It panics if the hash function is not registered.
func New(h crypto.Hash) Hash {
hh, ok := algos[h]
if !ok {
panic(fmt.Sprintf("hash algorithm not registered: %v", h))
}
return hh()
}
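A usage sketch for the registry above; swapping the default collision-detecting sha1cd back to the stdlib SHA1 is just one assumed reason to call RegisterHash:

package main

import (
	"crypto"
	_ "crypto/sha1" // registers SHA1 with the crypto package
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/hash"
)

func main() {
	// Override the default sha1cd implementation with stdlib SHA1.
	if err := hash.RegisterHash(crypto.SHA1, crypto.SHA1.New); err != nil {
		panic(err)
	}
	h := hash.New(crypto.SHA1)
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil)) // aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
}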

View File

@ -0,0 +1,15 @@
//go:build !sha256
// +build !sha256
package hash
import "crypto"
const (
// CryptoType defines what hash algorithm is being used.
CryptoType = crypto.SHA1
// Size defines the number of bytes the hash yields.
Size = 20
// HexSize defines the string length of the hash when represented in hexadecimal.
HexSize = 40
)

View File

@ -0,0 +1,15 @@
//go:build sha256
// +build sha256
package hash
import "crypto"
const (
// CryptoType defines what hash algorithm is being used.
CryptoType = crypto.SHA256
// Size defines the number of bytes the hash yields.
Size = 32
// HexSize defines the string length of the hash when represented in hexadecimal.
HexSize = 64
)

View File

@ -25,13 +25,13 @@ func (o *MemoryObject) Hash() Hash {
return o.h
}
// Type return the ObjectType
// Type returns the ObjectType
func (o *MemoryObject) Type() ObjectType { return o.t }
// SetType sets the ObjectType
func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
// Size return the size of the object
// Size returns the size of the object
func (o *MemoryObject) Size() int64 { return o.sz }
// SetSize set the object size, a content of the given size should be written

View File

@ -82,7 +82,7 @@ func (t ObjectType) Valid() bool {
return t >= CommitObject && t <= REFDeltaObject
}
// IsDelta returns true for any ObjectTyoe that represents a delta (i.e.
// IsDelta returns true for any ObjectType that represents a delta (i.e.
// REFDeltaObject or OFSDeltaObject).
func (t ObjectType) IsDelta() bool {
return t == REFDeltaObject || t == OFSDeltaObject

View File

@ -39,7 +39,7 @@ func (c *Change) Action() (merkletrie.Action, error) {
return merkletrie.Modify, nil
}
// Files return the files before and after a change.
// Files returns the files before and after a change.
// For insertions from will be nil. For deletions to will be nil.
func (c *Change) Files() (from, to *File, err error) {
action, err := c.Action()

View File

@ -16,11 +16,11 @@ func newChange(c merkletrie.Change) (*Change, error) {
var err error
if ret.From, err = newChangeEntry(c.From); err != nil {
return nil, fmt.Errorf("From field: %s", err)
return nil, fmt.Errorf("from field: %s", err)
}
if ret.To, err = newChangeEntry(c.To); err != nil {
return nil, fmt.Errorf("To field: %s", err)
return nil, fmt.Errorf("to field: %s", err)
}
return ret, nil

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"bytes"
"context"
"errors"
@ -14,17 +13,29 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
headerencoding string = "encoding"
// https://github.com/git/git/blob/bcb6cae2966cc407ca1afc77413b3ef11103c175/Documentation/gitformat-signature.txt#L153
// When a merge commit is created from a signed tag, the tag is embedded in
// the commit with the "mergetag" header.
headermergetag string = "mergetag"
defaultUtf8CommitMesageEncoding MessageEncoding = "UTF-8"
)
// Hash represents the hash of an object
type Hash plumbing.Hash
// MessageEncoding represents the encoding of a commit
type MessageEncoding string
// Commit points to a single tree, marking it as what the project looked like
// at a certain point in time. It contains meta-information about that point
// in time, such as a timestamp, the author of the changes since the last
@ -38,6 +49,9 @@ type Commit struct {
// Committer is the one performing the commit, might be different from
// Author.
Committer Signature
// MergeTag is the embedded tag object when a merge commit is created by
// merging a signed tag.
MergeTag string
// PGPSignature is the PGP signature of the commit.
PGPSignature string
// Message is the commit message, contains arbitrary text.
@ -46,6 +60,8 @@ type Commit struct {
TreeHash plumbing.Hash
// ParentHashes are the hashes of the parent commits of the commit.
ParentHashes []plumbing.Hash
// Encoding is the encoding of the commit.
Encoding MessageEncoding
s storer.EncodedObjectStorer
}
@ -173,6 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
c.Hash = o.Hash()
c.Encoding = defaultUtf8CommitMesageEncoding
reader, err := o.Reader()
if err != nil {
@ -180,11 +197,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
var message bool
var mergetag bool
var pgpsig bool
var msgbuf bytes.Buffer
for {
@ -193,6 +210,16 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
return err
}
if mergetag {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
c.MergeTag += string(line)
continue
} else {
mergetag = false
}
}
if pgpsig {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
@ -226,6 +253,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
c.Author.Decode(data)
case "committer":
c.Committer.Decode(data)
case headermergetag:
c.MergeTag += string(data) + "\n"
mergetag = true
case headerencoding:
c.Encoding = MessageEncoding(data)
case headerpgp:
c.PGPSignature += string(data) + "\n"
pgpsig = true
@ -287,6 +319,28 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
return err
}
if c.MergeTag != "" {
if _, err = fmt.Fprint(w, "\n"+headermergetag+" "); err != nil {
return err
}
// Split tag information lines and re-write with a left padding and
// newline. Use join for this so it's clear that a newline should not be
// added after this section. The newline will be added either as part of
// the PGP signature or the commit message.
mergetag := strings.TrimSuffix(c.MergeTag, "\n")
lines := strings.Split(mergetag, "\n")
if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
return err
}
}
if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMesageEncoding {
if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil {
return err
}
}
if c.PGPSignature != "" && includeSig {
if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
@ -377,6 +431,17 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil)
}
// Less defines a compare function to determine which commit is 'earlier' by:
// - First use Committer.When
// - If Committer.When are equal then use Author.When
// - If Author.When also equal then compare the string value of the hash
func (c *Commit) Less(rhs *Commit) bool {
return c.Committer.When.Before(rhs.Committer.When) ||
(c.Committer.When.Equal(rhs.Committer.When) &&
(c.Author.When.Before(rhs.Author.When) ||
(c.Author.When.Equal(rhs.Author.When) && bytes.Compare(c.Hash[:], rhs.Hash[:]) < 0)))
}
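The new Less makes commits sortable by committer time, then author time, then hash; a minimal sketch using only exported fields:

package main

import (
	"fmt"
	"sort"
	"time"

	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	t := time.Now()
	commits := []*object.Commit{
		{Committer: object.Signature{When: t.Add(time.Hour)}},
		{Committer: object.Signature{When: t}},
	}
	sort.Slice(commits, func(i, j int) bool {
		return commits[i].Less(commits[j])
	})
	// The earlier committer timestamp now sorts first.
	fmt.Println(commits[0].Committer.When.Before(commits[1].Committer.When)) // true
}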
func indent(t string) string {
var output []string
for _, line := range strings.Split(t, "\n") {

View File

@ -1,12 +0,0 @@
package object
import (
"bufio"
"sync"
)
var bufPool = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}

View File

@ -96,10 +96,6 @@ func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, erro
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return
@ -321,8 +317,8 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
// File is deleted.
cs.Name = from.Path()
} else if from.Path() != to.Path() {
// File is renamed. Not supported.
// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
// File is renamed.
cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
} else {
cs.Name = from.Path()
}

View File

@ -403,10 +403,16 @@ func min(a, b int) int {
return b
}
const maxMatrixSize = 10000
func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
// Allocate for the worst-case scenario where every pair has a score
// that we need to consider. We might not need that many.
matrix := make(similarityMatrix, 0, len(srcs)*len(dsts))
matrixSize := len(srcs) * len(dsts)
if matrixSize > maxMatrixSize {
matrixSize = maxMatrixSize
}
matrix := make(similarityMatrix, 0, matrixSize)
srcSizes := make([]int64, len(srcs))
dstSizes := make([]int64, len(dsts))
dstTooLarge := make(map[int]bool)
@ -735,10 +741,7 @@ func (i *similarityIndex) add(key int, cnt uint64) error {
// It's the same key, so increment the counter.
var err error
i.hashes[j], err = newKeyCountPair(key, v.count()+cnt)
if err != nil {
return err
}
return nil
return err
} else if j+1 >= len(i.hashes) {
j = 0
} else {

View File

@ -0,0 +1,101 @@
package object
import "bytes"
const (
signatureTypeUnknown signatureType = iota
signatureTypeOpenPGP
signatureTypeX509
signatureTypeSSH
)
var (
// openPGPSignatureFormat is the format of an OpenPGP signature.
openPGPSignatureFormat = signatureFormat{
[]byte("-----BEGIN PGP SIGNATURE-----"),
[]byte("-----BEGIN PGP MESSAGE-----"),
}
// x509SignatureFormat is the format of an X509 signature, which is
// a PKCS#7 (S/MIME) signature.
x509SignatureFormat = signatureFormat{
[]byte("-----BEGIN CERTIFICATE-----"),
}
// sshSignatureFormat is the format of an SSH signature.
sshSignatureFormat = signatureFormat{
[]byte("-----BEGIN SSH SIGNATURE-----"),
}
)
var (
// knownSignatureFormats is a map of known signature formats, indexed by
// their signatureType.
knownSignatureFormats = map[signatureType]signatureFormat{
signatureTypeOpenPGP: openPGPSignatureFormat,
signatureTypeX509: x509SignatureFormat,
signatureTypeSSH: sshSignatureFormat,
}
)
// signatureType represents the type of the signature.
type signatureType int8
// signatureFormat represents the beginning of a signature.
type signatureFormat [][]byte
// typeForSignature returns the type of the signature based on its format.
func typeForSignature(b []byte) signatureType {
for t, i := range knownSignatureFormats {
for _, begin := range i {
if bytes.HasPrefix(b, begin) {
return t
}
}
}
return signatureTypeUnknown
}
// parseSignedBytes returns the position of the last signature block found in
// the given bytes. If no signature block is found, it returns -1.
//
// When multiple signature blocks are found, the position of the last one is
// returned. Any trailing bytes after this signature block start should be
// considered part of the signature.
//
// Given this, it would be safe to use the returned position to split the bytes
// into two parts: the first part containing the message, the second part
// containing the signature.
//
// Example:
//
// message := []byte(`Message with signature
//
// -----BEGIN SSH SIGNATURE-----
// ...`)
//
// var signature string
// if pos, _ := parseSignedBytes(message); pos != -1 {
// signature = string(message[pos:])
// message = message[:pos]
// }
//
// This logic is on par with git's gpg-interface.c:parse_signed_buffer().
// https://github.com/git/git/blob/7c2ef319c52c4997256f5807564523dfd4acdfc7/gpg-interface.c#L668
func parseSignedBytes(b []byte) (int, signatureType) {
var n, match = 0, -1
var t signatureType
for n < len(b) {
var i = b[n:]
if st := typeForSignature(i); st != signatureTypeUnknown {
match = n
t = st
}
if eol := bytes.IndexByte(i, '\n'); eol >= 0 {
n += eol + 1
continue
}
// If we reach this point, we've reached the end.
break
}
return match, t
}

View File

@ -1,18 +1,16 @@
package object
import (
"bufio"
"bytes"
"fmt"
"io"
stdioutil "io/ioutil"
"strings"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// Tag represents an annotated tag object. It points to a single git object of
@ -93,9 +91,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
var line []byte
line, err = r.ReadBytes('\n')
@ -128,40 +126,15 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
}
data, err := stdioutil.ReadAll(r)
data, err := io.ReadAll(r)
if err != nil {
return err
}
var pgpsig bool
// Check if data contains PGP signature.
if bytes.Contains(data, []byte(beginpgp)) {
// Split the lines at newline.
messageAndSig := bytes.Split(data, []byte("\n"))
for _, l := range messageAndSig {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
break
} else {
t.PGPSignature += string(l) + "\n"
}
continue
}
// Check if it's the beginning of a PGP signature.
if bytes.Contains(l, []byte(beginpgp)) {
t.PGPSignature += beginpgp + "\n"
pgpsig = true
continue
}
t.Message += string(l) + "\n"
}
} else {
t.Message = string(data)
if sm, _ := parseSignedBytes(data); sm >= 0 {
t.PGPSignature = string(data[sm:])
data = data[:sm]
}
t.Message = string(data)
return nil
}

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"context"
"errors"
"fmt"
@ -14,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
@ -230,9 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
str, err := r.ReadString(' ')
if err != nil {

View File

@ -38,6 +38,10 @@ func NewTreeRootNode(t *Tree) noder.Noder {
}
}
func (t *treeNoder) Skip() bool {
return false
}
func (t *treeNoder) isRoot() bool {
return t.name == ""
}

View File

@ -57,7 +57,7 @@ func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
switch r.Type() {
case plumbing.SymbolicReference:
v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
a.Capabilities.Add(capability.SymRef, v)
return a.Capabilities.Add(capability.SymRef, v)
case plumbing.HashReference:
a.References[r.Name().String()] = r.Hash()
default:
@ -96,12 +96,12 @@ func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
//
// Git versions prior to 1.8.4.3 have a special procedure to get
// the reference HEAD is pointing to:
// - Check if a reference called master exists. If exists and it
// has the same hash as HEAD hash, we can say that HEAD is pointing to master
// - If master does not exists or does not have the same hash as HEAD,
// order references and check in that order if that reference has the same
// hash than HEAD. If yes, set HEAD pointing to that branch hash
// - If no reference is found, throw an error
// - Check if a reference called master exists. If it exists and
// has the same hash as HEAD, we can say that HEAD is pointing to master
// - If master does not exist or does not have the same hash as HEAD,
// order the references and check, in that order, whether a reference has
// the same hash as HEAD. If yes, set HEAD to point to that branch hash
// - If no reference is found, throw an error
func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
if a.Head == nil {
return nil

View File

@ -133,6 +133,7 @@ func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
return nil
}
// TODO: Use object-format (when available) for hash size. Git 2.41+
if len(p.line) < hashSize {
p.error("cannot read hash, pkt-line too short")
return nil

View File

@ -1,6 +1,11 @@
// Package capability defines the server and client capabilities.
package capability
import (
"fmt"
"os"
)
// Capability describes a server or client capability.
type Capability string
@ -238,7 +243,15 @@ const (
Filter Capability = "filter"
)
const DefaultAgent = "go-git/4.x"
const userAgent = "go-git/5.x"
// DefaultAgent provides the user agent string.
func DefaultAgent() string {
if envUserAgent, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA"); ok {
return fmt.Sprintf("%s %s", userAgent, envUserAgent)
}
return userAgent
}
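A sketch of the env-var hook, assuming DefaultAgent is now a function as this hunk shows:

package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)

func main() {
	fmt.Println(capability.DefaultAgent()) // go-git/5.x
	os.Setenv("GO_GIT_USER_AGENT_EXTRA", "backup-tool/1.0")
	fmt.Println(capability.DefaultAgent()) // go-git/5.x backup-tool/1.0
}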
var known = map[Capability]bool{
MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,

View File

@ -86,7 +86,9 @@ func (l *List) Get(capability Capability) []string {
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
delete(l.m, capability)
if _, ok := l.m[capability]; ok {
l.m[capability].Values = l.m[capability].Values[:0]
}
return l.Add(capability, values...)
}

View File

@ -19,7 +19,6 @@ var (
// common
sp = []byte(" ")
eol = []byte("\n")
eq = []byte{'='}
// advertised-refs
null = []byte("\x00")
@ -49,6 +48,11 @@ func isFlush(payload []byte) bool {
return len(payload) == 0
}
var (
// ErrNilWriter is returned when a nil writer is passed to the encoder.
ErrNilWriter = fmt.Errorf("nil writer")
)
// ErrUnexpectedData represents an unexpected data decoding a message
type ErrUnexpectedData struct {
Msg string

View File

@ -0,0 +1,120 @@
package packp
import (
"fmt"
"io"
"strings"
"github.com/go-git/go-git/v5/plumbing/format/pktline"
)
var (
// ErrInvalidGitProtoRequest is returned by Decode if the input is not a
// valid git protocol request.
ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request")
)
// GitProtoRequest is a command request for the git protocol.
// It is used to send the command, endpoint, and extra parameters to the
// remote.
// See https://git-scm.com/docs/pack-protocol#_git_transport
type GitProtoRequest struct {
RequestCommand string
Pathname string
// Optional
Host string
// Optional
ExtraParams []string
}
// validate validates the request.
func (g *GitProtoRequest) validate() error {
if g.RequestCommand == "" {
return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest)
}
if g.Pathname == "" {
return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest)
}
return nil
}
// Encode encodes the request into the writer.
func (g *GitProtoRequest) Encode(w io.Writer) error {
if w == nil {
return ErrNilWriter
}
if err := g.validate(); err != nil {
return err
}
p := pktline.NewEncoder(w)
req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname)
if host := g.Host; host != "" {
req += fmt.Sprintf("host=%s\x00", host)
}
if len(g.ExtraParams) > 0 {
req += "\x00"
for _, param := range g.ExtraParams {
req += param + "\x00"
}
}
if err := p.Encode([]byte(req)); err != nil {
return err
}
return nil
}
// Decode decodes the request from the reader.
func (g *GitProtoRequest) Decode(r io.Reader) error {
s := pktline.NewScanner(r)
if !s.Scan() {
err := s.Err()
if err == nil {
return ErrInvalidGitProtoRequest
}
return err
}
line := string(s.Bytes())
if len(line) == 0 {
return io.EOF
}
if line[len(line)-1] != 0 {
return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest)
}
parts := strings.SplitN(line, " ", 2)
if len(parts) != 2 {
return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest)
}
g.RequestCommand = parts[0]
params := strings.Split(parts[1], string(null))
if len(params) < 1 {
return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest)
}
g.Pathname = params[0]
if len(params) > 1 {
g.Host = strings.TrimPrefix(params[1], "host=")
}
if len(params) > 2 {
for _, param := range params[2:] {
if param != "" {
g.ExtraParams = append(g.ExtraParams, param)
}
}
}
return nil
}
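An encoding sketch for GitProtoRequest as added here; the 0032 length prefix is the 46-byte payload plus the 4-byte pkt-line header:

package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	var buf bytes.Buffer
	req := packp.GitProtoRequest{
		RequestCommand: "git-upload-pack",
		Pathname:       "/project.git",
		Host:           "example.com", // optional, emitted as host=...
	}
	if err := req.Encode(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String())
	// "0032git-upload-pack /project.git\x00host=example.com\x00"
}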

View File

@ -21,11 +21,6 @@ type ServerResponse struct {
// Decode decodes the response into the struct, isMultiACK should be true, if
// the request was done with multi_ack or multi_ack_detailed capabilities.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
// TODO: implement support for multi_ack or multi_ack_detailed responses
if isMultiACK {
return errors.New("multi_ack and multi_ack_detailed are not supported")
}
s := pktline.NewScanner(reader)
for s.Scan() {
@ -48,7 +43,23 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
}
}
return s.Err()
// isMultiACK is true when the remote server advertises the related
// capabilities and they are not in transport.UnsupportedCapabilities.
//
// Users may decide to remove multi_ack and multi_ack_detailed from the
// unsupported capabilities list, which allows them to do initial clones
// from Azure DevOps.
//
// Follow-up fetches may error, therefore errors are wrapped with additional
// information highlighting that these capabilities are not supported by go-git.
//
// TODO: Implement support for multi_ack or multi_ack_detailed responses.
err := s.Err()
if err != nil && isMultiACK {
return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err)
}
return err
}
// stopReading detects when a valid command such as ACK or NAK is found to be
@ -90,12 +101,14 @@ func (r *ServerResponse) decodeLine(line []byte) error {
return fmt.Errorf("unexpected flush")
}
if bytes.Equal(line[0:3], ack) {
return r.decodeACKLine(line)
}
if len(line) >= 3 {
if bytes.Equal(line[0:3], ack) {
return r.decodeACKLine(line)
}
if bytes.Equal(line[0:3], nak) {
return nil
if bytes.Equal(line[0:3], nak) {
return nil
}
}
return fmt.Errorf("unexpected content %q", string(line))
@ -113,8 +126,9 @@ func (r *ServerResponse) decodeACKLine(line []byte) error {
}
// Encode encodes the ServerResponse into a writer.
func (r *ServerResponse) Encode(w io.Writer) error {
if len(r.ACKs) > 1 {
func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error {
if len(r.ACKs) > 1 && !isMultiACK {
// For further information, refer to comments in the Decode func above.
return errors.New("multi_ack and multi_ack_detailed are not supported")
}

View File

@ -95,7 +95,7 @@ func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
}
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
return r

View File

@ -43,7 +43,7 @@ func (d *ulReqDecoder) Decode(v *UploadRequest) error {
return d.err
}
// fills out the parser stiky error
// fills out the parser sticky error
func (d *ulReqDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,

View File

@ -19,6 +19,7 @@ var (
type ReferenceUpdateRequest struct {
Capabilities *capability.List
Commands []*Command
Options []*Option
Shallow *plumbing.Hash
// Packfile contains an optional packfile reader.
Packfile io.ReadCloser
@ -58,7 +59,7 @@ func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceU
r := NewReferenceUpdateRequest()
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
if adv.Supports(capability.ReportStatus) {
@ -86,9 +87,9 @@ type Action string
const (
Create Action = "create"
Update = "update"
Delete = "delete"
Invalid = "invalid"
Update Action = "update"
Delete Action = "delete"
Invalid Action = "invalid"
)
type Command struct {
@ -120,3 +121,8 @@ func (c *Command) validate() error {
return nil
}
type Option struct {
Key string
Value string
}

View File

@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/pktline"
@ -81,7 +80,7 @@ func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
var ok bool
rc, ok = r.(io.ReadCloser)
if !ok {
rc = ioutil.NopCloser(r)
rc = io.NopCloser(r)
}
d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}

View File

@ -9,10 +9,6 @@ import (
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)
var (
zeroHashString = plumbing.ZeroHash.String()
)
// Encode writes the ReferenceUpdateRequest encoding to the stream.
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := req.validate(); err != nil {
@ -29,6 +25,12 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
return err
}
if req.Capabilities.Supports(capability.PushOptions) {
if err := req.encodeOptions(e, req.Options); err != nil {
return err
}
}
if req.Packfile != nil {
if _, err := io.Copy(w, req.Packfile); err != nil {
return err
@ -73,3 +75,15 @@ func formatCommand(cmd *Command) string {
n := cmd.New.String()
return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
}
func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder,
opts []*Option) error {
for _, opt := range opts {
if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil {
return err
}
}
return e.Flush()
}

View File

@ -38,10 +38,10 @@ func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackReque
}
}
// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants
// length is zero
// IsEmpty returns whether a request is empty. A request is empty if the Haves
// are contained in the Wants, or if the Wants length is zero, and there are no Shallows.
func (r *UploadPackRequest) IsEmpty() bool {
return isSubset(r.Wants, r.Haves)
return isSubset(r.Wants, r.Haves) && len(r.Shallows) == 0
}
func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {

View File

@ -24,7 +24,6 @@ type UploadPackResponse struct {
r io.ReadCloser
isShallow bool
isMultiACK bool
isOk bool
}
// NewUploadPackResponse create a new UploadPackResponse instance, the request
@ -79,7 +78,7 @@ func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
}
}
if err := r.ServerResponse.Encode(w); err != nil {
if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil {
return err
}

View File

@ -3,6 +3,7 @@ package plumbing
import (
"errors"
"fmt"
"regexp"
"strings"
)
@ -15,10 +16,11 @@ const (
symrefPrefix = "ref: "
)
// RefRevParseRules are a set of rules to parse references into short names.
// These are the same rules as used by git in shorten_unambiguous_ref.
// RefRevParseRules are a set of rules to parse references into short names, or expand into a full reference.
// These are the same rules as used by git in shorten_unambiguous_ref and expand_ref.
// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417
var RefRevParseRules = []string{
"%s",
"refs/%s",
"refs/tags/%s",
"refs/heads/%s",
@ -28,6 +30,9 @@ var RefRevParseRules = []string{
var (
ErrReferenceNotFound = errors.New("reference not found")
// ErrInvalidReferenceName is returned when a reference name is invalid.
ErrInvalidReferenceName = errors.New("invalid reference name")
)
// ReferenceType describes the reference type.
@ -113,7 +118,7 @@ func (r ReferenceName) String() string {
func (r ReferenceName) Short() string {
s := string(r)
res := s
for _, format := range RefRevParseRules {
for _, format := range RefRevParseRules[1:] {
_, err := fmt.Sscanf(s, format, &res)
if err == nil {
continue
@ -123,9 +128,95 @@ func (r ReferenceName) Short() string {
return res
}
var (
ctrlSeqs = regexp.MustCompile(`[\000-\037\177]`)
)
// Validate validates a reference name.
// This follows the git-check-ref-format rules.
// See https://git-scm.com/docs/git-check-ref-format
//
// It is important to note that this function does not check if the reference
// exists in the repository.
// It only checks if the reference name is valid.
// This function does not support the --refspec-pattern, --normalize, and
// --allow-onelevel options.
//
// Git imposes the following rules on how references are named:
//
// 1. They can include slash / for hierarchical (directory) grouping, but no
// slash-separated component can begin with a dot . or end with the
// sequence .lock.
// 2. They must contain at least one /. This enforces the presence of a
// category like heads/, tags/ etc. but the actual names are not
// restricted. If the --allow-onelevel option is used, this rule is
// waived.
// 3. They cannot have two consecutive dots .. anywhere.
// 4. They cannot have ASCII control characters (i.e. bytes whose values are
// lower than \040, or \177 DEL), space, tilde ~, caret ^, or colon :
// anywhere.
// 5. They cannot have question-mark ?, asterisk *, or open bracket [
// anywhere. See the --refspec-pattern option below for an exception to this
// rule.
// 6. They cannot begin or end with a slash / or contain multiple consecutive
// slashes (see the --normalize option below for an exception to this rule).
// 7. They cannot end with a dot ..
// 8. They cannot contain a sequence @{.
// 9. They cannot be the single character @.
// 10. They cannot contain a \.
func (r ReferenceName) Validate() error {
s := string(r)
if len(s) == 0 {
return ErrInvalidReferenceName
}
// HEAD is a special case
if r == HEAD {
return nil
}
// rule 7
if strings.HasSuffix(s, ".") {
return ErrInvalidReferenceName
}
// rule 2
parts := strings.Split(s, "/")
if len(parts) < 2 {
return ErrInvalidReferenceName
}
isBranch := r.IsBranch()
isTag := r.IsTag()
for _, part := range parts {
// rule 6
if len(part) == 0 {
return ErrInvalidReferenceName
}
if strings.HasPrefix(part, ".") || // rule 1
strings.Contains(part, "..") || // rule 3
ctrlSeqs.MatchString(part) || // rule 4
strings.ContainsAny(part, "~^:?*[ \t\n") || // rule 4 & 5
strings.Contains(part, "@{") || // rule 8
part == "@" || // rule 9
strings.Contains(part, "\\") || // rule 10
strings.HasSuffix(part, ".lock") { // rule 1
return ErrInvalidReferenceName
}
if (isBranch || isTag) && strings.HasPrefix(part, "-") { // branches & tags can't start with -
return ErrInvalidReferenceName
}
}
return nil
}
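Example usage; the reference names are arbitrary:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Valid: hierarchical, no forbidden characters or sequences.
	fmt.Println(plumbing.ReferenceName("refs/heads/feature/login").Validate()) // <nil>

	// Invalid: consecutive dots violate rule 3.
	fmt.Println(plumbing.ReferenceName("refs/heads/a..b").Validate()) // invalid reference name
}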
const (
HEAD ReferenceName = "HEAD"
Master ReferenceName = "refs/heads/master"
Main ReferenceName = "refs/heads/main"
)
// Reference is a representation of a git reference
@ -168,22 +259,22 @@ func NewHashReference(n ReferenceName, h Hash) *Reference {
}
}
// Type return the type of a reference
// Type returns the type of a reference
func (r *Reference) Type() ReferenceType {
return r.t
}
// Name return the name of a reference
// Name returns the name of a reference
func (r *Reference) Name() ReferenceName {
return r.n
}
// Hash return the hash of a hash reference
// Hash returns the hash of a hash reference
func (r *Reference) Hash() Hash {
return r.h
}
// Target return the target of a symbolic reference
// Target returns the target of a symbolic reference
func (r *Reference) Target() ReferenceName {
return r.target
}
@ -204,6 +295,21 @@ func (r *Reference) Strings() [2]string {
}
func (r *Reference) String() string {
s := r.Strings()
return fmt.Sprintf("%s %s", s[1], s[0])
ref := ""
switch r.Type() {
case HashReference:
ref = r.Hash().String()
case SymbolicReference:
ref = symrefPrefix + r.Target().String()
default:
return ""
}
name := r.Name().String()
var v strings.Builder
v.Grow(len(ref) + len(name) + 1)
v.WriteString(ref)
v.WriteString(" ")
v.WriteString(name)
return v.String()
}
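The output format is unchanged, only the construction differs; a short sketch with a placeholder hash:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	r := plumbing.NewHashReference("refs/heads/main", h)
	fmt.Println(r.String()) // 6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/main

	s := plumbing.NewSymbolicReference(plumbing.HEAD, "refs/heads/main")
	fmt.Println(s.String()) // ref: refs/heads/main HEAD
}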

View File

@ -42,6 +42,7 @@ type EncodedObjectStorer interface {
HasEncodedObject(plumbing.Hash) error
// EncodedObjectSize returns the plaintext size of the encoded object.
EncodedObjectSize(plumbing.Hash) (int64, error)
AddAlternate(remote string) error
}
// DeltaObjectStorer is an EncodedObjectStorer that can return delta
@ -52,8 +53,8 @@ type DeltaObjectStorer interface {
DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
}
// Transactioner is a optional method for ObjectStorer, it enable transaction
// base write and read operations in the storage
// Transactioner is an optional interface for ObjectStorer; it enables
// transactional read and write operations.
type Transactioner interface {
// Begin starts a transaction.
Begin() Transaction
@ -87,8 +88,8 @@ type PackedObjectStorer interface {
DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
}
// PackfileWriter is a optional method for ObjectStorer, it enable direct write
// of packfile to the storage
// PackfileWriter is an optional interface for ObjectStorer; it enables writing
// a packfile directly to storage.
type PackfileWriter interface {
// PackfileWriter returns a writer for writing a packfile to the storage
//

View File

@ -3,10 +3,7 @@
package client
import (
"crypto/tls"
"crypto/x509"
"fmt"
gohttp "net/http"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/file"
@ -24,14 +21,6 @@ var Protocols = map[string]transport.Transport{
"file": file.DefaultClient,
}
var insecureClient = http.NewClient(&gohttp.Client{
Transport: &gohttp.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
})
// InstallProtocol adds or modifies an existing protocol.
func InstallProtocol(scheme string, c transport.Transport) {
if c == nil {
@ -50,27 +39,6 @@ func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
}
func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) {
if endpoint.Protocol == "https" {
if endpoint.InsecureSkipTLS {
return insecureClient, nil
}
if len(endpoint.CaBundle) != 0 {
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
rootCAs.AppendCertsFromPEM(endpoint.CaBundle)
return http.NewClient(&gohttp.Client{
Transport: &gohttp.Transport{
TLSClientConfig: &tls.Config{
RootCAs: rootCAs,
},
},
}), nil
}
}
f, ok := Protocols[endpoint.Protocol]
if !ok {
return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol)

View File

@ -108,14 +108,45 @@ type Endpoint struct {
// Host is the host.
Host string
// Port is the port to connect, if 0 the default port for the given protocol
// wil be used.
// will be used.
Port int
// Path is the repository path.
Path string
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CaBundle specifies an additional CA bundle to use alongside the system cert pool
CaBundle []byte
// Proxy provides info required for connecting to a proxy.
Proxy ProxyOptions
}
type ProxyOptions struct {
URL string
Username string
Password string
}
func (o *ProxyOptions) Validate() error {
if o.URL != "" {
_, err := url.Parse(o.URL)
return err
}
return nil
}
func (o *ProxyOptions) FullURL() (*url.URL, error) {
proxyURL, err := url.Parse(o.URL)
if err != nil {
return nil, err
}
if o.Username != "" {
if o.Password != "" {
proxyURL.User = url.UserPassword(o.Username, o.Password)
} else {
proxyURL.User = url.User(o.Username)
}
}
return proxyURL, nil
}
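Example usage; the proxy URL and credentials are placeholders:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/transport"
)

func main() {
	opts := transport.ProxyOptions{
		URL:      "http://proxy.internal:3128",
		Username: "alice",
		Password: "s3cret",
	}
	if err := opts.Validate(); err != nil {
		panic(err)
	}
	u, err := opts.FullURL()
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // http://alice:s3cret@proxy.internal:3128
}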
var defaultPorts = map[string]int{
@ -196,11 +227,17 @@ func parseURL(endpoint string) (*Endpoint, error) {
pass, _ = u.User.Password()
}
host := u.Hostname()
if strings.Contains(host, ":") {
// IPv6 address
host = "[" + host + "]"
}
return &Endpoint{
Protocol: u.Scheme,
User: user,
Password: pass,
Host: u.Hostname(),
Host: host,
Port: getPort(u),
Path: getPath(u),
}, nil
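With this change an IPv6 literal round-trips through the public endpoint constructor; the address below is illustrative:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/transport"
)

func main() {
	ep, err := transport.NewEndpoint("git://[::1]:9418/project.git")
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.Host) // [::1]
	fmt.Println(ep.Port) // 9418
}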

View File

@ -11,7 +11,6 @@ import (
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
"github.com/go-git/go-git/v5/utils/ioutil"
"golang.org/x/sys/execabs"
)
@ -112,7 +111,7 @@ func (c *command) Start() error {
func (c *command) StderrPipe() (io.Reader, error) {
// Pipe returned by Command.StderrPipe has a race with Read + Command.Wait.
// We use an io.Pipe and close it after the command finishes.
r, w := ioutil.Pipe()
r, w := io.Pipe()
c.cmd.Stderr = w
c.stderrCloser = r
return r, nil

View File

@ -2,11 +2,11 @@
package git
import (
"fmt"
"io"
"net"
"strconv"
"github.com/go-git/go-git/v5/plumbing/format/pktline"
"github.com/go-git/go-git/v5/plumbing/protocol/packp"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -41,10 +41,18 @@ type command struct {
// Start executes the command, sending the required message over the TCP connection
func (c *command) Start() error {
cmd := endpointToCommand(c.command, c.endpoint)
req := packp.GitProtoRequest{
RequestCommand: c.command,
Pathname: c.endpoint.Path,
}
host := c.endpoint.Host
if c.endpoint.Port != DefaultPort {
host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port))
}
e := pktline.NewEncoder(c.conn)
return e.Encode([]byte(cmd))
req.Host = host
return req.Encode(c.conn)
}
func (c *command) connect() error {
@ -69,7 +77,7 @@ func (c *command) getHostWithPort() string {
port = DefaultPort
}
return fmt.Sprintf("%s:%d", host, port)
return net.JoinHostPort(host, strconv.Itoa(port))
}
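net.JoinHostPort brackets IPv6 literals, which the old Sprintf formatting did not:

package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("::1", "9418"))         // [::1]:9418
	fmt.Println(net.JoinHostPort("example.com", "9418")) // example.com:9418
}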
// StderrPipe git protocol doesn't have any dedicated error channel
@ -77,27 +85,18 @@ func (c *command) StderrPipe() (io.Reader, error) {
return nil, nil
}
// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent
// StdinPipe returns the underlying connection as WriteCloser, wrapped to prevent
// calls to Close from reaching the connection; a command execution in the git
// protocol can't be closed or killed
func (c *command) StdinPipe() (io.WriteCloser, error) {
return ioutil.WriteNopCloser(c.conn), nil
}
// StdoutPipe return the underlying connection as Reader
// StdoutPipe returns the underlying connection as Reader
func (c *command) StdoutPipe() (io.Reader, error) {
return c.conn, nil
}
func endpointToCommand(cmd string, ep *transport.Endpoint) string {
host := ep.Host
if ep.Port != DefaultPort {
host = fmt.Sprintf("%s:%d", ep.Host, ep.Port)
}
return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
}
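GitProtoRequest encodes the same wire format that endpointToCommand used to build by hand; a sketch with placeholder values:

package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	req := packp.GitProtoRequest{
		RequestCommand: "git-upload-pack",
		Pathname:       "/project.git",
		Host:           "example.com:9419",
	}
	var buf bytes.Buffer
	if err := req.Encode(&buf); err != nil {
		panic(err)
	}
	// Expected pkt-line payload: "git-upload-pack /project.git\x00host=example.com:9419\x00"
	fmt.Printf("%q\n", buf.String())
}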
// Close closes the TCP connection and connection.
func (c *command) Close() error {
if !c.connected {

View File

@ -4,16 +4,22 @@ package http
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"sync"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/protocol/packp"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/golang/groupcache/lru"
)
// it requires a bytes.Buffer, because we need to know the length
@ -67,6 +73,17 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) (
return nil, err
}
// Git 2.41+ returns a zero-id plus capabilities when an empty
// repository is being cloned. This skips the existing logic within
// advrefs_decode.decodeFirstHash, which expects a flush-pkt instead.
//
// This logic aligns with plumbing/transport/internal/common/common.go.
if ar.IsEmpty() &&
// Empty repositories are valid for git-receive-pack.
transport.ReceivePackServiceName != serviceName {
return nil, transport.ErrEmptyRemoteRepository
}
transport.FilterUnsupportedCapabilities(ar.Capabilities)
s.advRefs = ar
@ -74,40 +91,83 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) (
}
type client struct {
c *http.Client
c *http.Client
transports *lru.Cache
m sync.RWMutex
}
// DefaultClient is the default HTTP client, which uses `http.DefaultClient`.
var DefaultClient = NewClient(nil)
// ClientOptions holds user configurable options for the client.
type ClientOptions struct {
// CacheMaxEntries is the maximum number of entries the transport objects
// cache will hold at any given point in time. It must be a positive integer.
// Calling `client.addTransport()` after the cache has reached the specified
// size will result in the least recently used transport being evicted
// before the provided transport is added to the cache.
CacheMaxEntries int
}
var (
// defaultTransportCacheSize is the default capacity of the transport objects cache.
// Its value is 0 because transport caching is turned off by default and is an
// opt-in feature.
defaultTransportCacheSize = 0
// DefaultClient is the default HTTP client, which uses a net/http client configured
// with http.DefaultTransport.
DefaultClient = NewClient(nil)
)
// NewClient creates a new client with a custom net/http client.
// See `InstallProtocol` to install and override default http client.
// Unless a properly initialized client is given, it will fall back into
// `http.DefaultClient`.
// If the net/http client is nil or empty, it will use a net/http client configured
// with http.DefaultTransport.
//
// Note that the HTTP client cannot distinguish between private repositories and
// nonexistent repositories on GitHub, so it returns `ErrAuthorizationRequired`
// for both.
func NewClient(c *http.Client) transport.Transport {
if c == nil {
return &client{http.DefaultClient}
c = &http.Client{
Transport: http.DefaultTransport,
}
}
return NewClientWithOptions(c, &ClientOptions{
CacheMaxEntries: defaultTransportCacheSize,
})
}
return &client{
// NewClientWithOptions returns a new client configured with the provided net/http client
// and other custom options specific to the client.
// If the net/http client is nil or empty, it will use a net/http client configured
// with http.DefaultTransport.
func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transport {
if c == nil {
c = &http.Client{
Transport: http.DefaultTransport,
}
}
cl := &client{
c: c,
}
if opts != nil {
if opts.CacheMaxEntries > 0 {
cl.transports = lru.New(opts.CacheMaxEntries)
}
}
return cl
}
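Example opt-in to transport caching; the cache size is arbitrary:

package main

import (
	nethttp "net/http"

	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	client := githttp.NewClientWithOptions(
		&nethttp.Client{Transport: nethttp.DefaultTransport},
		&githttp.ClientOptions{CacheMaxEntries: 100},
	)
	_ = client // e.g. install it for https via the transport/client package
}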
func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.UploadPackSession, error) {
return newUploadPackSession(c.c, ep, auth)
return newUploadPackSession(c, ep, auth)
}
func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.ReceivePackSession, error) {
return newReceivePackSession(c.c, ep, auth)
return newReceivePackSession(c, ep, auth)
}
type session struct {
@ -117,10 +177,106 @@ type session struct {
advRefs *packp.AdvRefs
}
func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
func transportWithInsecureTLS(transport *http.Transport) {
if transport.TLSClientConfig == nil {
transport.TLSClientConfig = &tls.Config{}
}
transport.TLSClientConfig.InsecureSkipVerify = true
}
func transportWithCABundle(transport *http.Transport, caBundle []byte) error {
rootCAs, err := x509.SystemCertPool()
if err != nil {
return err
}
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
rootCAs.AppendCertsFromPEM(caBundle)
if transport.TLSClientConfig == nil {
transport.TLSClientConfig = &tls.Config{}
}
transport.TLSClientConfig.RootCAs = rootCAs
return nil
}
func transportWithProxy(transport *http.Transport, proxyURL *url.URL) {
transport.Proxy = http.ProxyURL(proxyURL)
}
func configureTransport(transport *http.Transport, ep *transport.Endpoint) error {
if len(ep.CaBundle) > 0 {
if err := transportWithCABundle(transport, ep.CaBundle); err != nil {
return err
}
}
if ep.InsecureSkipTLS {
transportWithInsecureTLS(transport)
}
if ep.Proxy.URL != "" {
proxyURL, err := ep.Proxy.FullURL()
if err != nil {
return err
}
transportWithProxy(transport, proxyURL)
}
return nil
}
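The same per-endpoint configuration can be sketched directly against net/http; the proxy URL is a placeholder:

package main

import (
	"crypto/tls"
	"net/http"
	"net/url"
)

func main() {
	// Clone the default transport so the shared global is left untouched,
	// mirroring what newSession does below.
	tr := http.DefaultTransport.(*http.Transport).Clone()

	// Equivalent of transportWithInsecureTLS.
	if tr.TLSClientConfig == nil {
		tr.TLSClientConfig = &tls.Config{}
	}
	tr.TLSClientConfig.InsecureSkipVerify = true

	// Equivalent of transportWithProxy.
	proxyURL, err := url.Parse("http://proxy.internal:3128")
	if err != nil {
		panic(err)
	}
	tr.Proxy = http.ProxyURL(proxyURL)

	client := &http.Client{Transport: tr}
	_ = client
}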
func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
var httpClient *http.Client
// We need to configure the http transport if there are transport specific
// options present in the endpoint.
if len(ep.CaBundle) > 0 || ep.InsecureSkipTLS || ep.Proxy.URL != "" {
var transport *http.Transport
// if the client wasn't configured to have a cache for transports then just configure
// the transport and use it directly, otherwise try to use the cache.
if c.transports == nil {
tr, ok := c.c.Transport.(*http.Transport)
if !ok {
return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s",
reflect.TypeOf(transport), reflect.TypeOf(c.c.Transport))
}
transport = tr.Clone()
configureTransport(transport, ep)
} else {
transportOpts := transportOptions{
caBundle: string(ep.CaBundle),
insecureSkipTLS: ep.InsecureSkipTLS,
}
if ep.Proxy.URL != "" {
proxyURL, err := ep.Proxy.FullURL()
if err != nil {
return nil, err
}
transportOpts.proxyURL = *proxyURL
}
var found bool
transport, found = c.fetchTransport(transportOpts)
if !found {
transport = c.c.Transport.(*http.Transport).Clone()
configureTransport(transport, ep)
c.addTransport(transportOpts, transport)
}
}
httpClient = &http.Client{
Transport: transport,
CheckRedirect: c.c.CheckRedirect,
Jar: c.c.Jar,
Timeout: c.c.Timeout,
}
} else {
httpClient = c.c
}
s := &session{
auth: basicAuthFromEndpoint(ep),
client: c,
client: httpClient,
endpoint: ep,
}
if auth != nil {
@ -250,14 +406,28 @@ func (a *TokenAuth) String() string {
// Err is a dedicated error to return errors based on status code
type Err struct {
Response *http.Response
Reason string
}
// NewErr returns a new Err based on a http response
// NewErr returns a new Err based on an http response, closing the response
// body if needed
func NewErr(r *http.Response) error {
if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
return nil
}
var reason string
// If a response message is present, add it to the error
var messageBuffer bytes.Buffer
if r.Body != nil {
messageLength, _ := messageBuffer.ReadFrom(r.Body)
if messageLength > 0 {
reason = messageBuffer.String()
}
_ = r.Body.Close()
}
switch r.StatusCode {
case http.StatusUnauthorized:
return transport.ErrAuthenticationRequired
@ -267,7 +437,7 @@ func NewErr(r *http.Response) error {
return transport.ErrRepositoryNotFound
}
return plumbing.NewUnexpectedError(&Err{r})
return plumbing.NewUnexpectedError(&Err{r, reason})
}
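Example of the 404 path with a fabricated response; NewErr drains and closes the body before mapping the status code:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"

	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	res := &http.Response{
		StatusCode: http.StatusNotFound,
		Body:       io.NopCloser(strings.NewReader("Repository not found.")),
	}
	fmt.Println(githttp.NewErr(res)) // repository not found
}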
// StatusCode returns the status code of the response

View File

@ -19,7 +19,7 @@ type rpSession struct {
*session
}
func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
func newReceivePackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
s, err := newSession(c, ep, auth)
return &rpSession{s}, err
}
@ -102,7 +102,6 @@ func (s *rpSession) doRequest(
}
if err := NewErr(res); err != nil {
_ = res.Body.Close()
return nil, err
}

View File

@ -0,0 +1,40 @@
package http
import (
"net/http"
"net/url"
)
// transportOptions contains transport specific configuration.
type transportOptions struct {
insecureSkipTLS bool
// []byte is not comparable.
caBundle string
proxyURL url.URL
}
func (c *client) addTransport(opts transportOptions, transport *http.Transport) {
c.m.Lock()
c.transports.Add(opts, transport)
c.m.Unlock()
}
func (c *client) removeTransport(opts transportOptions) {
c.m.Lock()
c.transports.Remove(opts)
c.m.Unlock()
}
func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) {
c.m.RLock()
t, ok := c.transports.Get(opts)
c.m.RUnlock()
if !ok {
return nil, false
}
transport, ok := t.(*http.Transport)
if !ok {
return nil, false
}
return transport, true
}
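caBundle is stored as a string because transportOptions is used as a cache key, and keys looked up by equality must be comparable; a minimal illustration:

package main

import "fmt"

// key is comparable because all of its fields are; a []byte field would make
// the struct unusable as a map key.
type key struct {
	insecureSkipTLS bool
	caBundle        string
}

func main() {
	seen := map[key]*struct{}{}
	seen[key{caBundle: "pem-data"}] = nil
	_, ok := seen[key{caBundle: "pem-data"}]
	fmt.Println(ok) // true: equal field values produce the same key
}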
