chore(deps): upgrade dependencies

Upgrade all dependencies to newest versions.
This commit is contained in:
Christopher Allen Lane
2023-12-13 08:29:02 -05:00
parent 0d9c92c8c0
commit 95a4e31b6c
769 changed files with 28936 additions and 12954 deletions

View File

@@ -2,3 +2,6 @@ coverage.out
*~
coverage.txt
profile.out
.tmp/
.git-dist/
.vscode

View File

@@ -1,111 +1,233 @@
Supported Capabilities
======================
# Supported Features
Here is a non-comprehensive table of git commands and features whose equivalent
is supported by go-git.
Here is a non-comprehensive table of git commands and features and their
compatibility status with go-git.
| Feature | Status | Notes |
|---------------------------------------|--------|-------|
| **config** |
| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. |
| **getting and creating repositories** |
| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. |
| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. |
| **basic snapshotting** |
| add | ✔ | Plain add is supported. Any other flags aren't supported |
| status | ✔ |
| commit | ✔ |
| reset | ✔ |
| rm | ✔ |
| mv | ✔ |
| **branching and merging** |
| branch | ✔ |
| checkout | ✔ | Basic usages of checkout are supported. |
| merge | ✖ |
| mergetool | ✖ |
| stash | ✖ |
| tag | |
| **sharing and updating projects** |
| fetch | ✔ |
| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. |
| push | ✔ |
| remote | ✔ |
| submodule | ✔ |
| **inspection and comparison** |
| show | ✔ |
| log | ✔ |
| shortlog | (see log) |
| describe | |
| **patching** |
| apply | ✖ |
| cherry-pick | ✖ |
| diff | ✔ | Patch object with UnifiedDiff output representation |
| rebase | ✖ |
| revert | ✖ |
| **debugging** |
| bisect | ✖ |
| blame | ✔ |
| grep | ✔ |
| **email** ||
| am | ✖ |
| apply | ✖ |
| format-patch | ✖ |
| send-email | ✖ |
| request-pull | ✖ |
| **external systems** |
| svn | |
| fast-import | ✖ |
| **administration** |
| clean | ✔ |
| gc | ✖ |
| fsck | ✖ |
| reflog | |
| filter-branch | ✖ |
| instaweb | |
| archive | |
| bundle | ✖ |
| prune | ✖ |
| repack | |
| **server admin** |
| daemon | |
| update-server-info | |
| **advanced** |
| notes | ✖ |
| replace | ✖ |
| worktree | |
| annotate | (see blame) |
| **gpg** |
| git-verify-commit | ✔ |
| git-verify-tag | ✔ |
| **plumbing commands** |
| cat-file | ✔ |
| check-ignore | |
| commit-tree | |
| count-objects | |
| diff-index | |
| for-each-ref | |
| hash-object | ✔ |
| ls-files | ✔ |
| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. |
| read-tree | |
| rev-list | ✔ |
| rev-parse | |
| show-ref | ✔ |
| symbolic-ref | |
| update-index | |
| update-ref | |
| verify-pack | |
| write-tree | |
| **protocols** |
| http(s):// (dumb) | |
| http(s):// (smart) | |
| git:// | |
| ssh:// | |
| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. |
| custom | |
| **other features** |
| gitignore | |
| gitattributes | |
| index version | |
| packfile version | |
| push-certs | ✖ |
## Getting and creating repositories
| Feature | Sub-feature | Status | Notes | Examples |
| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `init` | | ✅ | | |
| `init` | `--bare` | ✅ | | |
| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/>`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |
## Basic snapshotting
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ |
| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | |
| `status` | | ✅ | | |
| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
| `reset` | | ✅ | | |
| `rm` | | ✅ | | |
| `mv` | | ✅ | | |
## Branching and merging
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
| `merge` | | ❌ | | |
| `mergetool` | | ❌ | | |
| `stash` | | ❌ | | |
| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
## Sharing and updating projects
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ |
| `fetch` | | ✅ | | |
| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
| `push` | | ✅ | | - [push](_examples/push/main.go) |
| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
| `submodule` | deinit | ❌ | | |
## Inspection and comparison
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | --------- | ----- | ------------------------------ |
| `show` | | ✅ | | |
| `log` | | ✅ | | - [log](_examples/log/main.go) |
| `shortlog` | | (see log) | | |
| `describe` | | ❌ | | |
## Patching
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- |
| `apply` | | ❌ | | |
| `cherry-pick` | | ❌ | | |
| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
| `rebase` | | ❌ | | |
| `revert` | | ❌ | | |
## Debugging
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ----- | ---------------------------------- |
| `bisect` | | ❌ | | |
| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
| `grep` | | ✅ | | |
## Email
| Feature | Sub-feature | Status | Notes | Examples |
| -------------- | ----------- | ------ | ----- | -------- |
| `am` | | ❌ | | |
| `apply` | | ❌ | | |
| `format-patch` | | ❌ | | |
| `send-email` | | ❌ | | |
| `request-pull` | | ❌ | | |
## External systems
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ----- | -------- |
| `svn` | | ❌ | | |
| `fast-import` | | ❌ | | |
| `lfs` | | ❌ | | |
## Administration
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ----------- | ------ | ----- | -------- |
| `clean` | | ✅ | | |
| `gc` | | ❌ | | |
| `fsck` | | ❌ | | |
| `reflog` | | ❌ | | |
| `filter-branch` | | ❌ | | |
| `instaweb` | | ❌ | | |
| `archive` | | ❌ | | |
| `bundle` | | ❌ | | |
| `prune` | | ❌ | | |
| `repack` | | ❌ | | |
## Server admin
| Feature | Sub-feature | Status | Notes | Examples |
| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
| `daemon` | | ❌ | | |
| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) |
## Advanced
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | ----------- | ----- | -------- |
| `notes` | | ❌ | | |
| `replace` | | ❌ | | |
| `worktree` | | ❌ | | |
| `annotate` | | (see blame) | | |
## GPG
| Feature | Sub-feature | Status | Notes | Examples |
| ------------------- | ----------- | ------ | ----- | -------- |
| `git-verify-commit` | | ✅ | | |
| `git-verify-tag` | | ✅ | | |
## Plumbing commands
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- |
| `cat-file` | | ✅ | | |
| `check-ignore` | | ❌ | | |
| `commit-tree` | | ❌ | | |
| `count-objects` | | ❌ | | |
| `diff-index` | | ❌ | | |
| `for-each-ref` | | ✅ | | |
| `hash-object` | | ✅ | | |
| `ls-files` | | ✅ | | |
| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
| `merge-base` | `--independent` <br/> `--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
| `merge-base` | `--fork-point` <br/> `--octopus` | ❌ | | |
| `read-tree` | | ❌ | | |
| `rev-list` | | ✅ | | |
| `rev-parse` | | ❌ | | |
| `show-ref` | | ✅ | | |
| `symbolic-ref` | | ✅ | | |
| `update-index` | | ❌ | | |
| `update-ref` | | ❌ | | |
| `verify-pack` | | ❌ | | |
| `write-tree` | | ❌ | | |
## Indexes and Git Protocols
| Feature | Version | Status | Notes |
| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- |
| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| cruft packs | | ❌ | |
## Capabilities
| Feature | Status | Notes |
| ------------------------------ | ------------ | ----- |
| `multi_ack` | ❌ | |
| `multi_ack_detailed` | ❌ | |
| `no-done` | ❌ | |
| `thin-pack` | ❌ | |
| `side-band` | ⚠️ (partial) | |
| `side-band-64k` | ⚠️ (partial) | |
| `ofs-delta` | ✅ | |
| `agent` | ✅ | |
| `object-format` | ❌ | |
| `symref` | ✅ | |
| `shallow` | ✅ | |
| `deepen-since` | ✅ | |
| `deepen-not` | ❌ | |
| `deepen-relative` | ❌ | |
| `no-progress` | ✅ | |
| `include-tag` | ✅ | |
| `report-status` | ✅ | |
| `report-status-v2` | ❌ | |
| `delete-refs` | ✅ | |
| `quiet` | ❌ | |
| `atomic` | ✅ | |
| `push-options` | ✅ | |
| `allow-tip-sha1-in-want` | ✅ | |
| `allow-reachable-sha1-in-want` | ❌ | |
| `push-cert=<nonce>` | ❌ | |
| `filter` | ❌ | |
| `session-id=<session id>` | ❌ | |
## Transport Schemes
| Scheme | Status | Notes | Examples |
| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- |
| `http(s)://` (dumb) | ❌ | | |
| `http(s)://` (smart) | ✅ | | |
| `git://` | ✅ | | |
| `ssh://` | ✅ | | |
| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | |
| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
## SHA256
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ |
| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
| `pull` | | ❌ | | |
| `fetch` | | ❌ | | |
| `push` | | ❌ | | |
## Other features
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- |
| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
| `config` | `--global` <br/> `--system` | ✅ | Read-only. | |
| `gitignore` | | ✅ | | |
| `gitattributes` | | ✅ | | |
| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |

78
vendor/github.com/go-git/go-git/v5/EXTENDING.md generated vendored Normal file
View File

@@ -0,0 +1,78 @@
# Extending go-git
`go-git` was built in a highly extensible manner, which enables some of its functionalities to be changed or extended without the need of changing its codebase. Here are the key extensibility features:
## Dot Git Storers
Dot git storers are the components responsible for storing the Git internal files, including objects and references.
The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer stores all the data in memory, and its use looks like this:
```go
r, err := git.Init(memory.NewStorage(), nil)
```
The `filesystem` storer stores the data in the OS filesystem, and can be used as follows:
```go
r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo")), nil)
```
New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16).
## Filesystem
Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations will take place against the specific filesystem implementation. Initialising a repository in memory can be done as follows:
```go
fs := memfs.New()
r, err := git.Init(memory.NewStorage(), fs)
```
The same operation can be done against the OS filesystem:
```go
fs := osfs.New("/tmp/foo")
r, err := git.Init(memory.NewStorage(), fs)
```
New filesystems (e.g. cloud based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52).
## Transport Schemes
Git supports various transport schemes, including `http`, `https`, `ssh`, `git`, `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them.
The built-in implementations can be replaced by calling `client.InstallProtocol`.
An example of changing the built-in `https` implementation to skip TLS could look like this:
```go
customClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
client.InstallProtocol("https", githttp.NewClient(customClient))
```
Some internal implementations enable code reuse amongst the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`).
## Cache
Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17).
Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by implementing the `cache.Object` interface.
## Hash
`go-git` uses the `crypto.Hash` interface to represent hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/SHA256`.
The default hash functions can be changed by calling `hash.RegisterHash`.
```go
func init() {
hash.RegisterHash(crypto.SHA1, sha1.New)
}
```
New `SHA1` or `SHA256` hash functions that implement the `hash.Hash` interface can be registered by calling `RegisterHash`.

View File

@@ -27,7 +27,13 @@ build-git:
test:
@echo "running against `git version`"; \
$(GOTEST) ./...
$(GOTEST) -race ./...
TEMP_REPO := $(shell mktemp)
test-sha256:
$(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO)
cd $(TEMP_REPO) && git fsck
rm -rf $(TEMP_REPO)
test-coverage:
@echo "running against `git version`"; \
@@ -35,4 +41,13 @@ test-coverage:
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
clean:
rm -rf $(GIT_DIST_PATH)
rm -rf $(GIT_DIST_PATH)
fuzz:
@go test -fuzz=FuzzParser $(PWD)/internal/revision
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config
@go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile
@go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object
@go test -fuzz=FuzzDecode $(PWD)/plumbing/object
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp
@go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport

38
vendor/github.com/go-git/go-git/v5/SECURITY.md generated vendored Normal file
View File

@@ -0,0 +1,38 @@
# go-git Security Policy
The purpose of this security policy is to outline `go-git`'s process
for reporting, handling and disclosing security sensitive information.
## Supported Versions
The project follows a version support policy where only the latest minor
release is actively supported. Therefore, only issues that impact the latest
minor release will be fixed. Users are encouraged to upgrade to the latest
minor/patch release to benefit from the most up-to-date features, bug fixes,
and security enhancements.
The supported versions policy applies to both the `go-git` library and its
associated repositories within the `go-git` org.
## Reporting Security Issues
Please report any security vulnerabilities or potential weaknesses in `go-git`
privately via go-git-security@googlegroups.com. Do not publicly disclose the
details of the vulnerability until a fix has been implemented and released.
During the process the project maintainers will investigate the report, so please
provide detailed information, including steps to reproduce, affected versions, and any mitigations if known.
The project maintainers will acknowledge the receipt of the report and work with
the reporter to validate and address the issue.
Please note that `go-git` does not have any bounty programs, and therefore does
not provide financial compensation for disclosures.
## Security Disclosure Process
The project maintainers will make every effort to promptly address security issues.
Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures.
All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.

View File

@@ -2,16 +2,18 @@ package git
import (
"bytes"
"container/heap"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/utils/diff"
"github.com/sergi/go-diff/diffmatchpatch"
)
// BlameResult represents the result of a Blame operation.
@@ -29,53 +31,26 @@ type BlameResult struct {
func Blame(c *object.Commit, path string) (*BlameResult, error) {
// The file to blame is identified by the input arguments:
// commit and path. commit is a Commit object obtained from a Repository. Path
// represents a path to a specific file contained into the repository.
// represents a path to a specific file contained in the repository.
//
// Blaming a file is a two step process:
// Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified.
//
// 1. Create a linear history of the commits affecting a file. We use
// revlist.New for that.
// When a diff is found it cannot immediately assume it came from that commit, as it may have come from 1 of its
// parents, so it will first try to resolve those diffs from its parents, if it couldn't find the change in its
// parents then it will assign the change to itself.
//
// 2. Then build a graph with a node for every line in every file in
// the history of the file.
// When encountering 2 parents that have made the same change to a file it will choose the parent that was merged
// into the current branch first (this is determined by the order of the parents inside the commit).
//
// Each node is assigned a commit: Start by the nodes in the first
// commit. Assign that commit as the creator of all its lines.
//
// Then jump to the nodes in the next commit, and calculate the diff
// between the two files. Newly created lines get
// assigned the new commit as its origin. Modified lines also get
// this new commit. Untouched lines retain the old commit.
//
// All this work is done in the assignOrigin function which holds all
// the internal relevant data in a "blame" struct, that is not
// exported.
//
// TODO: ways to improve the efficiency of this function:
// 1. Improve revlist
// 2. Improve how to traverse the history (example a backward traversal will
// be much more efficient)
//
// TODO: ways to improve the function in general:
// 1. Add memoization between revlist and assign.
// 2. It is using much more memory than needed, see the TODOs below.
// This currently works on a line by line basis, if performance becomes an issue it could be changed to work with
// hunks rather than lines. Then when encountering diff hunks it would need to split them where necessary.
b := new(blame)
b.fRev = c
b.path = path
b.q = new(priorityQueue)
// get all the file revisions
if err := b.fillRevs(); err != nil {
return nil, err
}
// calculate the line tracking graph and fill in
// file contents in data.
if err := b.fillGraphAndData(); err != nil {
return nil, err
}
file, err := b.fRev.File(b.path)
file, err := b.fRev.File(path)
if err != nil {
return nil, err
}
@@ -83,13 +58,59 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
if err != nil {
return nil, err
}
finalLength := len(finalLines)
// Each node (line) holds the commit where it was introduced or
// last modified. To achieve that we use the FORWARD algorithm
// described in Zimmermann, et al. "Mining Version Archives for
// Co-changed Lines", in proceedings of the Mining Software
// Repositories workshop, Shanghai, May 22-23, 2006.
lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1))
needsMap := make([]lineMap, finalLength)
for i := range needsMap {
needsMap[i] = lineMap{i, i, nil, -1}
}
contents, err := file.Contents()
if err != nil {
return nil, err
}
b.q.Push(&queueItem{
nil,
nil,
c,
path,
contents,
needsMap,
0,
false,
0,
})
items := make([]*queueItem, 0)
for {
items = items[:0]
for {
if b.q.Len() == 0 {
return nil, errors.New("invalid state: no items left on the blame queue")
}
item := b.q.Pop()
items = append(items, item)
next := b.q.Peek()
if next == nil || next.Hash != item.Commit.Hash {
break
}
}
finished, err := b.addBlames(items)
if err != nil {
return nil, err
}
if finished == true {
break
}
}
if err != nil {
return nil, err
}
b.lineToCommit = make([]*object.Commit, finalLength)
for i := range needsMap {
b.lineToCommit[i] = needsMap[i].Commit
}
lines, err := newLines(finalLines, b.lineToCommit)
if err != nil {
return nil, err
}
@@ -105,6 +126,8 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
type Line struct {
// Author is the email address of the last author that modified the line.
Author string
// AuthorName is the name of the last author that modified the line.
AuthorName string
// Text is the original text of the line.
Text string
// Date is when the original text of the line was introduced
@@ -113,31 +136,21 @@ type Line struct {
Hash plumbing.Hash
}
func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line {
return &Line{
Author: author,
Text: text,
Hash: hash,
Date: date,
Author: author,
AuthorName: authorName,
Text: text,
Hash: hash,
Date: date,
}
}
func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
lcontents := len(contents)
lcommits := len(commits)
if lcontents != lcommits {
if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
contents = append(contents, "\n")
} else {
return nil, errors.New("contents and commits have different length")
}
}
result := make([]*Line, 0, lcontents)
result := make([]*Line, 0, len(contents))
for i := range contents {
result = append(result, newLine(
commits[i].Author.Email, contents[i],
commits[i].Author.Email, commits[i].Author.Name, contents[i],
commits[i].Author.When, commits[i].Hash,
))
}
@@ -152,151 +165,426 @@ type blame struct {
path string
// the commit of the final revision of the file to blame
fRev *object.Commit
// the chain of revisions affecting the file to blame
revs []*object.Commit
// the contents of the file across all its revisions
data []string
// the graph of the lines in the file across all the revisions
graph [][]*object.Commit
// resolved lines
lineToCommit []*object.Commit
// queue of commits that need resolving
q *priorityQueue
}
// calculate the history of a file "path", starting from commit "from", sorted by commit date.
func (b *blame) fillRevs() error {
var err error
b.revs, err = references(b.fRev, b.path)
return err
type lineMap struct {
Orig, Cur int
Commit *object.Commit
FromParentNo int
}
// build graph of a file from its revision history
func (b *blame) fillGraphAndData() error {
//TODO: not all commits are needed, only the current rev and the prev
b.graph = make([][]*object.Commit, len(b.revs))
b.data = make([]string, len(b.revs)) // file contents in all the revisions
// for every revision of the file, starting with the first
// one...
for i, rev := range b.revs {
func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
curItem := curItems[0]
// Simple optimisation to merge paths, there is potential to go a bit further here and check for any duplicates
// not only if they are all the same.
if len(curItems) == 1 {
curItems = nil
} else if curItem.IdenticalToChild {
allSame := true
lenCurItems := len(curItems)
lowestParentNo := curItem.ParentNo
for i := 1; i < lenCurItems; i++ {
if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child {
allSame = false
break
}
lowestParentNo = min(lowestParentNo, curItems[i].ParentNo)
}
if allSame {
curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1
curItems = nil // free the memory
curItem.ParentNo = lowestParentNo
// Now check if we can remove the parent completely
for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 {
oldChild := curItem.Child
curItem.Child = oldChild.Child
curItem.ParentNo = oldChild.ParentNo
}
}
}
// if we have more than 1 item for this commit, create a single needsMap
if len(curItems) > 1 {
curItem.MergedChildren = make([]childToNeedsMap, len(curItems))
for i, c := range curItems {
curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo}
}
newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap))
newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...)
for i := 1; i < len(curItems); i++ {
cur := curItems[i].NeedsMap
n := 0 // position in newNeedsMap
c := 0 // position in current list
for c < len(cur) {
if n == len(newNeedsMap) {
newNeedsMap = append(newNeedsMap, cur[c:]...)
break
} else if newNeedsMap[n].Cur == cur[c].Cur {
n++
c++
} else if newNeedsMap[n].Cur < cur[c].Cur {
n++
} else {
newNeedsMap = append(newNeedsMap, cur[c])
newPos := len(newNeedsMap) - 1
for newPos > n {
newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1]
newPos--
}
}
}
}
curItem.NeedsMap = newNeedsMap
curItem.IdenticalToChild = false
curItem.Child = nil
curItems = nil // free the memory
}
parents, err := parentsContainingPath(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
anyPushed := false
for parnetNo, prev := range parents {
currentHash, err := blobHash(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
prevHash, err := blobHash(prev.Path, prev.Commit)
if err != nil {
return false, err
}
if currentHash == prevHash {
if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild {
// commit that has 1 parent and 1 child and is the same as both, bypass it completely
b.q.Push(&queueItem{
Child: curItem.Child,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item
IdenticalToChild: true,
ParentNo: curItem.ParentNo,
})
} else {
b.q.Push(&queueItem{
Child: curItem,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy
IdenticalToChild: true,
ParentNo: parnetNo,
})
curItem.numParentsNeedResolving++
}
anyPushed = true
continue
}
// get the contents of the file
file, err := rev.File(b.path)
file, err := prev.Commit.File(prev.Path)
if err != nil {
return nil
return false, err
}
b.data[i], err = file.Contents()
prevContents, err := file.Contents()
if err != nil {
return err
return false, err
}
nLines := countLines(b.data[i])
// create a node for each line
b.graph[i] = make([]*object.Commit, nLines)
// assign a commit to each node
// if this is the first revision, then the node is assigned to
// this first commit.
if i == 0 {
for j := 0; j < nLines; j++ {
b.graph[i][j] = b.revs[i]
hunks := diff.Do(prevContents, curItem.Contents)
prevl := -1
curl := -1
need := 0
getFromParent := make([]lineMap, 0)
out:
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch {
case hunks[h].Type == diffmatchpatch.DiffEqual:
prevl++
curl++
if curl == curItem.NeedsMap[need].Cur {
// add to needs
getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1})
// move to next need
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case hunks[h].Type == diffmatchpatch.DiffInsert:
curl++
if curl == curItem.NeedsMap[need].Cur {
// the line we want is added, it may have been added here (or by another parent), skip it for now
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case hunks[h].Type == diffmatchpatch.DiffDelete:
prevl += hLines
continue out
default:
return false, errors.New("invalid state: invalid hunk Type")
}
}
} else {
// if this is not the first commit, then assign to the old
// commit or to the new one, depending on what the diff
// says.
b.assignOrigin(i, i-1)
}
if len(getFromParent) > 0 {
b.q.Push(&queueItem{
curItem,
nil,
prev.Commit,
prev.Path,
prevContents,
getFromParent,
0,
false,
parnetNo,
})
curItem.numParentsNeedResolving++
anyPushed = true
}
}
return nil
}
// sliceGraph returns a slice of commits (one per line) for a particular
// revision of a file (0=first revision).
func (b *blame) sliceGraph(i int) []*object.Commit {
fVs := b.graph[i]
result := make([]*object.Commit, 0, len(fVs))
for _, v := range fVs {
c := *v
result = append(result, &c)
curItem.Contents = "" // no longer need, free the memory
if !anyPushed {
return finishNeeds(curItem)
}
return result
return false, nil
}
// Assigns origin to vertexes in current (c) rev from data in its previous (p)
// revision
func (b *blame) assignOrigin(c, p int) {
// assign origin based on diff info
hunks := diff.Do(b.data[p], b.data[c])
sl := -1 // source line
dl := -1 // destination line
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch {
case hunks[h].Type == 0:
sl++
dl++
b.graph[c][dl] = b.graph[p][sl]
case hunks[h].Type == 1:
dl++
b.graph[c][dl] = b.revs[c]
case hunks[h].Type == -1:
sl++
default:
panic("unreachable")
// finishNeeds is invoked once every parent of curItem has been consulted (or
// there were none). Any line in curItem.NeedsMap that no parent claimed must
// have been introduced by curItem's own commit, so it is attributed here, and
// the resolved attributions are then propagated down to the child item(s).
// It returns true once the blame of the target revision is fully resolved.
func finishNeeds(curItem *queueItem) (bool, error) {
	// any needs left in the needsMap must have come from this revision
	for i := range curItem.NeedsMap {
		if curItem.NeedsMap[i].Commit == nil {
			curItem.NeedsMap[i].Commit = curItem.Commit
			curItem.NeedsMap[i].FromParentNo = -1 // -1 marks "from this commit", not from a parent
		}
	}
	// No child at all: curItem is the revision being blamed, so we are done.
	if curItem.Child == nil && curItem.MergedChildren == nil {
		return true, nil
	}
	// Single child: push the resolved attributions straight down.
	if curItem.MergedChildren == nil {
		return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo)
	}
	// Several children were merged into this item: copy each resolved commit
	// back into the per-child needs maps. The two-pointer walk below relies on
	// both maps being ordered by Cur.
	for _, ctn := range curItem.MergedChildren {
		m := 0 // position in merged needs map
		p := 0 // position in parent needs map
		for p < len(ctn.NeedsMap) {
			if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur {
				ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit
				m++
				p++
			} else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur {
				p++
			} else {
				m++
			}
		}
		// Hand the now-filled needs map to the child; stop early if the
		// overall blame finished (or an error surfaced) further down.
		finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo)
		if finished || err != nil {
			return finished, err
		}
	}
	return false, nil
}
// applyNeeds propagates resolved line attributions from a parent revision's
// needsMap into child.NeedsMap. An already-attributed line is only
// overwritten when this parent has a lower parent number, i.e. earlier
// parents take precedence. Once the child has heard back from all of its
// parents, its remaining needs are finalized via finishNeeds, which may
// cascade further down the chain. Returns true once the blame of the target
// revision is fully resolved.
func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) {
	if identicalToChild {
		// Fast path: the parent's contents match the child's, so both needs
		// maps must line up index-for-index. A mismatch is an internal
		// invariant violation.
		for i := range child.NeedsMap {
			l := &child.NeedsMap[i]
			if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig {
				return false, errors.New("needsMap isn't the same? Why not??")
			}
			if l.Commit == nil || parentNo < l.FromParentNo {
				l.Commit = needsMap[i].Commit
				l.FromParentNo = parentNo
			}
		}
	} else {
		// General case: match entries by line number with a two-pointer walk.
		// needsMap[i].Orig is the parent's line expressed in the child's
		// numbering (l.Cur); both maps are ordered, so each is scanned once.
		i := 0
	out:
		for j := range child.NeedsMap {
			l := &child.NeedsMap[j]
			// Advance the parent cursor until it reaches (or passes) this
			// child line; bail out when the parent map is exhausted.
			for needsMap[i].Orig < l.Cur {
				i++
				if i == len(needsMap) {
					break out
				}
			}
			if l.Cur == needsMap[i].Orig {
				if l.Commit == nil || parentNo < l.FromParentNo {
					l.Commit = needsMap[i].Commit
					l.FromParentNo = parentNo
				}
			}
		}
	}
	child.numParentsNeedResolving--
	if child.numParentsNeedResolving == 0 {
		// Last outstanding parent: attribute whatever is still unresolved in
		// the child and keep propagating downwards.
		finished, err := finishNeeds(child)
		if finished || err != nil {
			return finished, err
		}
	}
	return false, nil
}
// GoString prints the results of a Blame using git-blame's style.
//
// NOTE(review): the region below interleaves two declarations — an old
// (b *blame) GoString and a new (b BlameResult) String — producing a nested
// func literal-looking declaration, duplicate assignments to mlnl and
// format, and a body that mixes the graph-based (fVs/lines) and Lines-based
// variables. This is a corrupted diff artifact; reconstruct against
// upstream go-git.
func (b *blame) GoString() string {
	// String prints the results of a Blame using git-blame's style.
	func (b BlameResult) String() string {
	var buf bytes.Buffer
	file, err := b.fRev.File(b.path)
	if err != nil {
		panic("PrettyPrint: internal error in repo.Data")
	}
	contents, err := file.Contents()
	if err != nil {
		panic("PrettyPrint: internal error in repo.Data")
	}
	lines := strings.Split(contents, "\n")
	// max line number length
	mlnl := len(strconv.Itoa(len(lines)))
	mlnl := len(strconv.Itoa(len(b.Lines)))
	// max author length
	mal := b.maxAuthorLength()
	format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n",
		mal, mlnl)
	format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl)
	fVs := b.graph[len(b.graph)-1]
	for ln, v := range fVs {
		fmt.Fprintf(&buf, format, v.Hash.String()[:8],
			prettyPrintAuthor(fVs[ln]), ln+1, lines[ln])
	for ln := range b.Lines {
		_, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8],
			b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text)
	}
	return buf.String()
}
// prettyPrintAuthor renders a commit's author for blame output as
// "<name> <YYYY-MM-DD>", e.g. "Jane Doe 2023-12-13".
func prettyPrintAuthor(c *object.Commit) string {
	when := c.Author.When.Format("2006-01-02")
	return c.Author.Name + " " + when
}
// utility function to calculate the number of runes needed
// to print the longest author name in the blame of a file.
//
// NOTE(review): two method headers are interleaved below — the old
// (b *blame) version (using memo/fVs/prettyPrintAuthor) and the new
// (b BlameResult) version (using b.Lines/AuthorName) — and the loops are
// incompletely braced. This is a corrupted diff artifact; reconstruct
// against upstream go-git.
func (b *blame) maxAuthorLength() int {
	memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
	fVs := b.graph[len(b.graph)-1]
	func (b BlameResult) maxAuthorLength() int {
	m := 0
	for ln := range fVs {
		if _, ok := memo[fVs[ln].Hash]; ok {
			continue
		}
		memo[fVs[ln].Hash] = struct{}{}
		m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
	for ln := range b.Lines {
		m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
	}
	return m
}
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// childToNeedsMap ties a child queue item to the slice of its needs map that
// was merged into a shared parent item, so that resolved attributions can be
// copied back to the correct child later (see finishNeeds).
type childToNeedsMap struct {
	Child            *queueItem // the child item these needs belong to
	NeedsMap         []lineMap  // the child's portion of the merged line needs
	IdenticalToChild bool       // needs map lines up index-for-index with the child's (see applyNeeds)
	ParentNo         int        // parent ordinal from the child's view; lower numbers take precedence
}
// queueItem is one unit of work in the blame priority queue: a commit/path
// pair whose lines still need attribution, plus the links required to
// propagate resolved attributions back toward the revision being blamed.
type queueItem struct {
	Child                   *queueItem        // single child waiting on this item (nil when children were merged)
	MergedChildren          []childToNeedsMap // children merged into this item, each with its own needs map
	Commit                  *object.Commit    // commit under inspection
	path                    string            // file path at this commit
	Contents                string            // file contents at this commit (cleared once diffed, to free memory)
	NeedsMap                []lineMap         // lines whose originating commit is still being tracked
	numParentsNeedResolving int               // number of parents not yet heard back from
	IdenticalToChild        bool              // contents are identical to the child revision's
	ParentNo                int               // this item's parent ordinal from the child's view
}
// priorityQueueImp implements container/heap's heap.Interface over queue
// items. Less is inverted relative to Commit.Less, so heap.Pop yields items
// in reverse Commit.Less order (presumably newest commit first — confirm
// against object.Commit.Less).
type priorityQueueImp []*queueItem

func (pq *priorityQueueImp) Len() int { return len(*pq) }
func (pq *priorityQueueImp) Less(i, j int) bool {
	// Inverted comparison: the heap's "smallest" is the largest per Commit.Less.
	return !(*pq)[i].Commit.Less((*pq)[j].Commit)
}
func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
func (pq *priorityQueueImp) Push(x any)    { *pq = append(*pq, x.(*queueItem)) }
func (pq *priorityQueueImp) Pop() any {
	n := len(*pq)
	ret := (*pq)[n-1]
	(*pq)[n-1] = nil // avoid memory leak: drop the reference so the item can be GC'd
	*pq = (*pq)[0 : n-1]
	return ret
}

// Peek returns the commit at the head of the queue without removing it,
// or nil when the queue is empty.
func (pq *priorityQueueImp) Peek() *object.Commit {
	if len(*pq) == 0 {
		return nil
	}
	return (*pq)[0].Commit
}
// priorityQueue is a type-safe façade over priorityQueueImp that routes
// every mutation through container/heap, keeping the heap invariant.
type priorityQueue priorityQueueImp

// Init establishes the heap invariant over the current contents.
func (pq *priorityQueue) Init()    { heap.Init((*priorityQueueImp)(pq)) }
func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }

// Push inserts c while keeping the heap ordered.
func (pq *priorityQueue) Push(c *queueItem) {
	heap.Push((*priorityQueueImp)(pq), c)
}

// Pop removes and returns the highest-priority item.
func (pq *priorityQueue) Pop() *queueItem {
	return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
}

// Peek returns the head commit without removing it (nil when empty).
func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
// parentCommit pairs a parent commit with the path the blamed file had in
// that parent — the path differs from the child's across renames (see
// parentsContainingPath).
type parentCommit struct {
	Commit *object.Commit // the parent commit
	Path   string         // path of the file within that parent
}
// parentsContainingPath returns the parents of c that contain the given
// file, each paired with the path the file has in that parent. When a parent
// lacks the file at the same path, the patch from parent to c is scanned for
// a rename whose destination is path, and the source path is recorded
// instead. Parents that never contained the file are omitted.
func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) {
	// TODO: benchmark this method making git.object.Commit.parent public instead of using
	// an iterator
	var result []parentCommit
	iter := c.Parents()
	for {
		parent, err := iter.Next()
		if err == io.EOF {
			// Iterator exhausted: every parent has been inspected.
			return result, nil
		}
		if err != nil {
			return nil, err
		}
		if _, err := parent.File(path); err == nil {
			// Parent has the file at the same path.
			result = append(result, parentCommit{parent, path})
		} else {
			// look for renames
			patch, err := parent.Patch(c)
			if err != nil {
				return nil, err
			} else if patch != nil {
				for _, fp := range patch.FilePatches() {
					from, to := fp.Files()
					if from != nil && to != nil && to.Path() == path {
						// The file was renamed to `path` in c; track the
						// parent under its old name.
						result = append(result, parentCommit{parent, from.Path()})
						break
					}
				}
			}
		}
	}
}
// blobHash resolves the blob hash of the file at path inside the given
// commit. When the file cannot be found, plumbing.ZeroHash is returned
// together with the lookup error.
func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) {
	f, err := commit.File(path)
	if err != nil {
		return plumbing.ZeroHash, err
	}
	return f.Hash, nil
}

View File

@@ -2,6 +2,7 @@ package config
import (
"errors"
"strings"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
@@ -26,6 +27,12 @@ type Branch struct {
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
// Description explains what the branch is for.
// Multi-line explanations may be used.
//
// Original git command to edit:
// git branch --edit-description
Description string
raw *format.Subsection
}
@@ -47,7 +54,7 @@ func (b *Branch) Validate() error {
return errBranchInvalidRebase
}
return nil
return plumbing.NewBranchReferenceName(b.Name).Validate()
}
func (b *Branch) marshal() *format.Subsection {
@@ -75,9 +82,27 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(rebaseKey, b.Rebase)
}
if b.Description == "" {
b.raw.RemoveOption(descriptionKey)
} else {
desc := quoteDescription(b.Description)
b.raw.SetOption(descriptionKey, desc)
}
return b.raw
}
// quoteDescription escapes a branch description before it is handed to the
// config encoder.
//
// This is a workaround for the conditional quoting in
// plumbing/format/config/Encoder.encodeOptions: the encoder falls back to
// Go's %q formatting whenever a value contains a backslash, which does not
// match the reference git implementation. git simply writes newline
// characters as the two-character sequence \n and leaves everything else
// verbatim, so descriptions are pre-escaped here the same way until the
// encoder's quoting is fixed.
func quoteDescription(desc string) string {
	var b strings.Builder
	b.Grow(len(desc))
	for _, r := range desc {
		if r == '\n' {
			b.WriteString(`\n`)
		} else {
			b.WriteRune(r)
		}
	}
	return b.String()
}
func (b *Branch) unmarshal(s *format.Subsection) error {
b.raw = s
@@ -85,6 +110,14 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
return b.Validate()
}
// unquoteDescription reverses the escaping applied by quoteDescription,
// turning every literal two-character \n sequence back into a real newline.
// See quoteDescription for why this round-trip exists.
func unquoteDescription(desc string) string {
	return strings.NewReplacer(`\n`, "\n").Replace(desc)
}

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -14,8 +13,8 @@ import (
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/mitchellh/go-homedir"
)
const (
@@ -60,12 +59,14 @@ type Config struct {
// CommentChar is the character indicating the start of a
// comment for commands like commit and tag
CommentChar string
// RepositoryFormatVersion identifies the repository format and layout version.
RepositoryFormatVersion format.RepositoryFormatVersion
}
User struct {
// Name is the personal name of the author and the commiter of a commit.
// Name is the personal name of the author and the committer of a commit.
Name string
// Email is the email of the author and the commiter of a commit.
// Email is the email of the author and the committer of a commit.
Email string
}
@@ -77,9 +78,9 @@ type Config struct {
}
Committer struct {
// Name is the personal name of the commiter of a commit.
// Name is the personal name of the committer of a commit.
Name string
// Email is the email of the the commiter of a commit.
// Email is the email of the committer of a commit.
Email string
}
@@ -97,6 +98,17 @@ type Config struct {
DefaultBranch string
}
Extensions struct {
// ObjectFormat specifies the hash algorithm to use. The
// acceptable values are sha1 and sha256. If not specified,
// sha1 is assumed. It is an error to specify this key unless
// core.repositoryFormatVersion is 1.
//
// This setting must not be changed after repository initialization
// (e.g. clone or init).
ObjectFormat format.ObjectFormat
}
// Remotes list of repository remotes, the key of the map is the name
// of the remote, should equal to RemoteConfig.Name.
Remotes map[string]*RemoteConfig
@@ -132,7 +144,7 @@ func NewConfig() *Config {
// ReadConfig reads a config file from a io.Reader.
func ReadConfig(r io.Reader) (*Config, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@@ -146,11 +158,11 @@ func ReadConfig(r io.Reader) (*Config, error) {
}
// LoadConfig loads a config file from a given scope. The returned Config,
// contains exclusively information fom the given scope. If couldn't find a
// config file to the given scope, a empty one is returned.
// contains exclusively information from the given scope. If it couldn't find a
// config file to the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer")
}
files, err := Paths(scope)
@@ -185,7 +197,7 @@ func Paths(scope Scope) ([]string, error) {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := homedir.Dir()
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@@ -227,27 +239,32 @@ func (c *Config) Validate() error {
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
defaultBranchKey = "defaultBranch"
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
extensionsSection = "extensions"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
repositoryFormatVersionKey = "repositoryformatversion"
objectFormat = "objectformat"
mirrorKey = "mirror"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
@@ -391,6 +408,7 @@ func (c *Config) unmarshalInit() {
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalExtensions()
c.marshalUser()
c.marshalPack()
c.marshalRemotes()
@@ -410,12 +428,24 @@ func (c *Config) Marshal() ([]byte, error) {
func (c *Config) marshalCore() {
s := c.Raw.Section(coreSection)
s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
if string(c.Core.RepositoryFormatVersion) != "" {
s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion))
}
if c.Core.Worktree != "" {
s.SetOption(worktreeKey, c.Core.Worktree)
}
}
func (c *Config) marshalExtensions() {
// Extensions are only supported on Version 1, therefore
// ignore them otherwise.
if c.Core.RepositoryFormatVersion == format.Version_1 {
s := c.Raw.Section(extensionsSection)
s.SetOption(objectFormat, string(c.Extensions.ObjectFormat))
}
}
func (c *Config) marshalUser() {
s := c.Raw.Section(userSection)
if c.User.Name != "" {
@@ -549,6 +579,8 @@ type RemoteConfig struct {
// URLs the URLs of a remote repository. It must be non-empty. Fetch will
// always use the first URL, while push will use all of them.
URLs []string
// Mirror indicates that the repository is a mirror of remote.
Mirror bool
// insteadOfRulesApplied have urls been modified
insteadOfRulesApplied bool
@@ -583,7 +615,7 @@ func (c *RemoteConfig) Validate() error {
c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
}
return nil
return plumbing.NewRemoteHEADReferenceName(c.Name).Validate()
}
func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
@@ -602,6 +634,7 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.Fetch = fetch
c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
return nil
}
@@ -634,6 +667,10 @@ func (c *RemoteConfig) marshal() *format.Subsection {
c.raw.SetOption(fetchKey, values...)
}
if c.Mirror {
c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror))
}
return c.raw
}

View File

@@ -64,7 +64,7 @@ func (s RefSpec) IsExactSHA1() bool {
return plumbing.IsHash(s.Src())
}
// Src return the src side.
// Src returns the src side.
func (s RefSpec) Src() string {
spec := string(s)

View File

@@ -0,0 +1,29 @@
package path_util
import (
"os"
"os/user"
"strings"
)
// ReplaceTildeWithHome expands a leading tilde in path: "~/rest" becomes the
// current user's home directory followed by /rest, and "~name/rest" becomes
// name's home directory (looked up via os/user) followed by /rest. Paths
// without a leading tilde, a bare "~", or a bare "~name" with no slash are
// returned unchanged. On lookup failure the original path is returned along
// with the error.
func ReplaceTildeWithHome(path string) (string, error) {
	if !strings.HasPrefix(path, "~") {
		return path, nil
	}
	firstSlash := strings.Index(path, "/")
	switch {
	case firstSlash == 1:
		// "~/rest": substitute the current user's home directory.
		home, err := os.UserHomeDir()
		if err != nil {
			return path, err
		}
		return strings.Replace(path, "~", home, 1), nil
	case firstSlash > 1:
		// "~name/rest": substitute name's home directory.
		name := path[1:firstSlash]
		account, err := user.Lookup(name)
		if err != nil {
			return path, err
		}
		return strings.Replace(path, path[:firstSlash], account.HomeDir, 1), nil
	}
	// No slash after the tilde: leave the path as-is.
	return path, nil
}

View File

@@ -322,6 +322,8 @@ func (p *Parser) parseAt() (Revisioner, error) {
}
return AtDate{t}, nil
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in @{<data>} structure`}
default:
date += lit
}
@@ -424,6 +426,8 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) {
p.unscan()
case tok != slash && start:
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in ^{<data>} structure`}
case tok != cbrace:
p.unscan()
re += lit

View File

@@ -5,8 +5,10 @@ import (
)
var (
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`)
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
// Ref: https://github.com/git/git/blob/master/Documentation/urls.txt#L37
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`)
)
// MatchesScheme returns true if the given string matches a URL-like

View File

@@ -60,7 +60,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
// Fetch the object.
obj, err := object.GetObject(p.Storer, hash)
if err != nil {
return fmt.Errorf("Getting object %s failed: %v", hash, err)
return fmt.Errorf("getting object %s failed: %v", hash, err)
}
// Walk all children depending on object type.
switch obj := obj.(type) {
@@ -98,7 +98,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
return p.walkObjectTree(obj.Target)
default:
// Error out on unhandled object types.
return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)
return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
}
return nil
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
"github.com/go-git/go-git/v5/plumbing/transport"
@@ -45,6 +46,14 @@ type CloneOptions struct {
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
SingleBranch bool
// Mirror clones the repository as a mirror.
//
// Compared to a bare clone, mirror not only maps local branches of the
// source to local branches of the target, it maps all refs (including
// remote-tracking branches, notes etc.) and sets up a refspec configuration
// such that all these refs are overwritten by a git remote update in the
// target repository.
Mirror bool
// No checkout of HEAD after clone if true.
NoCheckout bool
// Limit fetching to the specified number of commits.
@@ -53,6 +62,9 @@ type CloneOptions struct {
// within, using their default settings. This option is ignored if the
// cloned repository does not have a worktree.
RecurseSubmodules SubmoduleRescursivity
// ShallowSubmodules limit cloning submodules to the 1 level of depth.
// It matches the git command --shallow-submodules.
ShallowSubmodules bool
// Progress is where the human readable information sent by the server is
// stored, if nil nothing is stored and the capability (if supported)
// no-progress, is sent to the server to avoid send this information.
@@ -64,6 +76,17 @@ type CloneOptions struct {
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// When the repository to clone is on the local machine, instead of
// using hard links, automatically setup .git/objects/info/alternates
// to share the objects with the source repository.
// The resulting repository starts out without any object of its own.
// NOTE: this is a possibly dangerous operation; do not use it unless
// you understand what it does.
//
// [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared
Shared bool
}
// Validate validates the fields and sets the default values.
@@ -91,6 +114,8 @@ func (o *CloneOptions) Validate() error {
type PullOptions struct {
// Name of the remote to be pulled. If empty, uses the default.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// Remote branch to clone. If empty, uses HEAD.
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
@@ -113,6 +138,8 @@ type PullOptions struct {
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// Validate validates the fields and sets the default values.
@@ -147,7 +174,9 @@ const (
type FetchOptions struct {
// Name of the remote to fetch from. Defaults to origin.
RemoteName string
RefSpecs []config.RefSpec
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
RefSpecs []config.RefSpec
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
@@ -167,6 +196,8 @@ type FetchOptions struct {
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// Validate validates the fields and sets the default values.
@@ -192,8 +223,16 @@ func (o *FetchOptions) Validate() error {
type PushOptions struct {
// RemoteName is the name of the remote to be pushed to.
RemoteName string
// RefSpecs specify what destination ref to update with what source
// object. A refspec with empty src can be used to delete a reference.
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// RefSpecs specify what destination ref to update with what source object.
//
// The format of a <refspec> parameter is an optional plus +, followed by
// the source object <src>, followed by a colon :, followed by the destination ref <dst>.
// The <src> is often the name of the branch you would want to push, but it can be a SHA-1.
// The <dst> tells which ref on the remote side is updated with this push.
//
// A refspec with empty src can be used to delete a reference.
RefSpecs []config.RefSpec
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
@@ -206,13 +245,37 @@ type PushOptions struct {
// Force allows the push to update a remote branch even when the local
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
// RequireRemoteRefs only allows a remote ref to be updated if its current
// value is the one specified here.
RequireRemoteRefs []config.RefSpec
// FollowTags will send any annotated tags with a commit target reachable from
// the refs already being pushed
FollowTags bool
// ForceWithLease allows a force push as long as the remote ref adheres to a "lease"
ForceWithLease *ForceWithLease
// PushOptions sets options to be transferred to the server during push.
Options map[string]string
// Atomic sets option to be an atomic push
Atomic bool
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// ForceWithLease sets fields on the lease
// If neither RefName nor Hash are set, ForceWithLease protects
// all refs in the refspec by ensuring the ref of the remote in the local repsitory
// matches the one in the ref advertisement.
type ForceWithLease struct {
// RefName, when set will protect the ref by ensuring it matches the
// hash in the ref advertisement.
RefName plumbing.ReferenceName
// Hash is the expected object id of RefName. The push will be rejected unless this
// matches the corresponding object id of RefName in the refs advertisement.
Hash plumbing.Hash
}
// Validate validates the fields and sets the default values.
@@ -249,6 +312,9 @@ type SubmoduleUpdateOptions struct {
RecurseSubmodules SubmoduleRescursivity
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
}
var (
@@ -274,6 +340,8 @@ type CheckoutOptions struct {
// target branch. Force and Keep are mutually exclusive, should not be both
// set to true.
Keep bool
// SparseCheckoutDirectories
SparseCheckoutDirectories []string
}
// Validate validates the fields and sets the default values.
@@ -366,7 +434,7 @@ type LogOptions struct {
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
// this field is kept for compatility, it can be replaced with PathFilter
// this field is kept for compatibility, it can be replaced with PathFilter
FileName *string
// Filter commits based on the path of files that are updated
@@ -422,6 +490,10 @@ type CommitOptions struct {
// All automatically stage files that have been modified and deleted, but
// new files you have not told Git about are not affected.
All bool
// AllowEmptyCommits enable empty commits to be created. An empty commit
// is when no changes to the tree were made, but a new commit message is
// provided. The default behavior is false, which results in ErrEmptyCommit.
AllowEmptyCommits bool
// Author is the author's signature of the commit. If Author is empty the
// Name and Email is read from the config, and time.Now it's used as When.
Author *object.Signature
@@ -435,10 +507,21 @@ type CommitOptions struct {
// commit will not be signed. The private key must be present and already
// decrypted.
SignKey *openpgp.Entity
// Amend will create a new commit object and replace the commit that HEAD currently
// points to. Cannot be used with All nor Parents.
Amend bool
}
// Validate validates the fields and sets the default values.
func (o *CommitOptions) Validate(r *Repository) error {
if o.All && o.Amend {
return errors.New("all and amend cannot be used together")
}
if o.Amend && len(o.Parents) > 0 {
return errors.New("parents cannot be used with amend")
}
if o.Author == nil {
if err := o.loadConfigAuthorAndCommitter(r); err != nil {
return err
@@ -571,12 +654,35 @@ func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
// PeelingOption defines how peeled objects are handled during a
// remote list.
PeelingOption PeelingOption
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// Timeout specifies the timeout in seconds for list operations
Timeout int
}
// PeelingOption represents the different ways to handle peeled references.
//
// Peeled references represent the underlying object of an annotated
// (or signed) tag. Refer to upstream documentation for more info:
// https://github.com/git/git/blob/master/Documentation/technical/reftable.txt
type PeelingOption uint8
const (
// IgnorePeeled ignores all peeled reference names. This is the default behavior.
IgnorePeeled PeelingOption = 0
// OnlyPeeled returns only peeled reference names.
OnlyPeeled PeelingOption = 1
// AppendPeeled appends peeled reference names to the reference list.
AppendPeeled PeelingOption = 2
)
// CleanOptions describes how a clean should be performed.
type CleanOptions struct {
Dir bool
@@ -601,7 +707,13 @@ var (
)
// Validate validates the fields and sets the default values.
//
// TODO: deprecate in favor of Validate(r *Repository) in v6.
func (o *GrepOptions) Validate(w *Worktree) error {
return o.validate(w.r)
}
func (o *GrepOptions) validate(r *Repository) error {
if !o.CommitHash.IsZero() && o.ReferenceName != "" {
return ErrHashOrReference
}
@@ -609,7 +721,7 @@ func (o *GrepOptions) Validate(w *Worktree) error {
// If none of CommitHash and ReferenceName are provided, set commit hash of
// the repository's head.
if o.CommitHash.IsZero() && o.ReferenceName == "" {
ref, err := w.r.Head()
ref, err := r.Head()
if err != nil {
return err
}
@@ -632,3 +744,13 @@ type PlainOpenOptions struct {
// Validate validates the fields and sets the default values.
func (o *PlainOpenOptions) Validate() error { return nil }
type PlainInitOptions struct {
InitOptions
// Determines if the repository will have a worktree (non-bare) or not (bare).
Bare bool
ObjectFormat formatcfg.ObjectFormat
}
// Validate validates the fields and sets the default values.
func (o *PlainInitOptions) Validate() error { return nil }

35
vendor/github.com/go-git/go-git/v5/oss-fuzz.sh generated vendored Normal file
View File

@@ -0,0 +1,35 @@
#!/bin/bash -eu
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################

# OSS-Fuzz build script: prepares the module and compiles each native Go fuzz
# target into a libFuzzer-style binary via compile_native_go_fuzzer.

# Fetch module dependencies plus the shim that adapts Go 1.18 native fuzzing
# to the oss-fuzz build.
go mod download
go get github.com/AdamKorcz/go-118-fuzz-build/testing

# Under a sanitizer build (not pure coverage), patch test files so the fuzz
# build compiles: comment out an incompatible suite test, splice shared test
# helpers into files the fuzzers need, and disable colliding transport tests.
if [ "$SANITIZER" != "coverage" ]; then
	sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go
	sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go
	sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go
	sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go
fi

# One fuzzer binary per fuzz target.
compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser
compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config
compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode
compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp
compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint

View File

@@ -133,7 +133,7 @@ func (m FileMode) IsMalformed() bool {
m != Submodule
}
// String returns the FileMode as a string in the standatd git format,
// String returns the FileMode as a string in the standard git format,
// this is, an octal number padded with ceros to 7 digits. Malformed
// modes are printed in that same format, for easier debugging.
//

View File

@@ -11,6 +11,10 @@ type Encoder struct {
w io.Writer
}
var (
subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
)
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w}
@@ -48,8 +52,7 @@ func (e *Encoder) encodeSection(s *Section) error {
}
func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
//TODO: escape
if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil {
return err
}
@@ -58,12 +61,14 @@ func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
func (e *Encoder) encodeOptions(opts Options) error {
for _, o := range opts {
pattern := "\t%s = %s\n"
if strings.Contains(o.Value, "\\") {
pattern = "\t%s = %q\n"
var value string
if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") {
value = `"`+valueReplacer.Replace(o.Value)+`"`
} else {
value = o.Value
}
if err := e.printf(pattern, o.Key, o.Value); err != nil {
if err := e.printf("\t%s = %s\n", o.Key, value); err != nil {
return err
}
}

View File

@@ -0,0 +1,53 @@
package config
// RepositoryFormatVersion represents the repository format version,
// as per defined at:
//
// https://git-scm.com/docs/repository-version
type RepositoryFormatVersion string
const (
// Version_0 is the format defined by the initial version of git,
// including but not limited to the format of the repository
// directory, the repository configuration file, and the object
// and ref storage.
//
// Specifying the complete behavior of git is beyond the scope
// of this document.
Version_0 = "0"
// Version_1 is identical to version 0, with the following exceptions:
//
// 1. When reading the core.repositoryformatversion variable, a git
// implementation which supports version 1 MUST also read any
// configuration keys found in the extensions section of the
// configuration file.
//
// 2. If a version-1 repository specifies any extensions.* keys that
// the running git has not implemented, the operation MUST NOT proceed.
// Similarly, if the value of any known key is not understood by the
// implementation, the operation MUST NOT proceed.
//
// Note that if no extensions are specified in the config file, then
// core.repositoryformatversion SHOULD be set to 0 (setting it to 1 provides
// no benefit, and makes the repository incompatible with older
// implementations of git).
Version_1 = "1"
// DefaultRepositoryFormatVersion holds the default repository format version.
DefaultRepositoryFormatVersion = Version_0
)
// ObjectFormat defines the object format.
type ObjectFormat string
const (
// SHA1 represents the object format used for SHA1.
SHA1 ObjectFormat = "sha1"
// SHA256 represents the object format used for SHA256.
SHA256 ObjectFormat = "sha256"
// DefaultObjectFormat holds the default object format.
DefaultObjectFormat = SHA1
)

View File

@@ -103,7 +103,7 @@ func (s *Section) RemoveSubsection(name string) *Section {
return s
}
// Option return the value for the specified key. Empty string is returned if
// Option returns the value for the specified key. Empty string is returned if
// key does not exists.
func (s *Section) Option(key string) string {
return s.Options.Get(key)

View File

@@ -9,7 +9,7 @@ import (
type Operation int
const (
// Equal item represents a equals diff.
// Equal item represents an equals diff.
Equal Operation = iota
// Add item represents an insert diff.
Add
@@ -26,15 +26,15 @@ type Patch interface {
Message() string
}
// FilePatch represents the necessary steps to transform one file to another.
// FilePatch represents the necessary steps to transform one file into another.
type FilePatch interface {
// IsBinary returns true if this patch is representing a binary file.
IsBinary() bool
// Files returns the from and to Files, with all the necessary metadata to
// Files returns the from and to Files, with all the necessary metadata
// about them. If the patch creates a new file, "from" will be nil.
// If the patch deletes a file, "to" will be nil.
Files() (from, to File)
// Chunks returns a slice of ordered changes to transform "from" File to
// Chunks returns a slice of ordered changes to transform "from" File into
// "to" File. If the file is a binary one, Chunks will be empty.
Chunks() []Chunk
}
@@ -49,7 +49,7 @@ type File interface {
Path() string
}
// Chunk represents a portion of a file transformation to another.
// Chunk represents a portion of a file transformation into another.
type Chunk interface {
// Content contains the portion of the file.
Content() string

View File

@@ -3,27 +3,32 @@ package gitignore
import (
"bufio"
"bytes"
"io/ioutil"
"io"
"os"
"strings"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-git/v5/internal/path_util"
"github.com/go-git/go-git/v5/plumbing/format/config"
gioutil "github.com/go-git/go-git/v5/utils/ioutil"
)
const (
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
infoExcludeFile = gitDir + "/info/exclude"
)
// readIgnoreFile reads a specific git ignore file.
func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
ignoreFile, _ = path_util.ReplaceTildeWithHome(ignoreFile)
f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
if err == nil {
defer f.Close()
@@ -42,10 +47,14 @@ func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps [
return
}
// ReadPatterns reads gitignore patterns recursively traversing through the directory
// structure. The result is in the ascending order of priority (last higher).
// ReadPatterns reads the .git/info/exclude and then the gitignore patterns
// recursively traversing through the directory structure. The result is in
// the ascending order of priority (last higher).
func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
ps, _ = readIgnoreFile(fs, path, gitignoreFile)
ps, _ = readIgnoreFile(fs, path, infoExcludeFile)
subps, _ := readIgnoreFile(fs, path, gitignoreFile)
ps = append(ps, subps...)
var fis []os.FileInfo
fis, err = fs.ReadDir(fs.Join(path...))
@@ -81,7 +90,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
defer gioutil.CheckClose(f, &err)
b, err := ioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
return
}

View File

@@ -39,6 +39,8 @@ type pattern struct {
// ParsePattern parses a gitignore pattern string into the Pattern structure.
func ParsePattern(p string, domain []string) Pattern {
// storing domain, copy it to ensure it isn't changed externally
domain = append([]string(nil), domain...)
res := pattern{domain: domain}
if strings.HasPrefix(p, inclusionPrefix) {

View File

@@ -6,20 +6,21 @@ import (
"errors"
"io"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
ErrUnsupportedVersion = errors.New("Unsupported version")
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
ErrMalformedIdxFile = errors.New("malformed IDX file")
)
const (
fanout = 256
objectIDLength = 20
objectIDLength = hash.Size
)
// Decoder reads and decodes idx files from an input stream.

View File

@@ -1,10 +1,9 @@
package idxfile
import (
"crypto/sha1"
"hash"
"io"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@@ -16,7 +15,7 @@ type Encoder struct {
// NewEncoder returns a new stream encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
@@ -133,10 +132,10 @@ func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
return 0, err
}
copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20])
copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:hash.Size])
if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
return 0, err
}
return 40, nil
return hash.HexSize, nil
}

View File

@@ -8,6 +8,7 @@ import (
encbin "encoding/binary"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
)
const (
@@ -53,8 +54,8 @@ type MemoryIndex struct {
Offset32 [][]byte
CRC32 [][]byte
Offset64 []byte
PackfileChecksum [20]byte
IdxChecksum [20]byte
PackfileChecksum [hash.Size]byte
IdxChecksum [hash.Size]byte
offsetHash map[int64]plumbing.Hash
offsetHashIsFull bool

View File

@@ -84,11 +84,8 @@ func (w *Writer) OnFooter(h plumbing.Hash) error {
w.checksum = h
w.finished = true
_, err := w.createIndex()
if err != nil {
return err
}
return nil
return err
}
// creatIndex returns a filled MemoryIndex with the information filled by
@@ -139,15 +136,23 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
offset := o.Offset
if offset > math.MaxInt32 {
offset = w.addOffset64(offset)
var err error
offset, err = w.addOffset64(offset)
if err != nil {
return nil, err
}
}
buf.Truncate(0)
binary.WriteUint32(buf, uint32(offset))
if err := binary.WriteUint32(buf, uint32(offset)); err != nil {
return nil, err
}
idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
buf.Truncate(0)
binary.WriteUint32(buf, o.CRC32)
if err := binary.WriteUint32(buf, o.CRC32); err != nil {
return nil, err
}
idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
}
@@ -161,15 +166,17 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
return idx, nil
}
func (w *Writer) addOffset64(pos uint64) uint64 {
func (w *Writer) addOffset64(pos uint64) (uint64, error) {
buf := new(bytes.Buffer)
binary.WriteUint64(buf, pos)
w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
if err := binary.WriteUint64(buf, pos); err != nil {
return 0, err
}
w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
index := uint64(w.offset64 | (1 << 31))
w.offset64++
return index
return index, nil
}
func (o objects) Len() int {

View File

@@ -3,15 +3,14 @@ package index
import (
"bufio"
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"io/ioutil"
"strconv"
"time"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@@ -49,7 +48,7 @@ type Decoder struct {
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
return &Decoder{
r: io.TeeReader(r, h),
hash: h,
@@ -202,7 +201,7 @@ func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
entrySize := read + len(e.Name)
padLen := 8 - entrySize%8
_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))
_, err := io.CopyN(io.Discard, d.r, int64(padLen))
return err
}

View File

@@ -2,19 +2,18 @@ package index
import (
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"sort"
"time"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
var (
// EncodeVersionSupported is the range of supported index versions
EncodeVersionSupported uint32 = 2
EncodeVersionSupported uint32 = 3
// ErrInvalidTimestamp is returned by Encode if a Index with a Entry with
// negative timestamp values
@@ -29,16 +28,16 @@ type Encoder struct {
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
// Encode writes the Index to the stream of the encoder.
func (e *Encoder) Encode(idx *Index) error {
// TODO: support versions v3 and v4
// TODO: support v4
// TODO: support extensions
if idx.Version != EncodeVersionSupported {
if idx.Version > EncodeVersionSupported {
return ErrUnsupportedVersion
}
@@ -68,8 +67,12 @@ func (e *Encoder) encodeEntries(idx *Index) error {
if err := e.encodeEntry(entry); err != nil {
return err
}
entryLength := entryHeaderLength
if entry.IntentToAdd || entry.SkipWorktree {
entryLength += 2
}
wrote := entryHeaderLength + len(entry.Name)
wrote := entryLength + len(entry.Name)
if err := e.padEntry(wrote); err != nil {
return err
}
@@ -79,10 +82,6 @@ func (e *Encoder) encodeEntries(idx *Index) error {
}
func (e *Encoder) encodeEntry(entry *Entry) error {
if entry.IntentToAdd || entry.SkipWorktree {
return ErrUnsupportedVersion
}
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
if err != nil {
return err
@@ -110,9 +109,25 @@ func (e *Encoder) encodeEntry(entry *Entry) error {
entry.GID,
entry.Size,
entry.Hash[:],
flags,
}
flagsFlow := []interface{}{flags}
if entry.IntentToAdd || entry.SkipWorktree {
var extendedFlags uint16
if entry.IntentToAdd {
extendedFlags |= intentToAddMask
}
if entry.SkipWorktree {
extendedFlags |= skipWorkTreeMask
}
flagsFlow = []interface{}{flags | entryExtended, extendedFlags}
}
flow = append(flow, flagsFlow...)
if err := binary.Write(e.w, flow...); err != nil {
return err
}

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/go-git/go-git/v5/plumbing"
@@ -211,3 +212,20 @@ type EndOfIndexEntry struct {
// their contents).
Hash plumbing.Hash
}
// SkipUnless applies patterns in the form of A, A/B, A/B/C
// to the index to prevent the files from being checked out
func (i *Index) SkipUnless(patterns []string) {
for _, e := range i.Entries {
var include bool
for _, pattern := range patterns {
if strings.HasPrefix(e.Name, pattern) {
include = true
break
}
}
if !include {
e.SkipWorktree = true
}
}
}

View File

@@ -1,13 +1,13 @@
package objfile
import (
"compress/zlib"
"errors"
"io"
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@@ -20,20 +20,22 @@ var (
// Reader implements io.ReadCloser. Close should be called when finished with
// the Reader. Close will not close the underlying io.Reader.
type Reader struct {
multi io.Reader
zlib io.ReadCloser
hasher plumbing.Hasher
multi io.Reader
zlib io.Reader
zlibref sync.ZLibReader
hasher plumbing.Hasher
}
// NewReader returns a new Reader reading from r.
func NewReader(r io.Reader) (*Reader, error) {
zlib, err := zlib.NewReader(r)
zlib, err := sync.GetZlibReader(r)
if err != nil {
return nil, packfile.ErrZLib.AddDetails(err.Error())
}
return &Reader{
zlib: zlib,
zlib: zlib.Reader,
zlibref: zlib,
}, nil
}
@@ -110,5 +112,6 @@ func (r *Reader) Hash() plumbing.Hash {
// Close releases any resources consumed by the Reader. Calling Close does not
// close the wrapped io.Reader originally passed to NewReader.
func (r *Reader) Close() error {
return r.zlib.Close()
sync.PutZlibReader(r.zlibref)
return nil
}

View File

@@ -7,6 +7,7 @@ import (
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@@ -18,9 +19,9 @@ var (
// not close the underlying io.Writer.
type Writer struct {
raw io.Writer
zlib io.WriteCloser
hasher plumbing.Hasher
multi io.Writer
zlib *zlib.Writer
closed bool
pending int64 // number of unwritten bytes
@@ -31,9 +32,10 @@ type Writer struct {
// The returned Writer implements io.WriteCloser. Close should be called when
// finished with the Writer. Close will not close the underlying io.Writer.
func NewWriter(w io.Writer) *Writer {
zlib := sync.GetZlibWriter(w)
return &Writer{
raw: w,
zlib: zlib.NewWriter(w),
zlib: zlib,
}
}
@@ -100,6 +102,7 @@ func (w *Writer) Hash() plumbing.Hash {
// Calling Close does not close the wrapped io.Writer originally passed to
// NewWriter.
func (w *Writer) Close() error {
defer sync.PutZlibWriter(w.zlib)
if err := w.zlib.Close(); err != nil {
return err
}

View File

@@ -1,10 +1,7 @@
package packfile
import (
"bytes"
"compress/zlib"
"io"
"sync"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
@@ -61,18 +58,3 @@ func WritePackfileToObjectStorage(
return err
}
var bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
var zlibReaderPool = sync.Pool{
New: func() interface{} {
r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
return r
},
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
@@ -16,8 +17,11 @@ const (
s = 16
// https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
// Max size of a copy operation (64KB)
// Max size of a copy operation (64KB).
maxCopySize = 64 * 1024
// Min size of a copy operation.
minCopySize = 4
)
// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object,
@@ -43,18 +47,16 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbin
defer ioutil.CheckClose(tr, &err)
bb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(bb)
bb.Reset()
bb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(bb)
_, err = bb.ReadFrom(br)
if err != nil {
return nil, err
}
tb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(tb)
tb.Reset()
tb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(tb)
_, err = tb.ReadFrom(tr)
if err != nil {
@@ -80,9 +82,8 @@ func DiffDelta(src, tgt []byte) []byte {
}
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@@ -90,9 +91,8 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
index.init(src)
}
ibuf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(ibuf)
ibuf.Reset()
ibuf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(ibuf)
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)

View File

@@ -2,11 +2,11 @@ package packfile
import (
"compress/zlib"
"crypto/sha1"
"fmt"
"io"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
@@ -28,7 +28,7 @@ type Encoder struct {
// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
h := plumbing.Hasher{
Hash: sha1.New(),
Hash: hash.New(hash.CryptoType),
}
mw := io.MultiWriter(w, h)
ow := newOffsetWriter(mw)
@@ -131,11 +131,7 @@ func (e *Encoder) entry(o *ObjectToPack) (err error) {
defer ioutil.CheckClose(or, &err)
_, err = io.Copy(e.zw, or)
if err != nil {
return err
}
return nil
return err
}
func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {

View File

@@ -7,19 +7,20 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
// FSObject is an object from the packfile on the filesystem.
type FSObject struct {
hash plumbing.Hash
h *ObjectHeader
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
hash plumbing.Hash
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
largeObjectThreshold int64
}
// NewFSObject creates a new filesystem object.
@@ -32,16 +33,18 @@ func NewFSObject(
fs billy.Filesystem,
path string,
cache cache.Object,
largeObjectThreshold int64,
) *FSObject {
return &FSObject{
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
largeObjectThreshold: largeObjectThreshold,
}
}
@@ -62,7 +65,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) {
return nil, err
}
p := NewPackfileWithCache(o.index, nil, f, o.cache)
p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
// We have a big object
h, err := p.objectHeaderAtOffset(o.offset)
if err != nil {
return nil, err
}
r, err := p.getReaderDirect(h)
if err != nil {
_ = f.Close()
return nil, err
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
r, err := p.getObjectContent(o.offset)
if err != nil {
_ = f.Close()
@@ -100,17 +117,3 @@ func (o *FSObject) Type() plumbing.ObjectType {
func (o *FSObject) Writer() (io.WriteCloser, error) {
return nil, nil
}
type objectReader struct {
io.ReadCloser
f billy.File
}
func (r *objectReader) Close() error {
if err := r.ReadCloser.Close(); err != nil {
_ = r.f.Close()
return err
}
return r.f.Close()
}

View File

@@ -2,6 +2,7 @@ package packfile
import (
"bytes"
"fmt"
"io"
"os"
@@ -11,6 +12,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
largeObjectThreshold int64
}
// NewPackfileWithCache creates a new Packfile with the given object cache.
@@ -50,6 +53,7 @@ func NewPackfileWithCache(
fs billy.Filesystem,
file billy.File,
cache cache.Object,
largeObjectThreshold int64,
) *Packfile {
s := NewScanner(file)
return &Packfile{
@@ -59,6 +63,7 @@ func NewPackfileWithCache(
s,
cache,
make(map[int64]plumbing.ObjectType),
largeObjectThreshold,
}
}
@@ -66,8 +71,8 @@ func NewPackfileWithCache(
// and packfile idx.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
}
// Get retrieves the encoded object in the packfile with the given hash.
@@ -133,9 +138,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
@@ -222,9 +226,9 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
// For delta objects we read the delta data and apply the small object
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
@@ -263,6 +267,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
p.fs,
p.file.Name(),
p.deltaBaseCache,
p.largeObjectThreshold,
), nil
}
@@ -282,6 +287,49 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
func asyncReader(p *Packfile) (io.ReadCloser, error) {
reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
zr, err := sync.GetZlibReader(reader)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return asyncReader(p)
case plumbing.REFDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readREFDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
case plumbing.OFSDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readOFSDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
default:
return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
}
}
func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
@@ -323,9 +371,9 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err err
}
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@@ -334,6 +382,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
var err error
base, ok := p.cacheGet(h.Reference)
if !ok {
base, err = p.Get(h.Reference)
if err != nil {
return nil, err
}
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
var err error
@@ -353,9 +415,9 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@@ -364,6 +426,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
hash, err := p.FindHash(h.OffsetReference)
if err != nil {
return nil, err
}
base, err := p.objectAtOffset(h.OffsetReference, hash)
if err != nil {
return nil, err
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
if err != nil {

View File

@@ -3,13 +3,14 @@ package packfile
import (
"bytes"
"errors"
"fmt"
"io"
stdioutil "io/ioutil"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@@ -46,7 +47,6 @@ type Parser struct {
oi []*objectInfo
oiByHash map[plumbing.Hash]*objectInfo
oiByOffset map[int64]*objectInfo
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash
cache *cache.BufferLRU
@@ -175,12 +175,25 @@ func (p *Parser) init() error {
return nil
}
type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error
type lazyObjectWriter interface {
// LazyWriter enables an object to be lazily written.
// It returns:
// - w: a writer to receive the object's content.
// - lwh: a func to write the object header.
// - err: any error from the initial writer creation process.
//
// Note that if the object header is not written BEFORE the writer
// is used, this will result in an invalid object.
LazyWriter() (w io.WriteCloser, lwh objectHeaderWriter, err error)
}
func (p *Parser) indexObjects() error {
buf := new(bytes.Buffer)
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for i := uint32(0); i < p.count; i++ {
buf.Reset()
oh, err := p.scanner.NextObjectHeader()
if err != nil {
return err
@@ -220,39 +233,76 @@ func (p *Parser) indexObjects() error {
ota = newBaseObject(oh.Offset, oh.Length, t)
}
_, crc, err := p.scanner.NextObject(buf)
hasher := plumbing.NewHasher(oh.Type, oh.Length)
writers := []io.Writer{hasher}
var obj *plumbing.MemoryObject
// Lazy writing is only available for non-delta objects.
if p.storage != nil && !delta {
// When a storage is set and supports lazy writing,
// use that instead of creating a memory object.
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, lwh, err := low.LazyWriter()
if err != nil {
return err
}
if err = lwh(oh.Type, oh.Length); err != nil {
return err
}
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
writers = append(writers, obj)
}
}
if delta && !p.scanner.IsSeekable {
buf.Reset()
buf.Grow(int(oh.Length))
writers = append(writers, buf)
}
mw := io.MultiWriter(writers...)
_, crc, err := p.scanner.NextObject(mw)
if err != nil {
return err
}
// Non delta objects needs to be added into the storage. This
// is only required when lazy writing is not supported.
if obj != nil {
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
ota.Crc32 = crc
ota.Length = oh.Length
data := buf.Bytes()
if !delta {
sha1, err := getSHA1(ota.Type, data)
if err != nil {
return err
sha1 := hasher.Sum()
// Move children of placeholder parent into actual parent, in case this
// was a non-external delta reference.
if placeholder, ok := p.oiByHash[sha1]; ok {
ota.Children = placeholder.Children
for _, c := range ota.Children {
c.Parent = ota
}
}
ota.SHA1 = sha1
p.oiByHash[ota.SHA1] = ota
}
if p.storage != nil && !delta {
obj := new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
if _, err := obj.Write(data); err != nil {
return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
if delta && !p.scanner.IsSeekable {
data := buf.Bytes()
p.deltas[oh.Offset] = make([]byte, len(data))
copy(p.deltas[oh.Offset], data)
}
@@ -265,28 +315,37 @@ func (p *Parser) indexObjects() error {
}
func (p *Parser) resolveDeltas() error {
buf := &bytes.Buffer{}
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for _, obj := range p.oi {
buf.Reset()
buf.Grow(int(obj.Length))
err := p.get(obj, buf)
if err != nil {
return err
}
content := buf.Bytes()
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
}
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, nil); err != nil {
return err
}
if !obj.IsDelta() && len(obj.Children) > 0 {
// Dealing with an io.ReaderAt object, means we can
// create it once and reuse across all children.
r := bytes.NewReader(buf.Bytes())
for _, child := range obj.Children {
if err := p.resolveObject(stdioutil.Discard, child, content); err != nil {
// Even though we are discarding the output, we still need to read it to
// so that the scanner can advance to the next object, and the SHA1 can be
// calculated.
if err := p.resolveObject(io.Discard, child, r); err != nil {
return err
}
p.resolveExternalRef(child)
}
// Remove the delta from the cache.
@@ -299,6 +358,16 @@ func (p *Parser) resolveDeltas() error {
return nil
}
func (p *Parser) resolveExternalRef(o *objectInfo) {
if ref, ok := p.oiByHash[o.SHA1]; ok && ref.ExternalRef {
p.oiByHash[o.SHA1] = o
o.Children = ref.Children
for _, c := range o.Children {
c.Parent = o
}
}
}
func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
if !o.ExternalRef { // skip cache check for placeholder parents
b, ok := p.cache.Get(o.Offset)
@@ -336,16 +405,15 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
if o.DiskType.IsDelta() {
b := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(b)
b.Reset()
b := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(b)
buf.Grow(int(o.Length))
err := p.get(o.Parent, b)
if err != nil {
return err
}
base := b.Bytes()
err = p.resolveObject(buf, o, base)
err = p.resolveObject(buf, o, bytes.NewReader(b.Bytes()))
if err != nil {
return err
}
@@ -356,6 +424,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
}
// If the scanner is seekable, caching this data into
// memory by offset seems wasteful.
// There is a trade-off to be considered here in terms
// of execution time vs memory consumption.
//
// TODO: improve seekable execution time, so that we can
// skip this cache.
if len(o.Children) > 0 {
data := make([]byte, buf.Len())
copy(data, buf.Bytes())
@@ -364,41 +439,75 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
return nil
}
// resolveObject resolves an object from base, using information
// provided by o.
//
// This call has the side-effect of changing field values
// from the object info o:
// - Type: OFSDeltaObject may become the target type (e.g. Blob).
// - Size: The size may be update with the target size.
// - Hash: Zero hashes will be calculated as part of the object
// resolution. Hence why this process can't be avoided even when w
// is an io.Discard.
//
// base must be an io.ReaderAt, which is a requirement from
// patchDeltaStream. The main reason being that reversing an
// delta object may lead to going backs and forths within base,
// which is not supported by io.Reader.
func (p *Parser) resolveObject(
w io.Writer,
o *objectInfo,
base []byte,
base io.ReaderAt,
) error {
if !o.DiskType.IsDelta() {
return nil
}
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
err := p.readData(buf, o)
if err != nil {
return err
}
data := buf.Bytes()
data, err = applyPatchBase(o, data, base)
writers := []io.Writer{w}
var obj *plumbing.MemoryObject
var lwh objectHeaderWriter
if p.storage != nil {
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, wh, err := low.LazyWriter()
if err != nil {
return err
}
lwh = wh
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
ow, err := obj.Writer()
if err != nil {
return err
}
writers = append(writers, ow)
}
}
mw := io.MultiWriter(writers...)
err = applyPatchBase(o, base, buf, mw, lwh)
if err != nil {
return err
}
if p.storage != nil {
obj := new(plumbing.MemoryObject)
obj.SetSize(o.Size())
if obj != nil {
obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil {
return err
}
obj.SetSize(o.Size()) // Size here is correct as it was populated by applyPatchBase.
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
_, err = w.Write(data)
return err
}
@@ -422,24 +531,31 @@ func (p *Parser) readData(w io.Writer, o *objectInfo) error {
return nil
}
func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
patched, err := PatchDelta(base, data)
// applyPatchBase applies the patch to target.
//
// Note that ota will be updated based on the description in resolveObject.
func applyPatchBase(ota *objectInfo, base io.ReaderAt, delta io.Reader, target io.Writer, wh objectHeaderWriter) error {
if target == nil {
return fmt.Errorf("cannot apply patch against nil target")
}
typ := ota.Type
if ota.SHA1 == plumbing.ZeroHash {
typ = ota.Parent.Type
}
sz, h, err := patchDeltaWriter(target, base, delta, typ, wh)
if err != nil {
return nil, err
return err
}
if ota.SHA1 == plumbing.ZeroHash {
ota.Type = ota.Parent.Type
sha1, err := getSHA1(ota.Type, patched)
if err != nil {
return nil, err
}
ota.SHA1 = sha1
ota.Length = int64(len(patched))
ota.Type = typ
ota.Length = int64(sz)
ota.SHA1 = h
}
return patched, nil
return nil
}
func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {

View File

@@ -1,12 +1,16 @@
package packfile
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
@@ -14,7 +18,33 @@ import (
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for details about the delta format.
const deltaSizeMin = 4
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
type offset struct {
mask byte
shift uint
}
var offsets = []offset{
{mask: 0x01, shift: 0},
{mask: 0x02, shift: 8},
{mask: 0x04, shift: 16},
{mask: 0x08, shift: 24},
}
var sizes = []offset{
{mask: 0x10, shift: 0},
{mask: 0x20, shift: 8},
{mask: 0x40, shift: 16},
}
// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
@@ -32,18 +62,16 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
defer ioutil.CheckClose(w, &err)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, err = buf.ReadFrom(r)
if err != nil {
return err
}
src := buf.Bytes()
dst := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(dst)
dst.Reset()
dst := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(dst)
err = patchDelta(dst, src, delta)
if err != nil {
return err
@@ -51,17 +79,12 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
target.SetSize(int64(dst.Len()))
b := byteSlicePool.Get().([]byte)
_, err = io.CopyBuffer(w, dst, b)
byteSlicePool.Put(b)
b := sync.GetByteSlice()
_, err = io.CopyBuffer(w, dst, *b)
sync.PutByteSlice(b)
return err
}
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
// PatchDelta returns the result of applying the modification deltas in delta to src.
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
// is not copy from source or copy from delta (ErrDeltaCmd).
@@ -73,8 +96,137 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
return b.Bytes(), nil
}
func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
if srcSz != uint(base.Size()) {
return nil, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
remainingTargetSz := targetSz
dstRd, dstWr := io.Pipe()
go func() {
baseRd, err := base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
defer baseRd.Close()
baseBuf := bufio.NewReader(baseRd)
basePos := uint(0)
for {
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
switch {
case isCopyFromSrc(cmd):
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
_ = dstWr.Close()
return
}
discard := offset - basePos
if basePos > offset {
_ = baseRd.Close()
baseRd, err = base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
baseBuf.Reset(baseRd)
discard = offset
}
for discard > math.MaxInt32 {
n, err := baseBuf.Discard(math.MaxInt32)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
for discard > 0 {
n, err := baseBuf.Discard(int(discard))
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
basePos += sz
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
default:
_ = dstWr.CloseWithError(ErrDeltaCmd)
return
}
if remainingTargetSz <= 0 {
_ = dstWr.Close()
return
}
}
}()
return dstRd, nil
}
func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
if len(delta) < minCopySize {
return ErrInvalidDelta
}
@@ -95,7 +247,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
cmd = delta[0]
delta = delta[1:]
if isCopyFromSrc(cmd) {
switch {
case isCopyFromSrc(cmd):
var offset, sz uint
var err error
offset, delta, err = decodeOffset(cmd, delta)
@@ -114,7 +268,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
}
dst.Write(src[offset : offset+sz])
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return ErrInvalidDelta
@@ -127,7 +282,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
dst.Write(delta[0:sz])
remainingTargetSz -= sz
delta = delta[sz:]
} else {
default:
return ErrDeltaCmd
}
@@ -139,6 +295,107 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
return nil
}
func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader,
typ plumbing.ObjectType, writeHeader objectHeaderWriter) (uint, plumbing.Hash, error) {
deltaBuf := bufio.NewReaderSize(delta, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
if r, ok := base.(*bytes.Reader); ok && srcSz != uint(r.Size()) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
// If header still needs to be written, caller will provide
// a LazyObjectWriterHeader. This seems to be the case when
// dealing with thin-packs.
if writeHeader != nil {
err = writeHeader(typ, int64(targetSz))
if err != nil {
return 0, plumbing.ZeroHash, fmt.Errorf("could not lazy write header: %w", err)
}
}
remainingTargetSz := targetSz
hasher := plumbing.NewHasher(typ, int64(targetSz))
mw := io.MultiWriter(dst, hasher)
bufp := sync.GetByteSlice()
defer sync.PutByteSlice(bufp)
sr := io.NewSectionReader(base, int64(0), int64(srcSz))
// Keep both the io.LimitedReader types, so we can reset N.
baselr := io.LimitReader(sr, 0).(*io.LimitedReader)
deltalr := io.LimitReader(deltaBuf, 0).(*io.LimitedReader)
for {
buf := *bufp
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
if err != nil {
return 0, plumbing.ZeroHash, err
}
if isCopyFromSrc(cmd) {
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
return 0, plumbing.ZeroHash, err
}
if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
return 0, plumbing.ZeroHash, err
}
baselr.N = int64(sz)
if _, err := io.CopyBuffer(mw, baselr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
deltalr.N = int64(sz)
if _, err := io.CopyBuffer(mw, deltalr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else {
return 0, plumbing.ZeroHash, err
}
if remainingTargetSz <= 0 {
break
}
}
return targetSz, hasher.Sum(), nil
}
// Decodes a number encoded as an unsigned LEB128 at the start of some
// binary data and returns the decoded number and the rest of the
// stream.
@@ -161,78 +418,95 @@ func decodeLEB128(input []byte) (uint, []byte) {
return num, input[sz:]
}
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
var num, sz uint
for {
b, err := input.ReadByte()
if err != nil {
return 0, err
}
num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
sz++
if uint(b)&continuation == 0 {
break
}
}
return num, nil
}
func isCopyFromSrc(cmd byte) bool {
return (cmd & 0x80) != 0
return (cmd & continuation) != 0
}
func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
return (cmd&continuation) == 0 && cmd != 0
}
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint
for _, o := range offsets {
if (cmd & o.mask) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << o.shift
}
}
return offset, nil
}
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
for _, o := range offsets {
if (cmd & o.mask) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << o.shift
delta = delta[1:]
}
offset = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x02) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x04) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 16
delta = delta[1:]
}
if (cmd & 0x08) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 24
delta = delta[1:]
}
return offset, delta, nil
}
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint
for _, s := range sizes {
if (cmd & s.mask) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << s.shift
}
}
if sz == 0 {
sz = maxCopySize
}
return sz, nil
}
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
for _, s := range sizes {
if (cmd & s.mask) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << s.shift
delta = delta[1:]
}
sz = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x20) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x40) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 16
delta = delta[1:]
}
if sz == 0 {
sz = 0x10000
sz = maxCopySize
}
return sz, delta, nil

View File

@@ -3,17 +3,15 @@ package packfile
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"hash"
"hash/crc32"
"io"
stdioutil "io/ioutil"
"sync"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@@ -114,7 +112,7 @@ func (s *Scanner) Header() (version, objects uint32, err error) {
return
}
// readSignature reads an returns the signature field in the packfile.
// readSignature reads a returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
if _, err := io.ReadFull(s.r, sig); err != nil {
@@ -243,7 +241,7 @@ func (s *Scanner) discardObjectIfNeeded() error {
}
h := s.pendingObject
n, _, err := s.NextObject(stdioutil.Discard)
n, _, err := s.NextObject(io.Discard)
if err != nil {
return err
}
@@ -320,29 +318,38 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
return
}
// ReadObject returns a reader for the object content and an error
func (s *Scanner) ReadObject() (io.ReadCloser, error) {
s.pendingObject = nil
zr, err := sync.GetZlibReader(s.r)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
// ReadRegularObject reads and write a non-deltified object
// from it zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
zr := zlibReaderPool.Get().(io.ReadCloser)
defer zlibReaderPool.Put(zr)
zr, err := sync.GetZlibReader(s.r)
defer sync.PutZlibReader(zr)
if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
if err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
defer ioutil.CheckClose(zr.Reader, &err)
buf := sync.GetByteSlice()
n, err = io.CopyBuffer(w, zr.Reader, *buf)
sync.PutByteSlice(buf)
return
}
var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
@@ -372,9 +379,10 @@ func (s *Scanner) Checksum() (plumbing.Hash, error) {
// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
buf := sync.GetByteSlice()
_, err := io.CopyBuffer(io.Discard, s.r, *buf)
sync.PutByteSlice(buf)
return err
}
@@ -384,13 +392,13 @@ func (s *Scanner) Flush() error {
}
// scannerReader has the following characteristics:
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penality for performing small writes
// to the crc32 hash writer.
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penalty for performing small writes
// to the crc32 hash writer.
type scannerReader struct {
reader io.Reader
crc io.Writer

View File

@@ -7,6 +7,8 @@ import (
"errors"
"fmt"
"io"
"github.com/go-git/go-git/v5/utils/trace"
)
// An Encoder writes pkt-lines to an output stream.
@@ -43,6 +45,7 @@ func NewEncoder(w io.Writer) *Encoder {
// Flush encodes a flush-pkt to the output stream.
func (e *Encoder) Flush() error {
defer trace.Packet.Print("packet: > 0000")
_, err := e.w.Write(FlushPkt)
return err
}
@@ -70,6 +73,7 @@ func (e *Encoder) encodeLine(p []byte) error {
}
n := len(p) + 4
defer trace.Packet.Printf("packet: > %04x %s", n, p)
if _, err := e.w.Write(asciiHex16(n)); err != nil {
return err
}

View File

@@ -0,0 +1,51 @@
package pktline
import (
"bytes"
"errors"
"io"
"strings"
)
var (
// ErrInvalidErrorLine is returned by Decode when the packet line is not an
// error line.
ErrInvalidErrorLine = errors.New("expected an error-line")
errPrefix = []byte("ERR ")
)
// ErrorLine is a packet line that contains an error message.
// Once this packet is sent by client or server, the data transfer process is
// terminated.
// See https://git-scm.com/docs/pack-protocol#_pkt_line_format
type ErrorLine struct {
Text string
}
// Error implements the error interface.
func (e *ErrorLine) Error() string {
return e.Text
}
// Encode encodes the ErrorLine into a packet line.
func (e *ErrorLine) Encode(w io.Writer) error {
p := NewEncoder(w)
return p.Encodef("%s%s\n", string(errPrefix), e.Text)
}
// Decode decodes a packet line into an ErrorLine.
func (e *ErrorLine) Decode(r io.Reader) error {
s := NewScanner(r)
if !s.Scan() {
return s.Err()
}
line := s.Bytes()
if !bytes.HasPrefix(line, errPrefix) {
return ErrInvalidErrorLine
}
e.Text = strings.TrimSpace(string(line[4:]))
return nil
}

View File

@@ -1,8 +1,12 @@
package pktline
import (
"bytes"
"errors"
"io"
"strings"
"github.com/go-git/go-git/v5/utils/trace"
)
const (
@@ -65,6 +69,14 @@ func (s *Scanner) Scan() bool {
return false
}
s.payload = s.payload[:l]
trace.Packet.Printf("packet: < %04x %s", l, s.payload)
if bytes.HasPrefix(s.payload, errPrefix) {
s.err = &ErrorLine{
Text: strings.TrimSpace(string(s.payload[4:])),
}
return false
}
return true
}

View File

@@ -2,15 +2,15 @@ package plumbing
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"hash"
"sort"
"strconv"
"github.com/go-git/go-git/v5/plumbing/hash"
)
// Hash SHA1 hashed content
type Hash [20]byte
type Hash [hash.Size]byte
// ZeroHash is Hash with value zero
var ZeroHash Hash
@@ -46,7 +46,7 @@ type Hasher struct {
}
func NewHasher(t ObjectType, size int64) Hasher {
h := Hasher{sha1.New()}
h := Hasher{hash.New(hash.CryptoType)}
h.Write(t.Bytes())
h.Write([]byte(" "))
h.Write([]byte(strconv.FormatInt(size, 10)))
@@ -74,10 +74,11 @@ func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// IsHash returns true if the given string is a valid hash.
func IsHash(s string) bool {
if len(s) != 40 {
switch len(s) {
case hash.HexSize:
_, err := hex.DecodeString(s)
return err == nil
default:
return false
}
_, err := hex.DecodeString(s)
return err == nil
}

View File

@@ -0,0 +1,60 @@
// package hash provides a way for managing the
// underlying hash implementations used across go-git.
package hash
import (
"crypto"
"fmt"
"hash"
"github.com/pjbgf/sha1cd"
)
// algos is a map of hash algorithms.
var algos = map[crypto.Hash]func() hash.Hash{}
func init() {
reset()
}
// reset resets the default algos value. Can be used after running tests
// that registers new algorithms to avoid side effects.
func reset() {
algos[crypto.SHA1] = sha1cd.New
algos[crypto.SHA256] = crypto.SHA256.New
}
// RegisterHash allows for the hash algorithm used to be overridden.
// This ensures the hash selection for go-git must be explicit, when
// overriding the default value.
func RegisterHash(h crypto.Hash, f func() hash.Hash) error {
if f == nil {
return fmt.Errorf("cannot register hash: f is nil")
}
switch h {
case crypto.SHA1:
algos[h] = f
case crypto.SHA256:
algos[h] = f
default:
return fmt.Errorf("unsupported hash function: %v", h)
}
return nil
}
// Hash is the same as hash.Hash. This allows consumers
// to not having to import this package alongside "hash".
type Hash interface {
hash.Hash
}
// New returns a new Hash for the given hash function.
// It panics if the hash function is not registered.
func New(h crypto.Hash) Hash {
hh, ok := algos[h]
if !ok {
panic(fmt.Sprintf("hash algorithm not registered: %v", h))
}
return hh()
}

View File

@@ -0,0 +1,15 @@
//go:build !sha256
// +build !sha256
package hash
import "crypto"
const (
// CryptoType defines what hash algorithm is being used.
CryptoType = crypto.SHA1
// Size defines the amount of bytes the hash yields.
Size = 20
// HexSize defines the strings size of the hash when represented in hexadecimal.
HexSize = 40
)

View File

@@ -0,0 +1,15 @@
//go:build sha256
// +build sha256
package hash
import "crypto"
const (
// CryptoType defines what hash algorithm is being used.
CryptoType = crypto.SHA256
// Size defines the amount of bytes the hash yields.
Size = 32
// HexSize defines the strings size of the hash when represented in hexadecimal.
HexSize = 64
)

View File

@@ -25,13 +25,13 @@ func (o *MemoryObject) Hash() Hash {
return o.h
}
// Type return the ObjectType
// Type returns the ObjectType
func (o *MemoryObject) Type() ObjectType { return o.t }
// SetType sets the ObjectType
func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
// Size return the size of the object
// Size returns the size of the object
func (o *MemoryObject) Size() int64 { return o.sz }
// SetSize set the object size, a content of the given size should be written

View File

@@ -82,7 +82,7 @@ func (t ObjectType) Valid() bool {
return t >= CommitObject && t <= REFDeltaObject
}
// IsDelta returns true for any ObjectTyoe that represents a delta (i.e.
// IsDelta returns true for any ObjectType that represents a delta (i.e.
// REFDeltaObject or OFSDeltaObject).
func (t ObjectType) IsDelta() bool {
return t == REFDeltaObject || t == OFSDeltaObject

View File

@@ -39,7 +39,7 @@ func (c *Change) Action() (merkletrie.Action, error) {
return merkletrie.Modify, nil
}
// Files return the files before and after a change.
// Files returns the files before and after a change.
// For insertions from will be nil. For deletions to will be nil.
func (c *Change) Files() (from, to *File, err error) {
action, err := c.Action()

View File

@@ -16,11 +16,11 @@ func newChange(c merkletrie.Change) (*Change, error) {
var err error
if ret.From, err = newChangeEntry(c.From); err != nil {
return nil, fmt.Errorf("From field: %s", err)
return nil, fmt.Errorf("from field: %s", err)
}
if ret.To, err = newChangeEntry(c.To); err != nil {
return nil, fmt.Errorf("To field: %s", err)
return nil, fmt.Errorf("to field: %s", err)
}
return ret, nil

View File

@@ -1,7 +1,6 @@
package object
import (
"bufio"
"bytes"
"context"
"errors"
@@ -14,17 +13,29 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
headerencoding string = "encoding"
// https://github.com/git/git/blob/bcb6cae2966cc407ca1afc77413b3ef11103c175/Documentation/gitformat-signature.txt#L153
// When a merge commit is created from a signed tag, the tag is embedded in
// the commit with the "mergetag" header.
headermergetag string = "mergetag"
defaultUtf8CommitMesageEncoding MessageEncoding = "UTF-8"
)
// Hash represents the hash of an object
type Hash plumbing.Hash
// MessageEncoding represents the encoding of a commit
type MessageEncoding string
// Commit points to a single tree, marking it as what the project looked like
// at a certain point in time. It contains meta-information about that point
// in time, such as a timestamp, the author of the changes since the last
@@ -38,6 +49,9 @@ type Commit struct {
// Committer is the one performing the commit, might be different from
// Author.
Committer Signature
// MergeTag is the embedded tag object when a merge commit is created by
// merging a signed tag.
MergeTag string
// PGPSignature is the PGP signature of the commit.
PGPSignature string
// Message is the commit message, contains arbitrary text.
@@ -46,6 +60,8 @@ type Commit struct {
TreeHash plumbing.Hash
// ParentHashes are the hashes of the parent commits of the commit.
ParentHashes []plumbing.Hash
// Encoding is the encoding of the commit.
Encoding MessageEncoding
s storer.EncodedObjectStorer
}
@@ -173,6 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
c.Hash = o.Hash()
c.Encoding = defaultUtf8CommitMesageEncoding
reader, err := o.Reader()
if err != nil {
@@ -180,11 +197,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
var message bool
var mergetag bool
var pgpsig bool
var msgbuf bytes.Buffer
for {
@@ -193,6 +210,16 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
return err
}
if mergetag {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
c.MergeTag += string(line)
continue
} else {
mergetag = false
}
}
if pgpsig {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
@@ -226,6 +253,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
c.Author.Decode(data)
case "committer":
c.Committer.Decode(data)
case headermergetag:
c.MergeTag += string(data) + "\n"
mergetag = true
case headerencoding:
c.Encoding = MessageEncoding(data)
case headerpgp:
c.PGPSignature += string(data) + "\n"
pgpsig = true
@@ -287,6 +319,28 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
return err
}
if c.MergeTag != "" {
if _, err = fmt.Fprint(w, "\n"+headermergetag+" "); err != nil {
return err
}
// Split tag information lines and re-write with a left padding and
// newline. Use join for this so it's clear that a newline should not be
// added after this section. The newline will be added either as part of
// the PGP signature or the commit message.
mergetag := strings.TrimSuffix(c.MergeTag, "\n")
lines := strings.Split(mergetag, "\n")
if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
return err
}
}
if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMesageEncoding {
if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil {
return err
}
}
if c.PGPSignature != "" && includeSig {
if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
@@ -377,6 +431,17 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil)
}
// Less defines a compare function to determine which commit is 'earlier' by:
// - First use Committer.When
// - If Committer.When are equal then use Author.When
// - If Author.When also equal then compare the string value of the hash
func (c *Commit) Less(rhs *Commit) bool {
return c.Committer.When.Before(rhs.Committer.When) ||
(c.Committer.When.Equal(rhs.Committer.When) &&
(c.Author.When.Before(rhs.Author.When) ||
(c.Author.When.Equal(rhs.Author.When) && bytes.Compare(c.Hash[:], rhs.Hash[:]) < 0)))
}
func indent(t string) string {
var output []string
for _, line := range strings.Split(t, "\n") {

View File

@@ -1,12 +0,0 @@
package object
import (
"bufio"
"sync"
)
var bufPool = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}

View File

@@ -96,10 +96,6 @@ func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, erro
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return
@@ -321,8 +317,8 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
// File is deleted.
cs.Name = from.Path()
} else if from.Path() != to.Path() {
// File is renamed. Not supported.
// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
// File is renamed.
cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
} else {
cs.Name = from.Path()
}

View File

@@ -403,10 +403,16 @@ func min(a, b int) int {
return b
}
const maxMatrixSize = 10000
func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
// Allocate for the worst-case scenario where every pair has a score
// that we need to consider. We might not need that many.
matrix := make(similarityMatrix, 0, len(srcs)*len(dsts))
matrixSize := len(srcs) * len(dsts)
if matrixSize > maxMatrixSize {
matrixSize = maxMatrixSize
}
matrix := make(similarityMatrix, 0, matrixSize)
srcSizes := make([]int64, len(srcs))
dstSizes := make([]int64, len(dsts))
dstTooLarge := make(map[int]bool)
@@ -735,10 +741,7 @@ func (i *similarityIndex) add(key int, cnt uint64) error {
// It's the same key, so increment the counter.
var err error
i.hashes[j], err = newKeyCountPair(key, v.count()+cnt)
if err != nil {
return err
}
return nil
return err
} else if j+1 >= len(i.hashes) {
j = 0
} else {

View File

@@ -0,0 +1,101 @@
package object
import "bytes"
const (
	signatureTypeUnknown signatureType = iota
	signatureTypeOpenPGP
	signatureTypeX509
	signatureTypeSSH
)

var (
	// openPGPSignatureFormat is the format of an OpenPGP signature.
	openPGPSignatureFormat = signatureFormat{
		[]byte("-----BEGIN PGP SIGNATURE-----"),
		[]byte("-----BEGIN PGP MESSAGE-----"),
	}
	// x509SignatureFormat is the format of an X509 signature, which is
	// a PKCS#7 (S/MIME) signature.
	x509SignatureFormat = signatureFormat{
		[]byte("-----BEGIN CERTIFICATE-----"),
	}
	// sshSignatureFormat is the format of an SSH signature.
	sshSignatureFormat = signatureFormat{
		[]byte("-----BEGIN SSH SIGNATURE-----"),
	}
)

var (
	// knownSignatureFormats maps every supported signatureType to the
	// header lines that identify it.
	knownSignatureFormats = map[signatureType]signatureFormat{
		signatureTypeOpenPGP: openPGPSignatureFormat,
		signatureTypeX509:    x509SignatureFormat,
		signatureTypeSSH:     sshSignatureFormat,
	}
)

// signatureType represents the type of the signature.
type signatureType int8

// signatureFormat represents the beginning of a signature.
type signatureFormat [][]byte

// typeForSignature returns the type of the signature based on its format.
// It reports signatureTypeUnknown when no known header matches the start
// of b.
func typeForSignature(b []byte) signatureType {
	for sigType, headers := range knownSignatureFormats {
		for _, header := range headers {
			if bytes.HasPrefix(b, header) {
				return sigType
			}
		}
	}
	return signatureTypeUnknown
}

// parseSignedBytes returns the position of the last signature block found
// in the given bytes, along with its type. If no signature block is found,
// it returns -1.
//
// When multiple signature blocks are found, the position of the last one is
// returned. Any tailing bytes after this signature block start should be
// considered part of the signature.
//
// Given this, it would be safe to use the returned position to split the
// bytes into two parts: the first part containing the message, the second
// part containing the signature.
//
// Example:
//
//	message := []byte(`Message with signature
//
//	-----BEGIN SSH SIGNATURE-----
//	...`)
//
//	var signature string
//	if pos, _ := parseSignedBytes(message); pos != -1 {
//		signature = string(message[pos:])
//		message = message[:pos]
//	}
//
// This logic is on par with git's gpg-interface.c:parse_signed_buffer().
// https://github.com/git/git/blob/7c2ef319c52c4997256f5807564523dfd4acdfc7/gpg-interface.c#L668
func parseSignedBytes(b []byte) (int, signatureType) {
	match, matchType := -1, signatureTypeUnknown
	// Scan line by line; remember the offset of the last line that starts
	// with a known signature header.
	for offset := 0; offset < len(b); {
		rest := b[offset:]
		if st := typeForSignature(rest); st != signatureTypeUnknown {
			match, matchType = offset, st
		}
		eol := bytes.IndexByte(rest, '\n')
		if eol < 0 {
			// No more newlines: this was the final line.
			break
		}
		offset += eol + 1
	}
	return match, matchType
}

View File

@@ -1,18 +1,16 @@
package object
import (
"bufio"
"bytes"
"fmt"
"io"
stdioutil "io/ioutil"
"strings"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// Tag represents an annotated tag object. It points to a single git object of
@@ -93,9 +91,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
var line []byte
line, err = r.ReadBytes('\n')
@@ -128,40 +126,15 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
}
data, err := stdioutil.ReadAll(r)
data, err := io.ReadAll(r)
if err != nil {
return err
}
var pgpsig bool
// Check if data contains PGP signature.
if bytes.Contains(data, []byte(beginpgp)) {
// Split the lines at newline.
messageAndSig := bytes.Split(data, []byte("\n"))
for _, l := range messageAndSig {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
break
} else {
t.PGPSignature += string(l) + "\n"
}
continue
}
// Check if it's the beginning of a PGP signature.
if bytes.Contains(l, []byte(beginpgp)) {
t.PGPSignature += beginpgp + "\n"
pgpsig = true
continue
}
t.Message += string(l) + "\n"
}
} else {
t.Message = string(data)
if sm, _ := parseSignedBytes(data); sm >= 0 {
t.PGPSignature = string(data[sm:])
data = data[:sm]
}
t.Message = string(data)
return nil
}

View File

@@ -1,7 +1,6 @@
package object
import (
"bufio"
"context"
"errors"
"fmt"
@@ -14,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
@@ -230,9 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
str, err := r.ReadString(' ')
if err != nil {

View File

@@ -38,6 +38,10 @@ func NewTreeRootNode(t *Tree) noder.Noder {
}
}
// Skip reports whether this node should be skipped during traversal.
// Tree nodes are never skipped. (Presumably required by the noder
// interface — confirm against its definition.)
func (t *treeNoder) Skip() bool {
	return false
}
func (t *treeNoder) isRoot() bool {
return t.name == ""
}

View File

@@ -57,7 +57,7 @@ func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
switch r.Type() {
case plumbing.SymbolicReference:
v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
a.Capabilities.Add(capability.SymRef, v)
return a.Capabilities.Add(capability.SymRef, v)
case plumbing.HashReference:
a.References[r.Name().String()] = r.Hash()
default:
@@ -96,12 +96,12 @@ func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
//
// Git versions prior to 1.8.4.3 use a special procedure to work out
// which reference HEAD is pointing to:
// - Check if a reference called master exists. If exists and it
// has the same hash as HEAD hash, we can say that HEAD is pointing to master
// - If master does not exists or does not have the same hash as HEAD,
// order references and check in that order if that reference has the same
// hash than HEAD. If yes, set HEAD pointing to that branch hash
// - If no reference is found, throw an error
// - Check if a reference called master exists. If exists and it
// has the same hash as HEAD hash, we can say that HEAD is pointing to master
// - If master does not exists or does not have the same hash as HEAD,
// order references and check in that order if that reference has the same
// hash than HEAD. If yes, set HEAD pointing to that branch hash
// - If no reference is found, throw an error
func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
if a.Head == nil {
return nil

View File

@@ -133,6 +133,7 @@ func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
return nil
}
// TODO: Use object-format (when available) for hash size. Git 2.41+
if len(p.line) < hashSize {
p.error("cannot read hash, pkt-line too short")
return nil

View File

@@ -1,6 +1,11 @@
// Package capability defines the server and client capabilities.
package capability
import (
"fmt"
"os"
)
// Capability describes a server or client capability.
type Capability string
@@ -238,7 +243,15 @@ const (
Filter Capability = "filter"
)
const DefaultAgent = "go-git/4.x"
// userAgent is the base user-agent string reported by this library.
const userAgent = "go-git/5.x"

// DefaultAgent provides the user agent string.
//
// If the GO_GIT_USER_AGENT_EXTRA environment variable is set, its value is
// appended to the base agent string, separated by a space.
func DefaultAgent() string {
	extra, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA")
	if !ok {
		return userAgent
	}
	return fmt.Sprintf("%s %s", userAgent, extra)
}
var known = map[Capability]bool{
MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,

View File

@@ -86,7 +86,9 @@ func (l *List) Get(capability Capability) []string {
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
delete(l.m, capability)
if _, ok := l.m[capability]; ok {
l.m[capability].Values = l.m[capability].Values[:0]
}
return l.Add(capability, values...)
}

View File

@@ -19,7 +19,6 @@ var (
// common
sp = []byte(" ")
eol = []byte("\n")
eq = []byte{'='}
// advertised-refs
null = []byte("\x00")
@@ -49,6 +48,11 @@ func isFlush(payload []byte) bool {
return len(payload) == 0
}
var (
// ErrNilWriter is returned when a nil writer is passed to the encoder.
ErrNilWriter = fmt.Errorf("nil writer")
)
// ErrUnexpectedData represents an unexpected data decoding a message
type ErrUnexpectedData struct {
Msg string

View File

@@ -0,0 +1,120 @@
package packp
import (
"fmt"
"io"
"strings"
"github.com/go-git/go-git/v5/plumbing/format/pktline"
)
var (
// ErrInvalidGitProtoRequest is returned by Decode if the input is not a
// valid git protocol request.
ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request")
)
// GitProtoRequest is a command request for the git protocol.
// It is used to send the command, endpoint, and extra parameters to the
// remote.
// See https://git-scm.com/docs/pack-protocol#_git_transport
type GitProtoRequest struct {
	// RequestCommand is the requested service, e.g. "git-upload-pack".
	RequestCommand string
	// Pathname is the path of the repository on the remote.
	Pathname string
	// Host is the remote host, sent as the NUL-terminated "host=" parameter. Optional.
	Host string
	// ExtraParams are appended after an empty parameter, each NUL-terminated. Optional.
	ExtraParams []string
}
// validate checks that the mandatory fields of the request (command and
// pathname) are present, returning ErrInvalidGitProtoRequest otherwise.
func (g *GitProtoRequest) validate() error {
	switch {
	case g.RequestCommand == "":
		return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest)
	case g.Pathname == "":
		return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest)
	}
	return nil
}
// Encode writes the request to w as a single pkt-line of the form
// "<cmd> <path>\x00[host=<host>\x00][\x00<param>\x00...]".
func (g *GitProtoRequest) Encode(w io.Writer) error {
	if w == nil {
		return ErrNilWriter
	}

	if err := g.validate(); err != nil {
		return err
	}

	var buf strings.Builder
	buf.WriteString(g.RequestCommand)
	buf.WriteByte(' ')
	buf.WriteString(g.Pathname)
	buf.WriteByte(0)

	if g.Host != "" {
		buf.WriteString("host=")
		buf.WriteString(g.Host)
		buf.WriteByte(0)
	}

	if len(g.ExtraParams) > 0 {
		// Extra parameters follow an additional empty (NUL) separator.
		buf.WriteByte(0)
		for _, param := range g.ExtraParams {
			buf.WriteString(param)
			buf.WriteByte(0)
		}
	}

	return pktline.NewEncoder(w).Encode([]byte(buf.String()))
}
// Decode reads a single pkt-line from r and parses it into the request:
// command, pathname, optional "host=" parameter and any extra parameters.
// It returns io.EOF for an empty (flush) line and
// ErrInvalidGitProtoRequest for malformed input.
func (g *GitProtoRequest) Decode(r io.Reader) error {
	s := pktline.NewScanner(r)
	if !s.Scan() {
		if err := s.Err(); err != nil {
			return err
		}
		return ErrInvalidGitProtoRequest
	}

	line := string(s.Bytes())
	if line == "" {
		return io.EOF
	}

	// Every well-formed request ends with a NUL byte.
	if line[len(line)-1] != 0 {
		return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest)
	}

	cmd, rest, found := strings.Cut(line, " ")
	if !found {
		return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest)
	}
	g.RequestCommand = cmd

	params := strings.Split(rest, "\x00")
	if len(params) < 1 {
		return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest)
	}
	g.Pathname = params[0]

	if len(params) > 1 {
		g.Host = strings.TrimPrefix(params[1], "host=")
	}

	if len(params) > 2 {
		for _, param := range params[2:] {
			if param != "" {
				g.ExtraParams = append(g.ExtraParams, param)
			}
		}
	}

	return nil
}

View File

@@ -21,11 +21,6 @@ type ServerResponse struct {
// Decode decodes the response into the struct, isMultiACK should be true, if
// the request was done with multi_ack or multi_ack_detailed capabilities.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
// TODO: implement support for multi_ack or multi_ack_detailed responses
if isMultiACK {
return errors.New("multi_ack and multi_ack_detailed are not supported")
}
s := pktline.NewScanner(reader)
for s.Scan() {
@@ -48,7 +43,23 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
}
}
return s.Err()
// isMultiACK is true when the remote server advertises the related
// capabilities when they are not in transport.UnsupportedCapabilities.
//
// Users may decide to remove multi_ack and multi_ack_detailed from the
// unsupported capabilities list, which allows them to do initial clones
// from Azure DevOps.
//
// Follow-up fetches may error, therefore errors are wrapped with additional
// information highlighting that this capabilities are not supported by go-git.
//
// TODO: Implement support for multi_ack or multi_ack_detailed responses.
err := s.Err()
if err != nil && isMultiACK {
return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err)
}
return err
}
// stopReading detects when a valid command such as ACK or NAK is found to be
@@ -90,12 +101,14 @@ func (r *ServerResponse) decodeLine(line []byte) error {
return fmt.Errorf("unexpected flush")
}
if bytes.Equal(line[0:3], ack) {
return r.decodeACKLine(line)
}
if len(line) >= 3 {
if bytes.Equal(line[0:3], ack) {
return r.decodeACKLine(line)
}
if bytes.Equal(line[0:3], nak) {
return nil
if bytes.Equal(line[0:3], nak) {
return nil
}
}
return fmt.Errorf("unexpected content %q", string(line))
@@ -113,8 +126,9 @@ func (r *ServerResponse) decodeACKLine(line []byte) error {
}
// Encode encodes the ServerResponse into a writer.
func (r *ServerResponse) Encode(w io.Writer) error {
if len(r.ACKs) > 1 {
func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error {
if len(r.ACKs) > 1 && !isMultiACK {
// For further information, refer to comments in the Decode func above.
return errors.New("multi_ack and multi_ack_detailed are not supported")
}

View File

@@ -95,7 +95,7 @@ func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
}
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
return r

View File

@@ -43,7 +43,7 @@ func (d *ulReqDecoder) Decode(v *UploadRequest) error {
return d.err
}
// fills out the parser stiky error
// fills out the parser sticky error
func (d *ulReqDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,

View File

@@ -19,6 +19,7 @@ var (
type ReferenceUpdateRequest struct {
Capabilities *capability.List
Commands []*Command
Options []*Option
Shallow *plumbing.Hash
// Packfile contains an optional packfile reader.
Packfile io.ReadCloser
@@ -58,7 +59,7 @@ func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceU
r := NewReferenceUpdateRequest()
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
if adv.Supports(capability.ReportStatus) {
@@ -86,9 +87,9 @@ type Action string
const (
Create Action = "create"
Update = "update"
Delete = "delete"
Invalid = "invalid"
Update Action = "update"
Delete Action = "delete"
Invalid Action = "invalid"
)
type Command struct {
@@ -120,3 +121,8 @@ func (c *Command) validate() error {
return nil
}
// Option is a key/value push-option pair, encoded as "key=value" in the
// push-options section of a reference update request (see encodeOptions).
type Option struct {
	Key   string
	Value string
}

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/pktline"
@@ -81,7 +80,7 @@ func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
var ok bool
rc, ok = r.(io.ReadCloser)
if !ok {
rc = ioutil.NopCloser(r)
rc = io.NopCloser(r)
}
d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}

View File

@@ -9,10 +9,6 @@ import (
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)
var (
zeroHashString = plumbing.ZeroHash.String()
)
// Encode writes the ReferenceUpdateRequest encoding to the stream.
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := req.validate(); err != nil {
@@ -29,6 +25,12 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
return err
}
if req.Capabilities.Supports(capability.PushOptions) {
if err := req.encodeOptions(e, req.Options); err != nil {
return err
}
}
if req.Packfile != nil {
if _, err := io.Copy(w, req.Packfile); err != nil {
return err
@@ -73,3 +75,15 @@ func formatCommand(cmd *Command) string {
n := cmd.New.String()
return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
}
// encodeOptions writes one "key=value" pkt-line per push option and
// terminates the options section with a flush-pkt.
func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder,
	opts []*Option) error {
	for _, opt := range opts {
		if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil {
			return err
		}
	}
	return e.Flush()
}

View File

@@ -38,10 +38,10 @@ func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackReque
}
}
// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants
// length is zero
// IsEmpty returns whether a request is empty - it is empty if Haves are contained
// in the Wants, or if Wants length is zero, and we don't have any shallows
func (r *UploadPackRequest) IsEmpty() bool {
return isSubset(r.Wants, r.Haves)
return isSubset(r.Wants, r.Haves) && len(r.Shallows) == 0
}
func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {

View File

@@ -24,7 +24,6 @@ type UploadPackResponse struct {
r io.ReadCloser
isShallow bool
isMultiACK bool
isOk bool
}
// NewUploadPackResponse create a new UploadPackResponse instance, the request
@@ -79,7 +78,7 @@ func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
}
}
if err := r.ServerResponse.Encode(w); err != nil {
if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil {
return err
}

View File

@@ -3,6 +3,7 @@ package plumbing
import (
"errors"
"fmt"
"regexp"
"strings"
)
@@ -15,10 +16,11 @@ const (
symrefPrefix = "ref: "
)
// RefRevParseRules are a set of rules to parse references into short names.
// These are the same rules as used by git in shorten_unambiguous_ref.
// RefRevParseRules are a set of rules to parse references into short names, or expand into a full reference.
// These are the same rules as used by git in shorten_unambiguous_ref and expand_ref.
// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417
var RefRevParseRules = []string{
"%s",
"refs/%s",
"refs/tags/%s",
"refs/heads/%s",
@@ -28,6 +30,9 @@ var RefRevParseRules = []string{
var (
ErrReferenceNotFound = errors.New("reference not found")
// ErrInvalidReferenceName is returned when a reference name is invalid.
ErrInvalidReferenceName = errors.New("invalid reference name")
)
// ReferenceType reference type's
@@ -113,7 +118,7 @@ func (r ReferenceName) String() string {
func (r ReferenceName) Short() string {
s := string(r)
res := s
for _, format := range RefRevParseRules {
for _, format := range RefRevParseRules[1:] {
_, err := fmt.Sscanf(s, format, &res)
if err == nil {
continue
@@ -123,9 +128,95 @@ func (r ReferenceName) Short() string {
return res
}
// ctrlSeqs matches ASCII control characters forbidden by rule 4.
var ctrlSeqs = regexp.MustCompile(`[\000-\037\177]`)

// Validate validates a reference name.
// This follows the git-check-ref-format rules.
// See https://git-scm.com/docs/git-check-ref-format
//
// It is important to note that this function does not check if the reference
// exists in the repository.
// It only checks if the reference name is valid.
// This functions does not support the --refspec-pattern, --normalize, and
// --allow-onelevel options.
//
// Git imposes the following rules on how references are named:
//
//  1. They can include slash / for hierarchical (directory) grouping, but no
//     slash-separated component can begin with a dot . or end with the
//     sequence .lock.
//  2. They must contain at least one /. This enforces the presence of a
//     category like heads/, tags/ etc. but the actual names are not
//     restricted. If the --allow-onelevel option is used, this rule is
//     waived.
//  3. They cannot have two consecutive dots .. anywhere.
//  4. They cannot have ASCII control characters (i.e. bytes whose values are
//     lower than \040, or \177 DEL), space, tilde ~, caret ^, or colon :
//     anywhere.
//  5. They cannot have question-mark ?, asterisk *, or open bracket [
//     anywhere. See the --refspec-pattern option below for an exception to this
//     rule.
//  6. They cannot begin or end with a slash / or contain multiple consecutive
//     slashes (see the --normalize option below for an exception to this rule).
//  7. They cannot end with a dot ..
//  8. They cannot contain a sequence @{.
//  9. They cannot be the single character @.
//  10. They cannot contain a \.
func (r ReferenceName) Validate() error {
	name := string(r)
	if name == "" {
		return ErrInvalidReferenceName
	}

	// HEAD is a special case
	if r == HEAD {
		return nil
	}

	// rule 7
	if strings.HasSuffix(name, ".") {
		return ErrInvalidReferenceName
	}

	// rule 2
	parts := strings.Split(name, "/")
	if len(parts) < 2 {
		return ErrInvalidReferenceName
	}

	// Branch and tag components additionally may not start with a dash.
	noDashPrefix := r.IsBranch() || r.IsTag()
	for _, part := range parts {
		if !validRefComponent(part) {
			return ErrInvalidReferenceName
		}
		if noDashPrefix && strings.HasPrefix(part, "-") {
			return ErrInvalidReferenceName
		}
	}

	return nil
}

// validRefComponent reports whether one slash-separated component of a
// reference name satisfies rules 1, 3, 4, 5, 6, 8, 9 and 10 above.
func validRefComponent(part string) bool {
	switch {
	case part == "", // rule 6
		part == "@",                              // rule 9
		strings.HasPrefix(part, "."),             // rule 1
		strings.HasSuffix(part, ".lock"),         // rule 1
		strings.Contains(part, ".."),             // rule 3
		ctrlSeqs.MatchString(part),               // rule 4
		strings.ContainsAny(part, "~^:?*[ \t\n"), // rule 4 & 5
		strings.Contains(part, "@{"),             // rule 8
		strings.Contains(part, `\`):              // rule 10
		return false
	}
	return true
}
// Well-known reference names.
const (
	HEAD   ReferenceName = "HEAD"
	Master ReferenceName = "refs/heads/master"
	Main   ReferenceName = "refs/heads/main"
)
// Reference is a representation of git reference
@@ -168,22 +259,22 @@ func NewHashReference(n ReferenceName, h Hash) *Reference {
}
}
// Type return the type of a reference
// Type returns the type of a reference
func (r *Reference) Type() ReferenceType {
return r.t
}
// Name return the name of a reference
// Name returns the name of a reference
func (r *Reference) Name() ReferenceName {
return r.n
}
// Hash return the hash of a hash reference
// Hash returns the hash of a hash reference
func (r *Reference) Hash() Hash {
return r.h
}
// Target return the target of a symbolic reference
// Target returns the target of a symbolic reference
func (r *Reference) Target() ReferenceName {
return r.target
}
@@ -204,6 +295,21 @@ func (r *Reference) Strings() [2]string {
}
func (r *Reference) String() string {
s := r.Strings()
return fmt.Sprintf("%s %s", s[1], s[0])
ref := ""
switch r.Type() {
case HashReference:
ref = r.Hash().String()
case SymbolicReference:
ref = symrefPrefix + r.Target().String()
default:
return ""
}
name := r.Name().String()
var v strings.Builder
v.Grow(len(ref) + len(name) + 1)
v.WriteString(ref)
v.WriteString(" ")
v.WriteString(name)
return v.String()
}

View File

@@ -42,6 +42,7 @@ type EncodedObjectStorer interface {
HasEncodedObject(plumbing.Hash) error
// EncodedObjectSize returns the plaintext size of the encoded object.
EncodedObjectSize(plumbing.Hash) (int64, error)
AddAlternate(remote string) error
}
// DeltaObjectStorer is an EncodedObjectStorer that can return delta
@@ -52,8 +53,8 @@ type DeltaObjectStorer interface {
DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
}
// Transactioner is a optional method for ObjectStorer, it enable transaction
// base write and read operations in the storage
// Transactioner is a optional method for ObjectStorer, it enables transactional read and write
// operations.
type Transactioner interface {
// Begin starts a transaction.
Begin() Transaction
@@ -87,8 +88,8 @@ type PackedObjectStorer interface {
DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
}
// PackfileWriter is a optional method for ObjectStorer, it enable direct write
// of packfile to the storage
// PackfileWriter is an optional method for ObjectStorer, it enables directly writing
// a packfile to storage.
type PackfileWriter interface {
// PackfileWriter returns a writer for writing a packfile to the storage
//

View File

@@ -3,10 +3,7 @@
package client
import (
"crypto/tls"
"crypto/x509"
"fmt"
gohttp "net/http"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/file"
@@ -24,14 +21,6 @@ var Protocols = map[string]transport.Transport{
"file": file.DefaultClient,
}
var insecureClient = http.NewClient(&gohttp.Client{
Transport: &gohttp.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
},
})
// InstallProtocol adds or modifies an existing protocol.
func InstallProtocol(scheme string, c transport.Transport) {
if c == nil {
@@ -50,27 +39,6 @@ func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
}
func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) {
if endpoint.Protocol == "https" {
if endpoint.InsecureSkipTLS {
return insecureClient, nil
}
if len(endpoint.CaBundle) != 0 {
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
rootCAs.AppendCertsFromPEM(endpoint.CaBundle)
return http.NewClient(&gohttp.Client{
Transport: &gohttp.Transport{
TLSClientConfig: &tls.Config{
RootCAs: rootCAs,
},
},
}), nil
}
}
f, ok := Protocols[endpoint.Protocol]
if !ok {
return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol)

View File

@@ -108,14 +108,45 @@ type Endpoint struct {
// Host is the host.
Host string
// Port is the port to connect, if 0 the default port for the given protocol
// wil be used.
// will be used.
Port int
// Path is the repository path.
Path string
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CaBundle specify additional ca bundle with system cert pool
CaBundle []byte
// Proxy provides info required for connecting to a proxy.
Proxy ProxyOptions
}
// ProxyOptions holds the settings needed to route transport traffic
// through a proxy.
type ProxyOptions struct {
	// URL is the proxy URL.
	URL string
	// Username is an optional proxy user.
	Username string
	// Password is an optional proxy password.
	Password string
}

// Validate checks that the proxy URL, when set, can be parsed.
func (o *ProxyOptions) Validate() error {
	if o.URL == "" {
		return nil
	}
	_, err := url.Parse(o.URL)
	return err
}

// FullURL returns the proxy URL with any configured credentials embedded
// in its user-info section.
func (o *ProxyOptions) FullURL() (*url.URL, error) {
	proxyURL, err := url.Parse(o.URL)
	if err != nil {
		return nil, err
	}
	switch {
	case o.Username != "" && o.Password != "":
		proxyURL.User = url.UserPassword(o.Username, o.Password)
	case o.Username != "":
		proxyURL.User = url.User(o.Username)
	}
	return proxyURL, nil
}
var defaultPorts = map[string]int{
@@ -196,11 +227,17 @@ func parseURL(endpoint string) (*Endpoint, error) {
pass, _ = u.User.Password()
}
host := u.Hostname()
if strings.Contains(host, ":") {
// IPv6 address
host = "[" + host + "]"
}
return &Endpoint{
Protocol: u.Scheme,
User: user,
Password: pass,
Host: u.Hostname(),
Host: host,
Port: getPort(u),
Path: getPath(u),
}, nil

View File

@@ -11,7 +11,6 @@ import (
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
"github.com/go-git/go-git/v5/utils/ioutil"
"golang.org/x/sys/execabs"
)
@@ -112,7 +111,7 @@ func (c *command) Start() error {
func (c *command) StderrPipe() (io.Reader, error) {
// Pipe returned by Command.StderrPipe has a race with Read + Command.Wait.
// We use an io.Pipe and close it after the command finishes.
r, w := ioutil.Pipe()
r, w := io.Pipe()
c.cmd.Stderr = w
c.stderrCloser = r
return r, nil

View File

@@ -2,11 +2,11 @@
package git
import (
"fmt"
"io"
"net"
"strconv"
"github.com/go-git/go-git/v5/plumbing/format/pktline"
"github.com/go-git/go-git/v5/plumbing/protocol/packp"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
"github.com/go-git/go-git/v5/utils/ioutil"
@@ -41,10 +41,18 @@ type command struct {
// Start executes the command sending the required message to the TCP connection
func (c *command) Start() error {
cmd := endpointToCommand(c.command, c.endpoint)
req := packp.GitProtoRequest{
RequestCommand: c.command,
Pathname: c.endpoint.Path,
}
host := c.endpoint.Host
if c.endpoint.Port != DefaultPort {
host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port))
}
e := pktline.NewEncoder(c.conn)
return e.Encode([]byte(cmd))
req.Host = host
return req.Encode(c.conn)
}
func (c *command) connect() error {
@@ -69,7 +77,7 @@ func (c *command) getHostWithPort() string {
port = DefaultPort
}
return fmt.Sprintf("%s:%d", host, port)
return net.JoinHostPort(host, strconv.Itoa(port))
}
// StderrPipe git protocol doesn't have any dedicated error channel
@@ -77,27 +85,18 @@ func (c *command) StderrPipe() (io.Reader, error) {
return nil, nil
}
// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent
// StdinPipe returns the underlying connection as WriteCloser, wrapped to prevent
// call to the Close function from the connection, a command execution in git
// protocol can't be closed or killed
func (c *command) StdinPipe() (io.WriteCloser, error) {
return ioutil.WriteNopCloser(c.conn), nil
}
// StdoutPipe return the underlying connection as Reader
// StdoutPipe returns the underlying connection as Reader
func (c *command) StdoutPipe() (io.Reader, error) {
return c.conn, nil
}
// endpointToCommand builds the raw git-daemon request line for the given
// command and endpoint: "<cmd> <path>\x00host=<host>\x00". The host part
// carries an explicit port only when it differs from DefaultPort.
func endpointToCommand(cmd string, ep *transport.Endpoint) string {
	host := ep.Host
	if ep.Port != DefaultPort {
		host = fmt.Sprintf("%s:%d", ep.Host, ep.Port)
	}
	return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
}
// Close closes the TCP connection and connection.
func (c *command) Close() error {
if !c.connected {

View File

@@ -4,16 +4,22 @@ package http
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"sync"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/protocol/packp"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/golang/groupcache/lru"
)
// it requires a bytes.Buffer, because we need to know the length
@@ -67,6 +73,17 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) (
return nil, err
}
// Git 2.41+ returns a zero-id plus capabilities when an empty
// repository is being cloned. This skips the existing logic within
// advrefs_decode.decodeFirstHash, which expects a flush-pkt instead.
//
// This logic aligns with plumbing/transport/internal/common/common.go.
if ar.IsEmpty() &&
// Empty repositories are valid for git-receive-pack.
transport.ReceivePackServiceName != serviceName {
return nil, transport.ErrEmptyRemoteRepository
}
transport.FilterUnsupportedCapabilities(ar.Capabilities)
s.advRefs = ar
@@ -74,40 +91,83 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) (
}
type client struct {
c *http.Client
c *http.Client
transports *lru.Cache
m sync.RWMutex
}
// DefaultClient is the default HTTP client, which uses `http.DefaultClient`.
var DefaultClient = NewClient(nil)
// ClientOptions holds user configurable options for the client.
type ClientOptions struct {
// CacheMaxEntries is the max no. of entries that the transport objects
// cache will hold at any given point of time. It must be a positive integer.
// Calling `client.addTransport()` after the cache has reached the specified
// size, will result in the least recently used transport getting deleted
// before the provided transport is added to the cache.
CacheMaxEntries int
}
var (
// defaultTransportCacheSize is the default capacity of the transport objects cache.
// Its value is 0 because transport caching is turned off by default and is an
// opt-in feature.
defaultTransportCacheSize = 0
// DefaultClient is the default HTTP client, which uses a net/http client configured
// with http.DefaultTransport.
DefaultClient = NewClient(nil)
)
// NewClient creates a new client with a custom net/http client.
// See `InstallProtocol` to install and override default http client.
// Unless a properly initialized client is given, it will fall back into
// `http.DefaultClient`.
// If the net/http client is nil or empty, it will use a net/http client configured
// with http.DefaultTransport.
//
// Note that for HTTP client cannot distinguish between private repositories and
// unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired`
// for both.
func NewClient(c *http.Client) transport.Transport {
if c == nil {
return &client{http.DefaultClient}
c = &http.Client{
Transport: http.DefaultTransport,
}
}
return NewClientWithOptions(c, &ClientOptions{
CacheMaxEntries: defaultTransportCacheSize,
})
}
return &client{
// NewClientWithOptions returns a new client configured with the provided net/http client
// and other custom options specific to the client.
// If the net/http client is nil or empty, it will use a net/http client configured
// with http.DefaultTransport.
func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transport {
if c == nil {
c = &http.Client{
Transport: http.DefaultTransport,
}
}
cl := &client{
c: c,
}
if opts != nil {
if opts.CacheMaxEntries > 0 {
cl.transports = lru.New(opts.CacheMaxEntries)
}
}
return cl
}
func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.UploadPackSession, error) {
return newUploadPackSession(c.c, ep, auth)
return newUploadPackSession(c, ep, auth)
}
func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.ReceivePackSession, error) {
return newReceivePackSession(c.c, ep, auth)
return newReceivePackSession(c, ep, auth)
}
type session struct {
@@ -117,10 +177,106 @@ type session struct {
advRefs *packp.AdvRefs
}
func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
func transportWithInsecureTLS(transport *http.Transport) {
if transport.TLSClientConfig == nil {
transport.TLSClientConfig = &tls.Config{}
}
transport.TLSClientConfig.InsecureSkipVerify = true
}
func transportWithCABundle(transport *http.Transport, caBundle []byte) error {
rootCAs, err := x509.SystemCertPool()
if err != nil {
return err
}
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
rootCAs.AppendCertsFromPEM(caBundle)
if transport.TLSClientConfig == nil {
transport.TLSClientConfig = &tls.Config{}
}
transport.TLSClientConfig.RootCAs = rootCAs
return nil
}
func transportWithProxy(transport *http.Transport, proxyURL *url.URL) {
transport.Proxy = http.ProxyURL(proxyURL)
}
// configureTransport applies the transport-specific options carried by the
// endpoint (custom CA bundle, TLS verification skipping, proxy) to the given
// HTTP transport. It returns an error when the CA bundle cannot be applied or
// the proxy URL cannot be resolved.
//
// The receiver-style parameter is named tr rather than transport so it does
// not shadow the imported transport package used for *transport.Endpoint.
func configureTransport(tr *http.Transport, ep *transport.Endpoint) error {
	if len(ep.CaBundle) > 0 {
		if err := transportWithCABundle(tr, ep.CaBundle); err != nil {
			return err
		}
	}
	if ep.InsecureSkipTLS {
		transportWithInsecureTLS(tr)
	}
	if ep.Proxy.URL != "" {
		proxyURL, err := ep.Proxy.FullURL()
		if err != nil {
			return err
		}
		transportWithProxy(tr, proxyURL)
	}
	return nil
}
func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
var httpClient *http.Client
// We need to configure the http transport if there are transport specific
// options present in the endpoint.
if len(ep.CaBundle) > 0 || ep.InsecureSkipTLS || ep.Proxy.URL != "" {
var transport *http.Transport
// if the client wasn't configured to have a cache for transports then just configure
// the transport and use it directly, otherwise try to use the cache.
if c.transports == nil {
tr, ok := c.c.Transport.(*http.Transport)
if !ok {
return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s",
reflect.TypeOf(transport), reflect.TypeOf(c.c.Transport))
}
transport = tr.Clone()
configureTransport(transport, ep)
} else {
transportOpts := transportOptions{
caBundle: string(ep.CaBundle),
insecureSkipTLS: ep.InsecureSkipTLS,
}
if ep.Proxy.URL != "" {
proxyURL, err := ep.Proxy.FullURL()
if err != nil {
return nil, err
}
transportOpts.proxyURL = *proxyURL
}
var found bool
transport, found = c.fetchTransport(transportOpts)
if !found {
transport = c.c.Transport.(*http.Transport).Clone()
configureTransport(transport, ep)
c.addTransport(transportOpts, transport)
}
}
httpClient = &http.Client{
Transport: transport,
CheckRedirect: c.c.CheckRedirect,
Jar: c.c.Jar,
Timeout: c.c.Timeout,
}
} else {
httpClient = c.c
}
s := &session{
auth: basicAuthFromEndpoint(ep),
client: c,
client: httpClient,
endpoint: ep,
}
if auth != nil {
@@ -250,14 +406,28 @@ func (a *TokenAuth) String() string {
// Err is a dedicated error to return errors based on status code
type Err struct {
Response *http.Response
Reason string
}
// NewErr returns a new Err based on a http response
// NewErr returns a new Err based on a http response and closes response body
// if needed
func NewErr(r *http.Response) error {
if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
return nil
}
var reason string
// If a response message is present, add it to error
var messageBuffer bytes.Buffer
if r.Body != nil {
messageLength, _ := messageBuffer.ReadFrom(r.Body)
if messageLength > 0 {
reason = messageBuffer.String()
}
_ = r.Body.Close()
}
switch r.StatusCode {
case http.StatusUnauthorized:
return transport.ErrAuthenticationRequired
@@ -267,7 +437,7 @@ func NewErr(r *http.Response) error {
return transport.ErrRepositoryNotFound
}
return plumbing.NewUnexpectedError(&Err{r})
return plumbing.NewUnexpectedError(&Err{r, reason})
}
// StatusCode returns the status code of the response

View File

@@ -19,7 +19,7 @@ type rpSession struct {
*session
}
func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
func newReceivePackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
s, err := newSession(c, ep, auth)
return &rpSession{s}, err
}
@@ -102,7 +102,6 @@ func (s *rpSession) doRequest(
}
if err := NewErr(res); err != nil {
_ = res.Body.Close()
return nil, err
}

View File

@@ -0,0 +1,40 @@
package http
import (
"net/http"
"net/url"
)
// transportOptions contains transport specific configuration.
// It is also used as the key of the client's transport cache, so every field
// must be of a comparable type.
type transportOptions struct {
	// insecureSkipTLS mirrors Endpoint.InsecureSkipTLS for cache-key purposes.
	insecureSkipTLS bool
	// caBundle holds the PEM-encoded CA bundle as a string because
	// []byte is not comparable.
	caBundle string
	// proxyURL is the resolved proxy address; the zero value means no proxy.
	proxyURL url.URL
}
// addTransport stores transport in the client's cache under the key opts,
// holding the write lock for the duration of the mutation.
func (c *client) addTransport(opts transportOptions, transport *http.Transport) {
	c.m.Lock()
	defer c.m.Unlock()
	c.transports.Add(opts, transport)
}
// removeTransport evicts the cached transport stored under opts, if any.
func (c *client) removeTransport(opts transportOptions) {
	c.m.Lock()
	defer c.m.Unlock()
	c.transports.Remove(opts)
}
// fetchTransport looks up a cached *http.Transport for opts. The boolean
// result reports whether a usable entry was found; a cached value of the
// wrong dynamic type is treated as a miss.
func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) {
	c.m.RLock()
	cached, ok := c.transports.Get(opts)
	c.m.RUnlock()
	if !ok {
		return nil, false
	}
	tr, isTransport := cached.(*http.Transport)
	if !isTransport {
		return nil, false
	}
	return tr, true
}

View File

@@ -19,7 +19,7 @@ type upSession struct {
*session
}
func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
func newUploadPackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
s, err := newSession(c, ep, auth)
return &upSession{s}, err
}
@@ -100,7 +100,6 @@ func (s *upSession) doRequest(
}
if err := NewErr(res); err != nil {
_ = res.Body.Close()
return nil, err
}

View File

@@ -11,7 +11,7 @@ import (
"errors"
"fmt"
"io"
stdioutil "io/ioutil"
"regexp"
"strings"
"time"
@@ -29,6 +29,10 @@ const (
var (
ErrTimeoutExceeded = errors.New("timeout exceeded")
// stdErrSkipPattern is used for skipping lines from a command's stderr output.
// Any line matching this pattern will be skipped from further
// processing and not be returned to calling code.
stdErrSkipPattern = regexp.MustCompile("^remote:( =*){0,1}$")
)
// Commander creates Command instances. This is the main entry point for
@@ -150,13 +154,20 @@ func (c *client) listenFirstError(r io.Reader) chan string {
errLine := make(chan string, 1)
go func() {
s := bufio.NewScanner(r)
if s.Scan() {
errLine <- s.Text()
} else {
close(errLine)
for {
if s.Scan() {
line := s.Text()
if !stdErrSkipPattern.MatchString(line) {
errLine <- line
break
}
} else {
close(errLine)
break
}
}
_, _ = io.Copy(stdioutil.Discard, r)
_, _ = io.Copy(io.Discard, r)
}()
return errLine
@@ -192,9 +203,22 @@ func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRe
}
func (s *session) handleAdvRefDecodeError(err error) error {
var errLine *pktline.ErrorLine
if errors.As(err, &errLine) {
if isRepoNotFoundError(errLine.Text) {
return transport.ErrRepositoryNotFound
}
return errLine
}
// If repository is not found, we get empty stdout and server writes an
// error to stderr.
if err == packp.ErrEmptyInput {
if errors.Is(err, packp.ErrEmptyInput) {
// TODO:(v6): handle this error in a better way.
// Instead of checking the stderr output for a specific error message,
// define an ExitError and embed the stderr output and exit (if one
// exists) in the error struct. Just like exec.ExitError.
s.finished = true
if err := s.checkNotFoundError(); err != nil {
return err
@@ -233,7 +257,13 @@ func (s *session) handleAdvRefDecodeError(err error) error {
// UploadPack performs a request to the server to fetch a packfile. A reader is
// returned with the packfile content. The reader must be closed after reading.
func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
if req.IsEmpty() && len(req.Shallows) == 0 {
if req.IsEmpty() {
// XXX: IsEmpty means haves are a subset of wants, in that case we have
// everything we asked for. Close the connection and return nil.
if err := s.finish(); err != nil {
return nil, err
}
// TODO:(v6) return nil here
return nil, transport.ErrEmptyUploadPackRequest
}
@@ -374,7 +404,7 @@ func (s *session) checkNotFoundError() error {
case <-t.C:
return ErrTimeoutExceeded
case line, ok := <-s.firstErrLine:
if !ok {
if !ok || len(line) == 0 {
return nil
}
@@ -382,59 +412,43 @@ func (s *session) checkNotFoundError() error {
return transport.ErrRepositoryNotFound
}
// TODO:(v6): return server error just as it is without a prefix
return fmt.Errorf("unknown error: %s", line)
}
}
var (
githubRepoNotFoundErr = "ERROR: Repository not found."
bitbucketRepoNotFoundErr = "conq: repository does not exist."
const (
githubRepoNotFoundErr = "Repository not found."
bitbucketRepoNotFoundErr = "repository does not exist."
localRepoNotFoundErr = "does not appear to be a git repository"
gitProtocolNotFoundErr = "ERR \n Repository not found."
gitProtocolNoSuchErr = "ERR no such repository"
gitProtocolAccessDeniedErr = "ERR access denied"
gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access"
gitProtocolNotFoundErr = "Repository not found."
gitProtocolNoSuchErr = "no such repository"
gitProtocolAccessDeniedErr = "access denied"
gogsAccessDeniedErr = "Repository does not exist or you do not have access"
gitlabRepoNotFoundErr = "The project you were looking for could not be found"
)
func isRepoNotFoundError(s string) bool {
if strings.HasPrefix(s, githubRepoNotFoundErr) {
return true
}
if strings.HasPrefix(s, bitbucketRepoNotFoundErr) {
return true
}
if strings.HasSuffix(s, localRepoNotFoundErr) {
return true
}
if strings.HasPrefix(s, gitProtocolNotFoundErr) {
return true
}
if strings.HasPrefix(s, gitProtocolNoSuchErr) {
return true
}
if strings.HasPrefix(s, gitProtocolAccessDeniedErr) {
return true
}
if strings.HasPrefix(s, gogsAccessDeniedErr) {
return true
for _, err := range []string{
githubRepoNotFoundErr,
bitbucketRepoNotFoundErr,
localRepoNotFoundErr,
gitProtocolNotFoundErr,
gitProtocolNoSuchErr,
gitProtocolAccessDeniedErr,
gogsAccessDeniedErr,
gitlabRepoNotFoundErr,
} {
if strings.Contains(s, err) {
return true
}
}
return false
}
var (
nak = []byte("NAK")
eol = []byte("\n")
)
// uploadPack implements the git-upload-pack protocol.
func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error {
func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) error {
// TODO support multi_ack mode
// TODO support multi_ack_detailed mode
// TODO support acks for common objects

View File

@@ -0,0 +1,46 @@
package common
import (
"bytes"
"io"
gogitioutil "github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/plumbing/transport"
)
// MockCommand is an in-memory Command implementation for tests. Its pipes
// are backed by bytes.Buffers that can be pre-populated with canned content.
//
// NOTE(review): all methods use value receivers, so each call operates on a
// copy of the struct; data written through the stdin pipe is not observable
// on the original value. Confirm this is intended before relying on stdin.
type MockCommand struct {
	stdin bytes.Buffer
	stdout bytes.Buffer
	stderr bytes.Buffer
}

// StderrPipe returns a reader over the canned stderr content.
func (c MockCommand) StderrPipe() (io.Reader, error) {
	return &c.stderr, nil
}

// StdinPipe returns a write-only view of the stdin buffer whose Close is a
// no-op.
func (c MockCommand) StdinPipe() (io.WriteCloser, error) {
	return gogitioutil.WriteNopCloser(&c.stdin), nil
}

// StdoutPipe returns a reader over the canned stdout content.
func (c MockCommand) StdoutPipe() (io.Reader, error) {
	return &c.stdout, nil
}

// Start is a no-op; the mock has no process to launch.
func (c MockCommand) Start() error {
	return nil
}

// Close is not supported by the mock and panics if called.
func (c MockCommand) Close() error {
	panic("not implemented")
}
// MockCommander builds MockCommands whose stderr pipe is pre-filled with the
// configured string.
type MockCommander struct {
	stderr string
}

// Command returns a MockCommand that emits c.stderr on its stderr pipe.
// The cmd, ep and auth arguments are ignored.
func (c MockCommander) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) {
	return &MockCommand{
		stderr: *bytes.NewBufferString(c.stderr),
	}, nil
}

View File

@@ -166,7 +166,7 @@ func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest
return nil, err
}
pr, pw := ioutil.Pipe()
pr, pw := io.Pipe()
e := packfile.NewEncoder(pw, s.storer, false)
go func() {
// TODO: plumb through a pack window.
@@ -189,7 +189,7 @@ func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Ha
}
func (*upSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}
@@ -355,7 +355,7 @@ func (s *rpSession) reportStatus() *packp.ReportStatus {
}
func (*rpSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}

View File

@@ -3,17 +3,15 @@ package ssh
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/mitchellh/go-homedir"
"github.com/skeema/knownhosts"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
)
const DefaultUsername = "git"
@@ -135,7 +133,7 @@ func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys,
// encoded private key. An encryption password should be given if the pemBytes
// contains a password encrypted PEM block otherwise password should be empty.
func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) {
bytes, err := ioutil.ReadFile(pemFile)
bytes, err := os.ReadFile(pemFile)
if err != nil {
return nil, err
}
@@ -224,12 +222,19 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
//
// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS
// environment variable, example:
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// If SSH_KNOWN_HOSTS is not set the following file locations will be used:
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
//
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
	// newKnownHosts resolves the known-hosts files; the result is converted
	// to the callback type expected by golang.org/x/crypto/ssh.
	kh, err := newKnownHosts(files...)
	return ssh.HostKeyCallback(kh), err
}
func newKnownHosts(files ...string) (knownhosts.HostKeyCallback, error) {
var err error
if len(files) == 0 {
@@ -251,7 +256,7 @@ func getDefaultKnownHostsFiles() ([]string, error) {
return files, nil
}
homeDirPath, err := homedir.Dir()
homeDirPath, err := os.UserHomeDir()
if err != nil {
return nil, err
}

View File

@@ -4,12 +4,14 @@ package ssh
import (
"context"
"fmt"
"net"
"reflect"
"strconv"
"strings"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
"github.com/skeema/knownhosts"
"github.com/kevinburke/ssh_config"
"golang.org/x/crypto/ssh"
@@ -121,10 +123,24 @@ func (c *command) connect() error {
if err != nil {
return err
}
hostWithPort := c.getHostWithPort()
if config.HostKeyCallback == nil {
kh, err := newKnownHosts()
if err != nil {
return err
}
config.HostKeyCallback = kh.HostKeyCallback()
config.HostKeyAlgorithms = kh.HostKeyAlgorithms(hostWithPort)
} else if len(config.HostKeyAlgorithms) == 0 {
// Set the HostKeyAlgorithms based on HostKeyCallback.
// For background see https://github.com/go-git/go-git/issues/411 as well as
// https://github.com/golang/go/issues/29286 for root cause.
config.HostKeyAlgorithms = knownhosts.HostKeyAlgorithms(config.HostKeyCallback, hostWithPort)
}
overrideConfig(c.config, config)
c.client, err = dial("tcp", c.getHostWithPort(), config)
c.client, err = dial("tcp", hostWithPort, c.endpoint.Proxy, config)
if err != nil {
return err
}
@@ -139,7 +155,7 @@ func (c *command) connect() error {
return nil
}
func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
func dial(network, addr string, proxyOpts transport.ProxyOptions, config *ssh.ClientConfig) (*ssh.Client, error) {
var (
ctx = context.Background()
cancel context.CancelFunc
@@ -151,10 +167,33 @@ func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
}
defer cancel()
conn, err := proxy.Dial(ctx, network, addr)
if err != nil {
return nil, err
var conn net.Conn
var dialErr error
if proxyOpts.URL != "" {
proxyUrl, err := proxyOpts.FullURL()
if err != nil {
return nil, err
}
dialer, err := proxy.FromURL(proxyUrl, proxy.Direct)
if err != nil {
return nil, err
}
// Try to use a ContextDialer, but fall back to a Dialer if that goes south.
ctxDialer, ok := dialer.(proxy.ContextDialer)
if !ok {
return nil, fmt.Errorf("expected ssh proxy dialer to be of type %s; got %s",
reflect.TypeOf(ctxDialer), reflect.TypeOf(dialer))
}
conn, dialErr = ctxDialer.DialContext(ctx, "tcp", addr)
} else {
conn, dialErr = proxy.Dial(ctx, network, addr)
}
if dialErr != nil {
return nil, dialErr
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
if err != nil {
return nil, err
@@ -173,7 +212,7 @@ func (c *command) getHostWithPort() string {
port = DefaultPort
}
return fmt.Sprintf("%s:%d", host, port)
return net.JoinHostPort(host, strconv.Itoa(port))
}
func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) {
@@ -201,7 +240,7 @@ func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) {
}
}
addr = fmt.Sprintf("%s:%d", host, port)
addr = net.JoinHostPort(host, strconv.Itoa(port))
return
}

View File

@@ -17,7 +17,7 @@ type PruneOptions struct {
Handler PruneHandler
}
var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported")
var ErrLooseObjectsNotSupported = errors.New("loose objects not supported")
// DeleteObject deletes an object from a repository.
// The type conveniently matches PruneHandler.

View File

@@ -1,264 +0,0 @@
package git
import (
"io"
"sort"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/utils/diff"
"github.com/sergi/go-diff/diffmatchpatch"
)
// References returns a slice of Commits for the file at "path", starting from
// the commit provided that contains the file from the provided path. The last
// commit into the returned slice is the commit where the file was created.
// If the provided commit does not contains the specified path, a nil slice is
// returned. The commits are sorted in commit order, newer to older.
//
// Caveats:
//
// - Moves and copies are not currently supported.
//
// - Cherry-picks are not detected unless there are no commits between them and
// therefore can appear repeated in the list. (see git path-id for hints on how
// to fix this).
func references(c *object.Commit, path string) ([]*object.Commit, error) {
	var commits []*object.Commit
	visited := map[plumbing.Hash]struct{}{}
	if err := walkGraph(&commits, &visited, c, path); err != nil {
		return nil, err
	}

	// TODO result should be returned without ordering
	sortCommits(commits)

	// Merges of identical cherry-picks can leave equivalent commits behind.
	return removeComp(path, commits, equivalent)
}
// sortCommits sorts a commit list by committer date, from older to newer,
// breaking committer-date ties with the author date. It replaces the former
// commitSorterer sort.Interface boilerplate with sort.Slice.
func sortCommits(l []*object.Commit) {
	sort.Slice(l, func(i, j int) bool {
		if l[i].Committer.When.Equal(l[j].Committer.When) {
			return l[i].Author.When.Before(l[j].Author.When)
		}
		return l[i].Committer.When.Before(l[j].Committer.When)
	})
}
// walkGraph performs a recursive traversal of the commit graph rooted at
// current, appending to result the linear history of commits that changed
// path. seen guards against revisiting commits reachable through more than
// one parent chain.
func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error {
	// check and update seen
	if _, ok := (*seen)[current.Hash]; ok {
		return nil
	}
	(*seen)[current.Hash] = struct{}{}

	// if the path is not in the current commit, stop searching.
	if _, err := current.File(path); err != nil {
		return nil
	}

	// optimization: don't traverse branches that do not
	// contain the path.
	parents, err := parentsContainingPath(path, current)
	if err != nil {
		return err
	}
	switch len(parents) {
	// if the path is not found in any of its parents, the path was
	// created by this commit; we must add it to the revisions list and
	// stop searching. This includes the case when current is the
	// initial commit.
	case 0:
		*result = append(*result, current)
		return nil
	case 1: // only one parent contains the path
		// if the file contents have changed, add the current commit
		different, err := differentContents(path, current, parents)
		if err != nil {
			return err
		}
		if len(different) == 1 {
			*result = append(*result, current)
		}
		// in any case, walk the parent
		return walkGraph(result, seen, parents[0], path)
	default: // more than one parent contains the path
		// TODO: detect merges that had a conflict, because they must be
		// included in the result here.
		for _, p := range parents {
			err := walkGraph(result, seen, p, path)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// parentsContainingPath returns the parents of c that contain a file at path.
func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) {
	// TODO: benchmark this method making git.object.Commit.parent public instead of using
	// an iterator
	var parents []*object.Commit
	err := c.Parents().ForEach(func(p *object.Commit) error {
		if _, err := p.File(path); err == nil {
			parents = append(parents, p)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return parents, nil
}
// differentContents returns the commits in cs that contain the file "path"
// with contents different from those found in commit c.
func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) {
	base, ok := blobHash(path, c)
	if !ok {
		return nil, object.ErrFileNotFound
	}
	changed := make([]*object.Commit, 0, len(cs))
	for _, candidate := range cs {
		if h, hasFile := blobHash(path, candidate); hasFile && h != base {
			changed = append(changed, candidate)
		}
	}
	return changed, nil
}
// blobHash returns the hash of the blob at path in the given commit, and
// whether the commit contains such a file at all.
func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) {
	f, err := commit.File(path)
	if err != nil {
		return plumbing.Hash{}, false
	}
	return f.Hash, true
}
// contentsComparatorFn compares the file at path between two commits,
// reporting whether they should be considered duplicates.
type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error)

// removeComp returns a new slice with duplicates removed from cs, which is
// expected to be sorted. Duplication is decided by comp; the first commit of
// every run of duplicates is kept.
func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) {
	out := make([]*object.Commit, 0, len(cs))
	if len(cs) == 0 {
		return out, nil
	}
	out = append(out, cs[0])
	for i, commit := range cs[1:] {
		// i indexes cs[1:], so cs[i] is the previous element of commit.
		same, err := comp(path, commit, cs[i])
		if err != nil {
			return nil, err
		}
		if !same {
			out = append(out, commit)
		}
	}
	return out, nil
}
// equivalent reports whether commits a and b introduce the same patch for
// path. Root commits and merges are never considered equivalent to anything.
func equivalent(path string, a, b *object.Commit) (bool, error) {
	// the first commit is not equivalent to anyone
	// and "I think" merges can not be equivalent to anything
	if a.NumParents() != 1 || b.NumParents() != 1 {
		return false, nil
	}

	diffsA, err := patch(a, path)
	if err != nil {
		return false, err
	}
	diffsB, err := patch(b, path)
	if err != nil {
		return false, err
	}
	return sameDiffs(diffsA, diffsB), nil
}
// patch computes the diff of the file at path between commit c and its first
// parent. When the parent does not contain the file, its content is treated
// as empty.
func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) {
	// Contents of the file in the commit itself.
	file, err := c.File(path)
	if err != nil {
		return nil, err
	}
	content, err := file.Contents()
	if err != nil {
		return nil, err
	}

	// Contents of the file in the first parent of the commit; empty when the
	// parent does not have the file.
	parent, err := c.Parents().Next()
	if err != nil {
		return nil, err
	}
	contentParent := ""
	if parentFile, err := parent.File(path); err == nil {
		contentParent, err = parentFile.Contents()
		if err != nil {
			return nil, err
		}
	}

	// compare the contents of parent and child
	return diff.Do(content, contentParent), nil
}
// sameDiffs reports whether two diff slices are element-wise equivalent.
func sameDiffs(a, b []diffmatchpatch.Diff) bool {
	if len(a) != len(b) {
		return false
	}
	for i, d := range a {
		if !sameDiff(d, b[i]) {
			return false
		}
	}
	return true
}
// sameDiff reports whether two diffs are equivalent. Equal chunks are
// compared only by line count, while insertions and deletions must match
// text exactly. Uses the named diffmatchpatch operation constants instead of
// the raw values 0, 1 and -1.
func sameDiff(a, b diffmatchpatch.Diff) bool {
	if a.Type != b.Type {
		return false
	}
	switch a.Type {
	case diffmatchpatch.DiffEqual:
		return countLines(a.Text) == countLines(b.Text)
	case diffmatchpatch.DiffInsert, diffmatchpatch.DiffDelete:
		return a.Text == b.Text
	default:
		panic("unreachable")
	}
}

View File

@@ -5,10 +5,12 @@ import (
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
@@ -31,6 +33,7 @@ var (
ErrDeleteRefNotSupported = errors.New("server does not support delete-refs")
ErrForceNeeded = errors.New("some refs were not updated")
ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec")
ErrEmptyUrls = errors.New("URLs cannot be empty")
)
type NoMatchingRefSpecError struct {
@@ -52,6 +55,9 @@ const (
// repo containing this remote, when not using the multi-ack
// protocol. Setting this to 0 means there is no limit.
maxHavesToVisitPerRef = 100
// peeledSuffix is the suffix used to build peeled reference names.
peeledSuffix = "^{}"
)
// Remote represents a connection to a remote repository.
@@ -103,7 +109,11 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name)
}
s, err := newSendPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle)
if o.RemoteURL == "" {
o.RemoteURL = r.c.URLs[0]
}
s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
if err != nil {
return err
}
@@ -183,12 +193,12 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
var hashesToPush []plumbing.Hash
// Avoid the expensive revlist operation if we're only doing deletes.
if !allDelete {
if r.c.IsFirstURLLocal() {
if url.IsLocalEndpoint(o.RemoteURL) {
// If we're are pushing to a local repo, it might be much
// faster to use a local storage layer to get the commits
// to ignore, when calculating the object revlist.
localStorer := filesystem.NewStorage(
osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault())
osfs.New(o.RemoteURL), cache.NewObjectLRUDefault())
hashesToPush, err = revlist.ObjectsWithStorageForIgnores(
r.s, localStorer, objects, haves)
} else {
@@ -214,17 +224,87 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
return err
}
if err = rs.Error(); err != nil {
return err
if rs != nil {
if err = rs.Error(); err != nil {
return err
}
}
return r.updateRemoteReferenceStorage(req, rs)
return r.updateRemoteReferenceStorage(req)
}
func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
return !ar.Capabilities.Supports(capability.OFSDelta)
}
func (r *Remote) addReachableTags(localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error {
tags := make(map[plumbing.Reference]struct{})
// get a list of all tags locally
for _, ref := range localRefs {
if strings.HasPrefix(string(ref.Name()), "refs/tags") {
tags[*ref] = struct{}{}
}
}
remoteRefIter, err := remoteRefs.IterReferences()
if err != nil {
return err
}
// remove any that are already on the remote
if err := remoteRefIter.ForEach(func(reference *plumbing.Reference) error {
delete(tags, *reference)
return nil
}); err != nil {
return err
}
for tag := range tags {
tagObject, err := object.GetObject(r.s, tag.Hash())
var tagCommit *object.Commit
if err != nil {
return fmt.Errorf("get tag object: %w", err)
}
if tagObject.Type() != plumbing.TagObject {
continue
}
annotatedTag, ok := tagObject.(*object.Tag)
if !ok {
return errors.New("could not get annotated tag object")
}
tagCommit, err = object.GetCommit(r.s, annotatedTag.Target)
if err != nil {
return fmt.Errorf("get annotated tag commit: %w", err)
}
// only include tags that are reachable from one of the refs
// already being pushed
for _, cmd := range req.Commands {
if tag.Name() == cmd.Name {
continue
}
if strings.HasPrefix(cmd.Name.String(), "refs/tags") {
continue
}
c, err := object.GetCommit(r.s, cmd.New)
if err != nil {
return fmt.Errorf("get commit %v: %w", cmd.Name, err)
}
if isAncestor, err := tagCommit.IsAncestor(c); err == nil && isAncestor {
req.Commands = append(req.Commands, &packp.Command{Name: tag.Name(), New: tag.Hash()})
}
}
}
return nil
}
func (r *Remote) newReferenceUpdateRequest(
o *PushOptions,
localRefs []*plumbing.Reference,
@@ -242,16 +322,33 @@ func (r *Remote) newReferenceUpdateRequest(
}
}
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil {
if ar.Capabilities.Supports(capability.PushOptions) {
_ = req.Capabilities.Set(capability.PushOptions)
for k, v := range o.Options {
req.Options = append(req.Options, &packp.Option{Key: k, Value: v})
}
}
if o.Atomic && ar.Capabilities.Supports(capability.Atomic) {
_ = req.Capabilities.Set(capability.Atomic)
}
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune, o.ForceWithLease); err != nil {
return nil, err
}
if o.FollowTags {
if err := r.addReachableTags(localRefs, remoteRefs, req); err != nil {
return nil, err
}
}
return req, nil
}
func (r *Remote) updateRemoteReferenceStorage(
req *packp.ReferenceUpdateRequest,
result *packp.ReportStatus,
) error {
for _, spec := range r.c.Fetch {
@@ -314,7 +411,11 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
o.RefSpecs = r.c.Fetch
}
s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle)
if o.RemoteURL == "" {
o.RemoteURL = r.c.URLs[0]
}
s, err := newUploadPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
if err != nil {
return nil, err
}
@@ -345,7 +446,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
return nil, err
}
refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags)
refs, specToRefs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags)
if err != nil {
return nil, err
}
@@ -357,9 +458,9 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
}
}
req.Wants, err = getWants(r.s, refs)
req.Wants, err = getWants(r.s, refs, o.Depth)
if len(req.Wants) > 0 {
req.Haves, err = getHaves(localRefs, remoteRefs, r.s)
req.Haves, err = getHaves(localRefs, remoteRefs, r.s, o.Depth)
if err != nil {
return nil, err
}
@@ -369,7 +470,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
}
}
updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force)
updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force)
if err != nil {
return nil, err
}
@@ -411,8 +512,8 @@ func depthChanged(before []plumbing.Hash, s storage.Storer) (bool, error) {
return false, nil
}
func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.UploadPackSession, error) {
c, ep, err := newClient(url, auth, insecure, cabundle)
func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.UploadPackSession, error) {
c, ep, err := newClient(url, insecure, cabundle, proxyOpts)
if err != nil {
return nil, err
}
@@ -420,8 +521,8 @@ func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool,
return c.NewUploadPackSession(ep, auth)
}
func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.ReceivePackSession, error) {
c, ep, err := newClient(url, auth, insecure, cabundle)
func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.ReceivePackSession, error) {
c, ep, err := newClient(url, insecure, cabundle, proxyOpts)
if err != nil {
return nil, err
}
@@ -429,13 +530,14 @@ func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, ca
return c.NewReceivePackSession(ep, auth)
}
func newClient(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.Transport, *transport.Endpoint, error) {
func newClient(url string, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.Transport, *transport.Endpoint, error) {
ep, err := transport.NewEndpoint(url)
if err != nil {
return nil, nil, err
}
ep.InsecureSkipTLS = insecure
ep.CaBundle = cabundle
ep.Proxy = proxyOpts
c, err := client.NewClient(ep)
if err != nil {
@@ -450,6 +552,10 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl
reader, err := s.UploadPack(ctx, req)
if err != nil {
if errors.Is(err, transport.ErrEmptyUploadPackRequest) {
// XXX: no packfile provided, everything is up-to-date.
return nil
}
return err
}
@@ -474,6 +580,7 @@ func (r *Remote) addReferencesToUpdate(
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
prune bool,
forceWithLease *ForceWithLease,
) error {
// This references dictionary will be used to search references by name.
refsDict := make(map[string]*plumbing.Reference)
@@ -487,7 +594,7 @@ func (r *Remote) addReferencesToUpdate(
return err
}
} else {
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req)
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req, forceWithLease)
if err != nil {
return err
}
@@ -509,20 +616,25 @@ func (r *Remote) addOrUpdateReferences(
refsDict map[string]*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
forceWithLease *ForceWithLease,
) error {
// If it is not a wilcard refspec we can directly search for the reference
// If it is not a wildcard refspec we can directly search for the reference
// in the references dictionary.
if !rs.IsWildcard() {
ref, ok := refsDict[rs.Src()]
if !ok {
commit, err := object.GetCommit(r.s, plumbing.NewHash(rs.Src()))
if err == nil {
return r.addCommit(rs, remoteRefs, commit.Hash, req)
}
return nil
}
return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
}
for _, ref := range localRefs {
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
if err != nil {
return err
}
@@ -569,9 +681,46 @@ func (r *Remote) deleteReferences(rs config.RefSpec,
})
}
func (r *Remote) addCommit(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, localCommit plumbing.Hash,
req *packp.ReferenceUpdateRequest) error {
if rs.IsWildcard() {
return errors.New("can't use wildcard together with hash refspecs")
}
cmd := &packp.Command{
Name: rs.Dst(""),
Old: plumbing.ZeroHash,
New: localCommit,
}
remoteRef, err := remoteRefs.Reference(cmd.Name)
if err == nil {
if remoteRef.Type() != plumbing.HashReference {
// TODO: check actual git behavior here
return nil
}
cmd.Old = remoteRef.Hash()
} else if err != plumbing.ErrReferenceNotFound {
return err
}
if cmd.Old == cmd.New {
return nil
}
if !rs.IsForceUpdate() {
if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
return err
}
}
req.Commands = append(req.Commands, cmd)
return nil
}
func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference,
req *packp.ReferenceUpdateRequest) error {
req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease) error {
if localRef.Type() != plumbing.HashReference {
return nil
@@ -590,7 +739,7 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
remoteRef, err := remoteRefs.Reference(cmd.Name)
if err == nil {
if remoteRef.Type() != plumbing.HashReference {
//TODO: check actual git behavior here
// TODO: check actual git behavior here
return nil
}
@@ -603,7 +752,11 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}
if !rs.IsForceUpdate() {
if forceWithLease != nil {
if err = r.checkForceWithLease(localRef, cmd, forceWithLease); err != nil {
return err
}
} else if !rs.IsForceUpdate() {
if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
return err
}
@@ -613,6 +766,31 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}
func (r *Remote) checkForceWithLease(localRef *plumbing.Reference, cmd *packp.Command, forceWithLease *ForceWithLease) error {
remotePrefix := fmt.Sprintf("refs/remotes/%s/", r.Config().Name)
ref, err := storer.ResolveReference(
r.s,
plumbing.ReferenceName(remotePrefix+strings.Replace(localRef.Name().String(), "refs/heads/", "", -1)))
if err != nil {
return err
}
if forceWithLease.RefName.String() == "" || (forceWithLease.RefName == cmd.Name) {
expectedOID := ref.Hash()
if !forceWithLease.Hash.IsZero() {
expectedOID = forceWithLease.Hash
}
if cmd.Old != expectedOID {
return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
}
}
return nil
}
func (r *Remote) references() ([]*plumbing.Reference, error) {
var localRefs []*plumbing.Reference
@@ -664,6 +842,7 @@ func getHavesFromRef(
remoteRefs map[plumbing.Hash]bool,
s storage.Storer,
haves map[plumbing.Hash]bool,
depth int,
) error {
h := ref.Hash()
if haves[h] {
@@ -689,7 +868,13 @@ func getHavesFromRef(
// commits from the history of each ref.
walker := object.NewCommitPreorderIter(commit, haves, nil)
toVisit := maxHavesToVisitPerRef
return walker.ForEach(func(c *object.Commit) error {
// But only need up to the requested depth
if depth > 0 && depth < maxHavesToVisitPerRef {
toVisit = depth
}
// It is safe to ignore any error here as we are just trying to find the references that we already have
// An example of a legitimate failure is we have a shallow clone and don't have the previous commit(s)
_ = walker.ForEach(func(c *object.Commit) error {
haves[c.Hash] = true
toVisit--
// If toVisit starts out at 0 (indicating there is no
@@ -700,12 +885,15 @@ func getHavesFromRef(
}
return nil
})
return nil
}
func getHaves(
localRefs []*plumbing.Reference,
remoteRefStorer storer.ReferenceStorer,
s storage.Storer,
depth int,
) ([]plumbing.Hash, error) {
haves := map[plumbing.Hash]bool{}
@@ -726,7 +914,7 @@ func getHaves(
continue
}
err = getHavesFromRef(ref, remoteRefs, s, haves)
err = getHavesFromRef(ref, remoteRefs, s, haves, depth)
if err != nil {
return nil, err
}
@@ -746,42 +934,41 @@ func calculateRefs(
spec []config.RefSpec,
remoteRefs storer.ReferenceStorer,
tagMode TagMode,
) (memory.ReferenceStorage, error) {
) (memory.ReferenceStorage, [][]*plumbing.Reference, error) {
if tagMode == AllTags {
spec = append(spec, refspecAllTags)
}
refs := make(memory.ReferenceStorage)
for _, s := range spec {
if err := doCalculateRefs(s, remoteRefs, refs); err != nil {
return nil, err
// list of references matched for each spec
specToRefs := make([][]*plumbing.Reference, len(spec))
for i := range spec {
var err error
specToRefs[i], err = doCalculateRefs(spec[i], remoteRefs, refs)
if err != nil {
return nil, nil, err
}
}
return refs, nil
return refs, specToRefs, nil
}
func doCalculateRefs(
s config.RefSpec,
remoteRefs storer.ReferenceStorer,
refs memory.ReferenceStorage,
) error {
iter, err := remoteRefs.IterReferences()
if err != nil {
return err
}
) ([]*plumbing.Reference, error) {
var refList []*plumbing.Reference
if s.IsExactSHA1() {
ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src()))
return refs.SetReference(ref)
refList = append(refList, ref)
return refList, refs.SetReference(ref)
}
var matched bool
err = iter.ForEach(func(ref *plumbing.Reference) error {
if !s.Match(ref.Name()) {
return nil
}
onMatched := func(ref *plumbing.Reference) error {
if ref.Type() == plumbing.SymbolicReference {
target, err := storer.ResolveReference(remoteRefs, ref.Name())
if err != nil {
@@ -796,28 +983,47 @@ func doCalculateRefs(
}
matched = true
if err := refs.SetReference(ref); err != nil {
return err
}
if !s.IsWildcard() {
return storer.ErrStop
}
return nil
})
if !matched && !s.IsWildcard() {
return NoMatchingRefSpecError{refSpec: s}
refList = append(refList, ref)
return refs.SetReference(ref)
}
return err
var ret error
if s.IsWildcard() {
iter, err := remoteRefs.IterReferences()
if err != nil {
return nil, err
}
ret = iter.ForEach(func(ref *plumbing.Reference) error {
if !s.Match(ref.Name()) {
return nil
}
return onMatched(ref)
})
} else {
var resolvedRef *plumbing.Reference
src := s.Src()
resolvedRef, ret = expand_ref(remoteRefs, plumbing.ReferenceName(src))
if ret == nil {
ret = onMatched(resolvedRef)
}
}
if !matched && !s.IsWildcard() {
return nil, NoMatchingRefSpecError{refSpec: s}
}
return refList, ret
}
func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) {
func getWants(localStorer storage.Storer, refs memory.ReferenceStorage, depth int) ([]plumbing.Hash, error) {
// If depth is anything other than 1 and the repo has shallow commits then just because we have the commit
// at the reference doesn't mean that we don't still need to fetch the parents
shallow := false
if s, _ := localStorer.Shallow(); len(s) > 0 {
shallow = true
if depth != 1 {
if s, _ := localStorer.Shallow(); len(s) > 0 {
shallow = true
}
}
wants := map[plumbing.Hash]bool{}
@@ -864,7 +1070,7 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe
return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
}
ff, err := isFastForward(s, cmd.Old, cmd.New)
ff, err := isFastForward(s, cmd.Old, cmd.New, nil)
if err != nil {
return err
}
@@ -876,14 +1082,28 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe
return nil
}
func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) {
func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earliestShallow *plumbing.Hash) (bool, error) {
c, err := object.GetCommit(s, new)
if err != nil {
return false, err
}
parentsToIgnore := []plumbing.Hash{}
if earliestShallow != nil {
earliestCommit, err := object.GetCommit(s, *earliestShallow)
if err != nil {
return false, err
}
parentsToIgnore = earliestCommit.ParentHashes
}
found := false
iter := object.NewCommitPreorderIter(c, nil, nil)
// stop iterating at the earlist shallow commit, ignoring its parents
// note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents.
// as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no
// real way of telling whether it will be a fast-forward merge.
iter := object.NewCommitPreorderIter(c, nil, parentsToIgnore)
err = iter.ForEach(func(c *object.Commit) error {
if c.Hash != old {
return nil
@@ -971,34 +1191,35 @@ func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.P
func (r *Remote) updateLocalReferenceStorage(
specs []config.RefSpec,
fetchedRefs, remoteRefs memory.ReferenceStorage,
specToRefs [][]*plumbing.Reference,
tagMode TagMode,
force bool,
) (updated bool, err error) {
isWildcard := true
forceNeeded := false
for _, spec := range specs {
for i, spec := range specs {
if !spec.IsWildcard() {
isWildcard = false
}
for _, ref := range fetchedRefs {
if !spec.Match(ref.Name()) && !spec.IsExactSHA1() {
continue
}
for _, ref := range specToRefs[i] {
if ref.Type() != plumbing.HashReference {
continue
}
localName := spec.Dst(ref.Name())
// If localName doesn't start with "refs/" then treat as a branch.
if !strings.HasPrefix(localName.String(), "refs/") {
localName = plumbing.NewBranchReferenceName(localName.String())
}
old, _ := storer.ResolveReference(r.s, localName)
new := plumbing.NewHashReference(localName, ref.Hash())
// If the ref exists locally as a branch and force is not specified,
// only update if the new ref is an ancestor of the old
if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() {
ff, err := isFastForward(r.s, old.Hash(), new.Hash())
// If the ref exists locally as a non-tag and force is not
// specified, only update if the new ref is an ancestor of the old
if old != nil && !old.Name().IsTag() && !force && !spec.IsForceUpdate() {
ff, err := isFastForward(r.s, old.Hash(), new.Hash(), nil)
if err != nil {
return updated, err
}
@@ -1077,21 +1298,29 @@ func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, e
// operation is complete, an error is returned. The context only affects to the
// transport operations.
func (r *Remote) ListContext(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) {
refs, err := r.list(ctx, o)
if err != nil {
return refs, err
}
return refs, nil
return r.list(ctx, o)
}
func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
timeout := o.Timeout
// Default to the old hardcoded 10s value if a timeout is not explicitly set.
if timeout == 0 {
timeout = 10
}
if timeout < 0 {
return nil, fmt.Errorf("invalid timeout: %d", timeout)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
defer cancel()
return r.ListContext(ctx, o)
}
func (r *Remote) list(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) {
s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle)
if r.c == nil || len(r.c.URLs) == 0 {
return nil, ErrEmptyUrls
}
s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
if err != nil {
return nil, err
}
@@ -1114,13 +1343,22 @@ func (r *Remote) list(ctx context.Context, o *ListOptions) (rfs []*plumbing.Refe
}
var resultRefs []*plumbing.Reference
err = refs.ForEach(func(ref *plumbing.Reference) error {
resultRefs = append(resultRefs, ref)
return nil
})
if err != nil {
return nil, err
if o.PeelingOption == AppendPeeled || o.PeelingOption == IgnorePeeled {
err = refs.ForEach(func(ref *plumbing.Reference) error {
resultRefs = append(resultRefs, ref)
return nil
})
if err != nil {
return nil, err
}
}
if o.PeelingOption == AppendPeeled || o.PeelingOption == OnlyPeeled {
for k, v := range ar.Peeled {
resultRefs = append(resultRefs, plumbing.NewReferenceFromStrings(k+"^{}", v.String()))
}
}
return resultRefs, nil
}
@@ -1166,8 +1404,7 @@ func pushHashes(
useRefDeltas bool,
allDelete bool,
) (*packp.ReportStatus, error) {
rd, wr := ioutil.Pipe()
rd, wr := io.Pipe()
config, err := s.Config()
if err != nil {

View File

@@ -3,32 +3,37 @@ package git
import (
"bytes"
"context"
"crypto"
"encoding/hex"
"errors"
"fmt"
stdioutil "io/ioutil"
"io"
"os"
"path"
"path/filepath"
"strings"
"time"
"dario.cat/mergo"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-billy/v5/util"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/internal/path_util"
"github.com/go-git/go-git/v5/internal/revision"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/storage"
"github.com/go-git/go-git/v5/storage/filesystem"
"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/imdario/mergo"
)
// GitDirName this is a special folder where all the git stuff is.
@@ -56,7 +61,9 @@ var (
ErrWorktreeNotProvided = errors.New("worktree should be provided")
ErrIsBareRepository = errors.New("worktree not available in a bare repository")
ErrUnableToResolveCommit = errors.New("unable to resolve commit")
ErrPackedObjectsNotSupported = errors.New("Packed objects not supported")
ErrPackedObjectsNotSupported = errors.New("packed objects not supported")
ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support")
ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme")
)
// Repository represents a git repository
@@ -67,14 +74,34 @@ type Repository struct {
wt billy.Filesystem
}
type InitOptions struct {
// The default branch (e.g. "refs/heads/master")
DefaultBranch plumbing.ReferenceName
}
// Init creates an empty git repository, based on the given Storer and worktree.
// The worktree Filesystem is optional, if nil a bare repository is created. If
// the given storer is not empty ErrRepositoryAlreadyExists is returned
func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
options := InitOptions{
DefaultBranch: plumbing.Master,
}
return InitWithOptions(s, worktree, options)
}
func InitWithOptions(s storage.Storer, worktree billy.Filesystem, options InitOptions) (*Repository, error) {
if err := initStorer(s); err != nil {
return nil, err
}
if options.DefaultBranch == "" {
options.DefaultBranch = plumbing.Master
}
if err := options.DefaultBranch.Validate(); err != nil {
return nil, err
}
r := newRepository(s, worktree)
_, err := r.Reference(plumbing.HEAD, false)
switch err {
@@ -85,7 +112,7 @@ func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
return nil, err
}
h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master)
h := plumbing.NewSymbolicReference(plumbing.HEAD, options.DefaultBranch)
if err := s.SetReference(h); err != nil {
return nil, err
}
@@ -214,9 +241,19 @@ func CloneContext(
// if the repository will have worktree (non-bare) or not (bare), if the path
// is not empty ErrRepositoryAlreadyExists is returned.
func PlainInit(path string, isBare bool) (*Repository, error) {
return PlainInitWithOptions(path, &PlainInitOptions{
Bare: isBare,
})
}
func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) {
if opts == nil {
opts = &PlainInitOptions{}
}
var wt, dot billy.Filesystem
if isBare {
if opts.Bare {
dot = osfs.New(path)
} else {
wt = osfs.New(path)
@@ -225,7 +262,31 @@ func PlainInit(path string, isBare bool) (*Repository, error) {
s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
return Init(s, wt)
r, err := InitWithOptions(s, wt, opts.InitOptions)
if err != nil {
return nil, err
}
cfg, err := r.Config()
if err != nil {
return nil, err
}
if opts.ObjectFormat != "" {
if opts.ObjectFormat == formatcfg.SHA256 && hash.CryptoType != crypto.SHA256 {
return nil, ErrSHA256NotSupported
}
cfg.Core.RepositoryFormatVersion = formatcfg.Version_1
cfg.Extensions.ObjectFormat = opts.ObjectFormat
}
err = r.Storer.SetConfig(cfg)
if err != nil {
return nil, err
}
return r, err
}
// PlainOpen opens a git repository from the given path. It detects if the
@@ -269,6 +330,11 @@ func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error)
}
func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) {
path, err = path_util.ReplaceTildeWithHome(path)
if err != nil {
return nil, nil, err
}
if path, err = filepath.Abs(path); err != nil {
return nil, nil, err
}
@@ -280,6 +346,9 @@ func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem,
pathinfo, err := fs.Stat("/")
if !os.IsNotExist(err) {
if pathinfo == nil {
return nil, nil, err
}
if !pathinfo.IsDir() && detect {
fs = osfs.New(filepath.Dir(path))
}
@@ -327,7 +396,7 @@ func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Files
}
defer ioutil.CheckClose(f, &err)
b, err := stdioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
return nil, err
}
@@ -356,7 +425,7 @@ func dotGitCommonDirectory(fs billy.Filesystem) (commonDir billy.Filesystem, err
return nil, err
}
b, err := stdioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
return nil, err
}
@@ -404,6 +473,9 @@ func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOp
return nil, err
}
if o.Mirror {
isBare = true
}
r, err := PlainInit(path, isBare)
if err != nil {
return nil, err
@@ -656,7 +728,10 @@ func (r *Repository) DeleteBranch(name string) error {
// CreateTag creates a tag. If opts is included, the tag is an annotated tag,
// otherwise a lightweight tag is created.
func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) {
rname := plumbing.ReferenceName(path.Join("refs", "tags", name))
rname := plumbing.NewTagReferenceName(name)
if err := rname.Validate(); err != nil {
return nil, err
}
_, err := r.Storer.Reference(rname)
switch err {
@@ -747,21 +822,20 @@ func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity)
// If you want to check to see if the tag is an annotated tag, you can call
// TagObject on the hash of the reference in ForEach:
//
// ref, err := r.Tag("v0.1.0")
// if err != nil {
// // Handle error
// }
//
// obj, err := r.TagObject(ref.Hash())
// switch err {
// case nil:
// // Tag object present
// case plumbing.ErrObjectNotFound:
// // Not a tag object
// default:
// // Some other error
// }
// ref, err := r.Tag("v0.1.0")
// if err != nil {
// // Handle error
// }
//
// obj, err := r.TagObject(ref.Hash())
// switch err {
// case nil:
// // Tag object present
// case plumbing.ErrObjectNotFound:
// // Not a tag object
// default:
// // Some other error
// }
func (r *Repository) Tag(name string) (*plumbing.Reference, error) {
ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false)
if err != nil {
@@ -812,15 +886,40 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
}
c := &config.RemoteConfig{
Name: o.RemoteName,
URLs: []string{o.URL},
Fetch: r.cloneRefSpec(o),
Name: o.RemoteName,
URLs: []string{o.URL},
Fetch: r.cloneRefSpec(o),
Mirror: o.Mirror,
}
if _, err := r.CreateRemote(c); err != nil {
return err
}
// When the repository to clone is on the local machine,
// instead of using hard links, automatically setup .git/objects/info/alternates
// to share the objects with the source repository
if o.Shared {
if !url.IsLocalEndpoint(o.URL) {
return ErrAlternatePathNotSupported
}
altpath := o.URL
remoteRepo, err := PlainOpen(o.URL)
if err != nil {
return fmt.Errorf("failed to open remote repository: %w", err)
}
conf, err := remoteRepo.Config()
if err != nil {
return fmt.Errorf("failed to read remote repository configuration: %w", err)
}
if !conf.Core.IsBare {
altpath = path.Join(altpath, GitDirName)
}
if err := r.Storer.AddAlternate(altpath); err != nil {
return fmt.Errorf("failed to add alternate file to git objects dir: %w", err)
}
}
ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
RefSpecs: c.Fetch,
Depth: o.Depth,
@@ -830,6 +929,7 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
RemoteName: o.RemoteName,
InsecureSkipTLS: o.InsecureSkipTLS,
CABundle: o.CABundle,
ProxyOptions: o.ProxyOptions,
}, o.ReferenceName)
if err != nil {
return err
@@ -856,7 +956,13 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
if o.RecurseSubmodules != NoRecurseSubmodules {
if err := w.updateSubmodules(&SubmoduleUpdateOptions{
RecurseSubmodules: o.RecurseSubmodules,
Auth: o.Auth,
Depth: func() int {
if o.ShallowSubmodules {
return 1
}
return 0
}(),
Auth: o.Auth,
}); err != nil {
return err
}
@@ -867,7 +973,7 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
return err
}
if ref.Name().IsBranch() {
if !o.Mirror && ref.Name().IsBranch() {
branchRef := ref.Name()
branchName := strings.Split(string(branchRef), "refs/heads/")[1]
@@ -898,6 +1004,8 @@ const (
func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
switch {
case o.Mirror:
return []config.RefSpec{"+refs/*:refs/*"}
case o.ReferenceName.IsTag():
return []config.RefSpec{
config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())),
@@ -905,7 +1013,6 @@ func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
return []config.RefSpec{
config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)),
config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)),
}
case o.SingleBranch:
return []config.RefSpec{
@@ -967,7 +1074,7 @@ func (r *Repository) fetchAndUpdateReferences(
return nil, err
}
resolvedRef, err := storer.ResolveReference(remoteRefs, ref)
resolvedRef, err := expand_ref(remoteRefs, ref)
if err != nil {
return nil, err
}
@@ -1238,26 +1345,25 @@ func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter {
// If you want to check to see if the tag is an annotated tag, you can call
// TagObject on the hash Reference passed in through ForEach:
//
// iter, err := r.Tags()
// if err != nil {
// // Handle error
// }
//
// if err := iter.ForEach(func (ref *plumbing.Reference) error {
// obj, err := r.TagObject(ref.Hash())
// switch err {
// case nil:
// // Tag object present
// case plumbing.ErrObjectNotFound:
// // Not a tag object
// default:
// // Some other error
// return err
// }
// }); err != nil {
// // Handle outer iterator error
// }
// iter, err := r.Tags()
// if err != nil {
// // Handle error
// }
//
// if err := iter.ForEach(func (ref *plumbing.Reference) error {
// obj, err := r.TagObject(ref.Hash())
// switch err {
// case nil:
// // Tag object present
// case plumbing.ErrObjectNotFound:
// // Not a tag object
// default:
// // Some other error
// return err
// }
// }); err != nil {
// // Handle outer iterator error
// }
func (r *Repository) Tags() (storer.ReferenceIter, error) {
refIter, err := r.Storer.IterReferences()
if err != nil {
@@ -1416,14 +1522,35 @@ func (r *Repository) Worktree() (*Worktree, error) {
return &Worktree{r: r, Filesystem: r.wt}, nil
}
func expand_ref(s storer.ReferenceStorer, ref plumbing.ReferenceName) (*plumbing.Reference, error) {
// For improving troubleshooting, this preserves the error for the provided `ref`,
// and returns the error for that specific ref in case all parse rules fails.
var ret error
for _, rule := range plumbing.RefRevParseRules {
resolvedRef, err := storer.ResolveReference(s, plumbing.ReferenceName(fmt.Sprintf(rule, ref)))
if err == nil {
return resolvedRef, nil
} else if ret == nil {
ret = err
}
}
return nil, ret
}
// ResolveRevision resolves revision to corresponding hash. It will always
// resolve to a commit hash, not a tree or annotated tag.
//
// Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch,
// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}), hash (prefix and full)
func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) {
p := revision.NewParserFromString(string(rev))
func (r *Repository) ResolveRevision(in plumbing.Revision) (*plumbing.Hash, error) {
rev := in.String()
if rev == "" {
return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
}
p := revision.NewParserFromString(rev)
items, err := p.Parse()
if err != nil {
@@ -1441,13 +1568,9 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
tryHashes = append(tryHashes, r.resolveHashPrefix(string(revisionRef))...)
for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
if err == nil {
tryHashes = append(tryHashes, ref.Hash())
break
}
ref, err := expand_ref(r.Storer, plumbing.ReferenceName(revisionRef))
if err == nil {
tryHashes = append(tryHashes, ref.Hash())
}
// in ambiguous cases, `git rev-parse` will emit a warning, but
@@ -1547,13 +1670,17 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
}
if c == nil {
return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String())
return &plumbing.ZeroHash, fmt.Errorf("no commit message match regexp: %q", re.String())
}
commit = c
}
}
if commit == nil {
return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
}
return &commit.Hash, nil
}

View File

@@ -7,19 +7,22 @@ import (
"errors"
"fmt"
"io"
stdioutil "io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"runtime"
"sort"
"strings"
"time"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/storage"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/helper/chroot"
)
const (
@@ -38,6 +41,7 @@ const (
remotesPath = "remotes"
logsPath = "logs"
worktreesPath = "worktrees"
alternatesPath = "alternates"
tmpPackedRefsPrefix = "._packed-refs"
@@ -78,6 +82,10 @@ type Options struct {
// KeepDescriptors makes the file descriptors to be reused but they will
// need to be manually closed calling Close().
KeepDescriptors bool
// AlternatesFS provides the billy filesystem to be used for Git Alternates.
// If none is provided, it falls back to using the underlying instance used for
// DotGit.
AlternatesFS billy.Filesystem
}
// The DotGit type represents a local git repository on disk. This
@@ -552,8 +560,8 @@ func (d *DotGit) hasPack(h plumbing.Hash) error {
}
func (d *DotGit) objectPath(h plumbing.Hash) string {
hash := h.String()
return d.fs.Join(objectsPath, hash[0:2], hash[2:40])
hex := h.String()
return d.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize])
}
// incomingObjectPath is intended to add support for a git pre-receive hook
@@ -563,15 +571,16 @@ func (d *DotGit) objectPath(h plumbing.Hash) string {
//
// More on git hooks found here : https://git-scm.com/docs/githooks
// More on 'quarantine'/incoming directory here:
// https://git-scm.com/docs/git-receive-pack
//
// https://git-scm.com/docs/git-receive-pack
func (d *DotGit) incomingObjectPath(h plumbing.Hash) string {
hString := h.String()
if d.incomingDirName == "" {
return d.fs.Join(objectsPath, hString[0:2], hString[2:40])
return d.fs.Join(objectsPath, hString[0:2], hString[2:hash.HexSize])
}
return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40])
return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:hash.HexSize])
}
// hasIncomingObjects searches for an incoming directory and keeps its name
@@ -581,7 +590,9 @@ func (d *DotGit) hasIncomingObjects() bool {
directoryContents, err := d.fs.ReadDir(objectsPath)
if err == nil {
for _, file := range directoryContents {
if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() {
if file.IsDir() && (strings.HasPrefix(file.Name(), "tmp_objdir-incoming-") ||
// Before Git 2.35 incoming commits directory had another prefix
strings.HasPrefix(file.Name(), "incoming-")) {
d.incomingDirName = file.Name()
}
}
@@ -645,7 +656,7 @@ func (d *DotGit) ObjectDelete(h plumbing.Hash) error {
}
func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) {
b, err := stdioutil.ReadAll(rd)
b, err := io.ReadAll(rd)
if err != nil {
return nil, err
}
@@ -716,48 +727,56 @@ func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) {
return d.packedRef(name)
}
func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) {
func (d *DotGit) findPackedRefsInFile(f billy.File, recv refsRecv) error {
s := bufio.NewScanner(f)
var refs []*plumbing.Reference
for s.Scan() {
ref, err := d.processLine(s.Text())
if err != nil {
return nil, err
return err
}
if ref != nil {
refs = append(refs, ref)
if !recv(ref) {
// skip parse
return nil
}
}
return refs, s.Err()
if err := s.Err(); err != nil {
return err
}
return nil
}
func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) {
// refsRecv: returning true means that the reference continues to be resolved, otherwise it is stopped, which will speed up the lookup of a single reference.
type refsRecv func(*plumbing.Reference) bool
func (d *DotGit) findPackedRefs(recv refsRecv) error {
f, err := d.fs.Open(packedRefsPath)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
return nil
}
return nil, err
return err
}
defer ioutil.CheckClose(f, &err)
return d.findPackedRefsInFile(f)
return d.findPackedRefsInFile(f, recv)
}
func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) {
refs, err := d.findPackedRefs()
if err != nil {
var ref *plumbing.Reference
if err := d.findPackedRefs(func(r *plumbing.Reference) bool {
if r != nil && r.Name() == name {
ref = r
// ref found
return false
}
return true
}); err != nil {
return nil, err
}
for _, ref := range refs {
if ref.Name() == name {
return ref, nil
}
if ref != nil {
return ref, nil
}
return nil, plumbing.ErrReferenceNotFound
}
@@ -777,34 +796,22 @@ func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error {
return d.rewritePackedRefsWithoutRef(name)
}
func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) {
packedRefs, err := d.findPackedRefs()
if err != nil {
return err
}
for _, ref := range packedRefs {
if !seen[ref.Name()] {
*refs = append(*refs, ref)
seen[ref.Name()] = true
func refsRecvFunc(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) refsRecv {
return func(r *plumbing.Reference) bool {
if r != nil && !seen[r.Name()] {
*refs = append(*refs, r)
seen[r.Name()] = true
}
return true
}
return nil
}
func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) {
return d.findPackedRefs(refsRecvFunc(refs, seen))
}
func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) {
packedRefs, err := d.findPackedRefsInFile(f)
if err != nil {
return err
}
for _, ref := range packedRefs {
if !seen[ref.Name()] {
*refs = append(*refs, ref)
seen[ref.Name()] = true
}
}
return nil
return d.findPackedRefsInFile(f, refsRecvFunc(refs, seen))
}
func (d *DotGit) openAndLockPackedRefs(doCreate bool) (
@@ -943,6 +950,7 @@ func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []strin
files, err := d.fs.ReadDir(d.fs.Join(relPath...))
if err != nil {
if os.IsNotExist(err) {
// a race happened, and our directory is gone now
return nil
}
@@ -960,6 +968,10 @@ func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []strin
}
ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/"))
if os.IsNotExist(err) {
// a race happened, and our file is gone now
continue
}
if err != nil {
return err
}
@@ -1101,38 +1113,93 @@ func (d *DotGit) Module(name string) (billy.Filesystem, error) {
return d.fs.Chroot(d.fs.Join(modulePath, name))
}
// AddAlternate appends the object directory of the given remote path to
// this repository's objects/info/alternates file, creating the file if
// it does not yet exist.
//
// The append is guarded by an advisory file lock, except on Windows
// where locking is skipped (see the linked discussion below).
func (d *DotGit) AddAlternate(remote string) error {
	altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)

	f, err := d.fs.OpenFile(altpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640)
	if err != nil {
		return fmt.Errorf("cannot open file: %w", err)
	}
	defer f.Close()

	// locking in windows throws an error, based on comments
	// https://github.com/go-git/go-git/pull/860#issuecomment-1751823044
	// do not lock on windows platform.
	if runtime.GOOS != "windows" {
		if err = f.Lock(); err != nil {
			return fmt.Errorf("cannot lock file: %w", err)
		}
		defer f.Unlock()
	}

	// One alternates entry per line, pointing at the remote's objects
	// directory.
	line := path.Join(remote, objectsPath) + "\n"
	_, err = io.WriteString(f, line)
	if err != nil {
		return fmt.Errorf("error writing 'alternates' file: %w", err)
	}

	return nil
}
// Alternates returns DotGit(s) based off paths in objects/info/alternates if
// available. This can be used to checks if it's a shared repository.
func (d *DotGit) Alternates() ([]*DotGit, error) {
altpath := d.fs.Join("objects", "info", "alternates")
altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)
f, err := d.fs.Open(altpath)
if err != nil {
return nil, err
}
defer f.Close()
fs := d.options.AlternatesFS
if fs == nil {
fs = d.fs
}
var alternates []*DotGit
seen := make(map[string]struct{})
// Read alternate paths line-by-line and create DotGit objects.
scanner := bufio.NewScanner(f)
for scanner.Scan() {
path := scanner.Text()
if !filepath.IsAbs(path) {
// For relative paths, we can perform an internal conversion to
// slash so that they work cross-platform.
slashPath := filepath.ToSlash(path)
// If the path is not absolute, it must be relative to object
// database (.git/objects/info).
// https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html
// Hence, derive a path relative to DotGit's root.
// "../../../reponame/.git/" -> "../../reponame/.git"
// Remove the first ../
relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...)
normalPath := filepath.FromSlash(relpath)
path = filepath.Join(d.fs.Root(), normalPath)
// Avoid creating multiple dotgits for the same alternative path.
if _, ok := seen[path]; ok {
continue
}
fs := osfs.New(filepath.Dir(path))
alternates = append(alternates, New(fs))
seen[path] = struct{}{}
if filepath.IsAbs(path) {
// Handling absolute paths should be straight-forward. However, the default osfs (Chroot)
// tries to concatenate an abs path with the root path in some operations (e.g. Stat),
// which leads to unexpected errors. Therefore, make the path relative to the current FS instead.
if reflect.TypeOf(fs) == reflect.TypeOf(&chroot.ChrootHelper{}) {
path, err = filepath.Rel(fs.Root(), path)
if err != nil {
return nil, fmt.Errorf("cannot make path %q relative: %w", path, err)
}
}
} else {
// By Git conventions, relative paths should be based on the object database (.git/objects/info)
// location as per: https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html
// However, due to the nature of go-git and its filesystem handling via Billy, paths cannot
// cross its "chroot boundaries". Therefore, ignore any "../" and treat the path from the
// fs root. If this is not correct based on the dotgit fs, set a different one via AlternatesFS.
abs := filepath.Join(string(filepath.Separator), filepath.ToSlash(path))
path = filepath.FromSlash(abs)
}
// Aligns with upstream behavior: exit if target path is not a valid directory.
if fi, err := fs.Stat(path); err != nil || !fi.IsDir() {
return nil, fmt.Errorf("invalid object directory %q: %w", path, err)
}
afs, err := fs.Chroot(filepath.Dir(path))
if err != nil {
return nil, fmt.Errorf("cannot chroot %q: %w", path, err)
}
alternates = append(alternates, New(afs))
}
if err = scanner.Err(); err != nil {

View File

@@ -0,0 +1,79 @@
package dotgit
import (
"fmt"
"io"
"os"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/objfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
// Compile-time check: EncodedObject satisfies plumbing.EncodedObject.
var _ (plumbing.EncodedObject) = &EncodedObject{}

// EncodedObject is a lazy, read-only plumbing.EncodedObject backed by a
// loose-object file inside a DotGit directory. Only the hash, type and
// size are held in memory; the contents are streamed on demand by Reader.
type EncodedObject struct {
	dir *DotGit             // repository layout used to locate the object file
	h   plumbing.Hash       // object hash
	t   plumbing.ObjectType // expected type, checked against the file header by Reader
	sz  int64               // expected size in bytes, checked against the file header by Reader
}
// Hash returns the object's hash.
func (e *EncodedObject) Hash() plumbing.Hash {
	return e.h
}
// Reader opens the loose-object file for e.h and returns a ReadCloser
// over its inflated contents. The on-disk header is read first and must
// match the expected type and size, otherwise objfile.ErrHeader is
// returned. A missing loose object maps to plumbing.ErrObjectNotFound.
//
// The returned ReadCloser closes both the objfile reader and the
// underlying file.
func (e *EncodedObject) Reader() (io.ReadCloser, error) {
	f, err := e.dir.Object(e.h)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, plumbing.ErrObjectNotFound
		}
		return nil, err
	}

	r, err := objfile.NewReader(f)
	if err != nil {
		// No reader was created; close the file here so the descriptor
		// is not leaked.
		_ = f.Close()
		return nil, err
	}

	t, size, err := r.Header()
	if err == nil && (t != e.t || size != e.sz) {
		err = objfile.ErrHeader
	}
	if err != nil {
		// On success f is closed through the returned ReadCloser (see
		// below); on failure both the reader and the file must be closed
		// here, since closing r alone does not close f.
		_ = r.Close()
		_ = f.Close()
		return nil, err
	}

	return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
// SetType is a no-op: the object type is fixed at construction time.
func (e *EncodedObject) SetType(plumbing.ObjectType) {}
// Type returns the expected object type.
func (e *EncodedObject) Type() plumbing.ObjectType {
	return e.t
}
// Size returns the expected object size in bytes.
func (e *EncodedObject) Size() int64 {
	return e.sz
}
// SetSize is a no-op: the object size is fixed at construction time.
func (e *EncodedObject) SetSize(int64) {}
// Writer is not supported: EncodedObject is a read-only view of an
// existing loose-object file.
func (e *EncodedObject) Writer() (io.WriteCloser, error) {
	return nil, fmt.Errorf("not supported")
}
// NewEncodedObject returns a lazy EncodedObject for the loose object
// identified by h inside dir, carrying the expected type t and size in
// bytes. Contents are only read when Reader is called.
func NewEncodedObject(dir *DotGit, h plumbing.Hash, t plumbing.ObjectType, size int64) *EncodedObject {
	return &EncodedObject{dir: dir, h: h, t: t, sz: size}
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/plumbing/format/objfile"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-billy/v5"
)
@@ -277,8 +278,8 @@ func (w *ObjectWriter) Close() error {
}
func (w *ObjectWriter) save() error {
hash := w.Hash().String()
file := w.fs.Join(objectsPath, hash[0:2], hash[2:40])
hex := w.Hash().String()
file := w.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize])
return w.fs.Rename(w.f.Name(), file)
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"io"
"os"
"sync"
"time"
"github.com/go-git/go-git/v5/plumbing"
@@ -145,6 +146,19 @@ func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.H
return o.Hash(), err
}
// LazyWriter returns a lazy ObjectWriter that is bound to a DotGit file.
// It first writes the header, passing on the object type and size, so
// that the object contents can be written later, without the need to
// create a MemoryObject and buffer its entire contents in memory.
//
// The wh callback (the writer's WriteHeader method) writes that header;
// call it before writing the contents through w.
func (s *ObjectStorage) LazyWriter() (w io.WriteCloser, wh func(typ plumbing.ObjectType, sz int64) error, err error) {
	ow, err := s.dir.NewObject()
	if err != nil {
		return nil, nil, err
	}
	return ow, ow.WriteHeader, nil
}
// HasEncodedObject returns nil if the object exists, without actually
// reading the object data from storage.
func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
@@ -204,9 +218,9 @@ func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfi
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache, s.options.LargeObjectThreshold)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
p = packfile.NewPackfile(idx, s.dir.Fs(), f, s.options.LargeObjectThreshold)
}
return p, s.storePackfileInCache(pack, p)
@@ -389,7 +403,6 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return cacheObj, nil
}
obj = s.NewEncodedObject()
r, err := objfile.NewReader(f)
if err != nil {
return nil, err
@@ -402,6 +415,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return nil, err
}
if s.options.LargeObjectThreshold > 0 && size > s.options.LargeObjectThreshold {
obj = dotgit.NewEncodedObject(s.dir, h, t, size)
return obj, nil
}
obj = s.NewEncodedObject()
obj.SetType(t)
obj.SetSize(size)
w, err := obj.Writer()
@@ -413,10 +433,21 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
s.objectCache.Put(obj)
_, err = io.Copy(w, r)
bufp := copyBufferPool.Get().(*[]byte)
buf := *bufp
_, err = io.CopyBuffer(w, r, buf)
copyBufferPool.Put(bufp)
return obj, err
}
// copyBufferPool recycles 32 KiB scratch buffers for io.CopyBuffer,
// sparing a fresh allocation on every loose-object read. A *[]byte is
// pooled (rather than the slice itself) so Put does not allocate when
// boxing the value into an interface.
var copyBufferPool = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 32*1024)
		return &buf
	},
}
// Get returns the object with the given hash, by searching for it in
// the packfile.
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
@@ -519,14 +550,21 @@ func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, pl
return plumbing.ZeroHash, plumbing.ZeroHash, -1
}
// HashesWithPrefix returns all objects with a hash that starts with a prefix by searching for
// them in the packfile and the git object directories.
func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) {
hashes, err := s.dir.ObjectsWithPrefix(prefix)
if err != nil {
return nil, err
}
seen := hashListAsMap(hashes)
// TODO: This could be faster with some idxfile changes,
// or diving into the packfile.
if err := s.requireIndex(); err != nil {
return nil, err
}
for _, index := range s.index {
ei, err := index.Entries()
if err != nil {
@@ -540,6 +578,9 @@ func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error)
return nil, err
}
if bytes.HasPrefix(e.Hash[:], prefix) {
if _, ok := seen[e.Hash]; ok {
continue
}
hashes = append(hashes, e.Hash)
}
}
@@ -595,6 +636,7 @@ func (s *ObjectStorage) buildPackfileIters(
return newPackfileIter(
s.dir.Fs(), pack, t, seen, s.index[h],
s.objectCache, s.options.KeepDescriptors,
s.options.LargeObjectThreshold,
)
},
}, nil
@@ -684,6 +726,7 @@ func NewPackfileIter(
idxFile billy.File,
t plumbing.ObjectType,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
idx := idxfile.NewMemoryIndex()
if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
@@ -695,7 +738,7 @@ func NewPackfileIter(
}
seen := make(map[plumbing.Hash]struct{})
return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
return newPackfileIter(fs, f, t, seen, idx, nil, keepPack, largeObjectThreshold)
}
func newPackfileIter(
@@ -706,12 +749,13 @@ func newPackfileIter(
index idxfile.Index,
cache cache.Object,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
var p *packfile.Packfile
if cache != nil {
p = packfile.NewPackfileWithCache(index, fs, f, cache)
p = packfile.NewPackfileWithCache(index, fs, f, cache, largeObjectThreshold)
} else {
p = packfile.NewPackfile(index, fs, f)
p = packfile.NewPackfile(index, fs, f, largeObjectThreshold)
}
iter, err := p.GetByType(t)

View File

@@ -34,7 +34,7 @@ func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
return err
}
// Shallow return the shallow commits reading from shallo file from .git
// Shallow returns the shallow commits read from the shallow file in .git
func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
f, err := s.dir.Shallow()
if f == nil || err != nil {

View File

@@ -34,6 +34,13 @@ type Options struct {
// MaxOpenDescriptors is the max number of file descriptors to keep
// open. If KeepDescriptors is true, all file descriptors will remain open.
MaxOpenDescriptors int
// LargeObjectThreshold is the maximum object size (in bytes) that will be read into memory.
// If left unset or set to 0, there is no limit.
LargeObjectThreshold int64
// AlternatesFS provides the billy filesystem to be used for Git Alternates.
// If none is provided, it falls back to using the underlying instance used for
// DotGit.
AlternatesFS billy.Filesystem
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
@@ -46,6 +53,7 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
dirOps := dotgit.Options{
ExclusiveAccess: ops.ExclusiveAccess,
AlternatesFS: ops.AlternatesFS,
}
dir := dotgit.NewWithOptions(fs, dirOps)
@@ -71,3 +79,7 @@ func (s *Storage) Filesystem() billy.Filesystem {
func (s *Storage) Init() error {
return s.dir.Initialize()
}
// AddAlternate registers the given remote path as an object alternate by
// delegating to the underlying dotgit directory.
func (s *Storage) AddAlternate(remote string) error {
	return s.dir.AddAlternate(remote)
}

View File

@@ -193,7 +193,7 @@ func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) er
return nil
}
var errNotSupported = fmt.Errorf("Not supported")
var errNotSupported = fmt.Errorf("not supported")
func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
return time.Time{}, errNotSupported
@@ -202,6 +202,10 @@ func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
return errNotSupported
}
// AddAlternate is not supported by the in-memory storage and always
// returns errNotSupported.
func (o *ObjectStorage) AddAlternate(remote string) error {
	return errNotSupported
}
type TxObjectStorage struct {
Storage *ObjectStorage
Objects map[plumbing.Hash]plumbing.EncodedObject

View File

@@ -5,13 +5,13 @@ import (
"context"
"errors"
"fmt"
"net/url"
"path"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/index"
"github.com/go-git/go-git/v5/plumbing/transport"
)
var (
@@ -133,29 +133,29 @@ func (s *Submodule) Repository() (*Repository, error) {
return nil, err
}
moduleURL, err := url.Parse(s.c.URL)
moduleEndpoint, err := transport.NewEndpoint(s.c.URL)
if err != nil {
return nil, err
}
if !path.IsAbs(moduleURL.Path) {
if !path.IsAbs(moduleEndpoint.Path) && moduleEndpoint.Protocol == "file" {
remotes, err := s.w.r.Remotes()
if err != nil {
return nil, err
}
rootURL, err := url.Parse(remotes[0].c.URLs[0])
rootEndpoint, err := transport.NewEndpoint(remotes[0].c.URLs[0])
if err != nil {
return nil, err
}
rootURL.Path = path.Join(rootURL.Path, moduleURL.Path)
*moduleURL = *rootURL
rootEndpoint.Path = path.Join(rootEndpoint.Path, moduleEndpoint.Path)
*moduleEndpoint = *rootEndpoint
}
_, err = r.CreateRemote(&config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{moduleURL.String()},
URLs: []string{moduleEndpoint.String()},
})
return r, err
@@ -243,7 +243,7 @@ func (s *Submodule) fetchAndCheckout(
ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash,
) error {
if !o.NoFetch {
err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth})
err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth, Depth: o.Depth})
if err != nil && err != NoErrAlreadyUpToDate {
return err
}
@@ -265,6 +265,7 @@ func (s *Submodule) fetchAndCheckout(
err := r.FetchContext(ctx, &FetchOptions{
Auth: o.Auth,
RefSpecs: []config.RefSpec{refSpec},
Depth: o.Depth,
})
if err != nil && err != NoErrAlreadyUpToDate && err != ErrExactSHA1NotSupported {
return err

View File

@@ -1,4 +1,4 @@
// Package binary implements sintax-sugar functions on top of the standard
// Package binary implements syntax-sugar functions on top of the standard
// library binary package
package binary

Some files were not shown because too many files have changed in this diff Show More