chore(deps): bump github.com/go-git/go-git/v5 from 5.16.5 to 5.17.2

Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.16.5 to 5.17.2.
- [Release notes](https://github.com/go-git/go-git/releases)
- [Commits](https://github.com/go-git/go-git/compare/v5.16.5...v5.17.2)

---
updated-dependencies:
- dependency-name: github.com/go-git/go-git/v5
  dependency-version: 5.17.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot]
2026-04-02 22:12:19 +00:00
committed by GitHub
parent b8098dc1b9
commit dac645c366
19 changed files with 542 additions and 90 deletions

View File

@@ -13,7 +13,7 @@ type Polyfill struct {
c capabilities
}
type capabilities struct{ tempfile, dir, symlink, chroot bool }
type capabilities struct{ tempfile, dir, symlink, chroot, chmod bool }
// New creates a new filesystem wrapping up 'fs' that intercepts all the calls
// made and errors if fs doesn't implement any of the billy interfaces.
@@ -28,6 +28,7 @@ func New(fs billy.Basic) billy.Filesystem {
_, h.c.dir = h.Basic.(billy.Dir)
_, h.c.symlink = h.Basic.(billy.Symlink)
_, h.c.chroot = h.Basic.(billy.Chroot)
_, h.c.chmod = h.Basic.(billy.Chmod)
return h
}
@@ -87,6 +88,14 @@ func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) {
return h.Basic.(billy.Chroot).Chroot(path)
}
// Chmod changes the mode of the named file. It returns
// billy.ErrNotSupported when the wrapped filesystem does not implement
// billy.Chmod (as detected when the Polyfill was constructed).
func (h *Polyfill) Chmod(path string, mode os.FileMode) error {
	if !h.c.chmod {
		return billy.ErrNotSupported
	}
	return h.Basic.(billy.Chmod).Chmod(path, mode)
}
func (h *Polyfill) Root() string {
if !h.c.chroot {
return string(filepath.Separator)

View File

@@ -126,6 +126,14 @@ func (fs *BoundOS) TempFile(dir, prefix string) (billy.File, error) {
if err != nil {
return nil, err
}
_, err = os.Stat(dir)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(dir, defaultDirectoryMode)
if err != nil {
return nil, err
}
}
}
return tempFile(dir, prefix)

View File

@@ -91,8 +91,8 @@ func readVersion(idx *MemoryIndex, r io.Reader) error {
return err
}
if v > VersionSupported {
return ErrUnsupportedVersion
if v != VersionSupported {
return fmt.Errorf("%w: v%d", ErrUnsupportedVersion, v)
}
idx.Version = v
@@ -106,6 +106,10 @@ func readFanout(idx *MemoryIndex, r io.Reader) error {
return err
}
if k > 0 && n < idx.Fanout[k-1] {
return fmt.Errorf("%w: fanout table is not monotonically non-decreasing at entry %d", ErrMalformedIdxFile, k)
}
idx.Fanout[k] = n
idx.FanoutMapping[k] = noMapping
}
@@ -155,7 +159,7 @@ func readCRC32(idx *MemoryIndex, r io.Reader) error {
}
func readOffsets(idx *MemoryIndex, r io.Reader) error {
var o64cnt int
var o64cnt int64
for k := 0; k < fanout; k++ {
if pos := idx.FanoutMapping[k]; pos != noMapping {
if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil {

View File

@@ -4,8 +4,8 @@ import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strconv"
"time"
@@ -26,12 +26,14 @@ var (
ErrInvalidChecksum = errors.New("invalid checksum")
// ErrUnknownExtension is returned when an index extension is encountered that is considered mandatory
ErrUnknownExtension = errors.New("unknown extension")
// ErrMalformedIndexFile is returned when the index file contents are
// structurally invalid.
ErrMalformedIndexFile = errors.New("index decoder: malformed index file")
)
const (
entryHeaderLength = 62
entryExtended = 0x4000
entryValid = 0x8000
nameMask = 0xfff
intentToAddMask = 1 << 13
skipWorkTreeMask = 1 << 14
@@ -140,33 +142,55 @@ func (d *Decoder) readEntry(idx *Index) (*Entry, error) {
e.SkipWorktree = extended&skipWorkTreeMask != 0
}
if err := d.readEntryName(idx, e, flags); err != nil {
nameConsumed, err := d.readEntryName(idx, e, flags)
if err != nil {
return nil, err
}
return e, d.padEntry(idx, e, read)
return e, d.padEntry(idx, e, read, nameConsumed)
}
func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error {
var name string
var err error
// readEntryName reads the entry path and sets e.Name. It returns the
// number of bytes consumed from the stream for the name portion.
func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) (int, error) {
switch idx.Version {
case 2, 3:
len := flags & nameMask
name, err = d.doReadEntryName(len)
nameLen := flags & nameMask
name, consumed, err := d.doReadEntryName(nameLen)
if err != nil {
return 0, err
}
e.Name = name
return consumed, nil
case 4:
name, err = d.doReadEntryNameV4()
name, err := d.doReadEntryNameV4()
if err != nil {
return 0, err
}
e.Name = name
return 0, nil // V4 has no padding; consumed count unused
default:
return ErrUnsupportedVersion
return 0, ErrUnsupportedVersion
}
}
// doReadEntryName reads the entry path for V2/V3 indexes. It returns the
// name, the number of bytes consumed from the stream, and any error.
// When nameLen equals nameMask (0xFFF), the name was too long to fit in
// the 12-bit field and the real length is found by scanning for the NUL
// terminator — matching C Git's strlen(name) fallback in create_from_disk.
func (d *Decoder) doReadEntryName(nameLen uint16) (string, int, error) {
	if nameLen == nameMask {
		// Saturated length field: read up to the NUL terminator instead.
		name, err := binary.ReadUntil(d.r, '\x00')
		if err != nil {
			return "", 0, err
		}
		return string(name), len(name) + 1, nil // +1 for the consumed NUL delimiter
	}

	name := make([]byte, nameLen)
	_, err := io.ReadFull(d.r, name)
	return string(name), int(nameLen), err
}
func (d *Decoder) doReadEntryNameV4() (string, error) {
@@ -177,7 +201,14 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
var base string
if d.lastEntry != nil {
if l < 0 || int(l) > len(d.lastEntry.Name) {
return "", fmt.Errorf("%w: invalid V4 entry name strip length %d (previous name length: %d)",
ErrMalformedIndexFile, l, len(d.lastEntry.Name))
}
base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)]
} else if l > 0 {
return "", fmt.Errorf("%w: non-zero strip length %d on first V4 entry",
ErrMalformedIndexFile, l)
}
name, err := binary.ReadUntil(d.r, '\x00')
@@ -188,24 +219,23 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
return base + string(name), nil
}
func (d *Decoder) doReadEntryName(len uint16) (string, error) {
name := make([]byte, len)
_, err := io.ReadFull(d.r, name)
return string(name), err
}
// Index entries are padded out to the next 8 byte alignment
// for historical reasons related to how C Git read the files.
func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
// padEntry discards NUL padding bytes that follow each V2/V3 entry on
// disk. nameConsumed is the number of stream bytes consumed while reading
// the entry name (which may exceed len(e.Name) when a NUL terminator was
// consumed for long names where the 12-bit length field overflowed).
func (d *Decoder) padEntry(idx *Index, e *Entry, read, nameConsumed int) error {
if idx.Version == 4 {
return nil
}
entrySize := read + len(e.Name)
padLen := 8 - entrySize%8
_, err := io.CopyN(io.Discard, d.r, int64(padLen))
return err
padLen -= nameConsumed - len(e.Name)
if padLen > 0 {
_, err := io.CopyN(io.Discard, d.r, int64(padLen))
return err
}
return nil
}
func (d *Decoder) readExtensions(idx *Index) error {
@@ -312,7 +342,7 @@ func (d *Decoder) readChecksum(expected []byte) error {
}
func validateHeader(r io.Reader) (version uint32, err error) {
var s = make([]byte, 4)
s := make([]byte, 4)
if _, err := io.ReadFull(r, s); err != nil {
return 0, err
}
@@ -376,24 +406,26 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
return nil, err
}
// An entry can be in an invalidated state and is represented by having a
// negative number in the entry_count field.
if i == -1 {
return nil, nil
}
e.Entries = i
trees, err := binary.ReadUntil(d.r, '\n')
if err != nil {
return nil, err
}
i, err = strconv.Atoi(string(trees))
subtrees, err := strconv.Atoi(string(trees))
if err != nil {
return nil, err
}
e.Trees = i
e.Trees = subtrees
// An entry can be in an invalidated state and is represented by having a
// negative number in the entry_count field. In this case, there is no
// object name and the next entry starts immediately after the newline.
if i < 0 {
return nil, nil
}
_, err = io.ReadFull(d.r, e.Hash[:])
if err != nil {
return nil, err

View File

@@ -5,9 +5,7 @@ import (
"errors"
"fmt"
"io"
"path"
"sort"
"strings"
"time"
"github.com/go-git/go-git/v5/plumbing/hash"
@@ -160,26 +158,39 @@ func (e *Encoder) encodeEntryName(entry *Entry) error {
}
func (e *Encoder) encodeEntryNameV4(entry *Entry) error {
name := entry.Name
l := 0
// V4 prefix compression: find the longest common prefix between the
// previous entry's name and the current one. The strip length tells
// the decoder how many bytes to remove from the end of the previous
// name, and the suffix is the remainder of the current name.
prefix := 0
if e.lastEntry != nil {
dir := path.Dir(e.lastEntry.Name) + "/"
if strings.HasPrefix(entry.Name, dir) {
l = len(e.lastEntry.Name) - len(dir)
name = strings.TrimPrefix(entry.Name, dir)
} else {
l = len(e.lastEntry.Name)
}
prefix = commonPrefixLen(e.lastEntry.Name, entry.Name)
}
stripLen := 0
if e.lastEntry != nil {
stripLen = len(e.lastEntry.Name) - prefix
}
e.lastEntry = entry
err := binary.WriteVariableWidthInt(e.w, int64(l))
if err != nil {
if err := binary.WriteVariableWidthInt(e.w, int64(stripLen)); err != nil {
return err
}
return binary.Write(e.w, []byte(name+string('\x00')))
suffix := entry.Name[prefix:]
return binary.Write(e.w, append([]byte(suffix), '\x00'))
}
// commonPrefixLen returns the length of the longest common byte prefix
// between a and b.
func commonPrefixLen(a, b string) int {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	i := 0
	for i < limit && a[i] == b[i] {
		i++
	}
	return i
}
func (e *Encoder) encodeRawExtension(signature string, data []byte) error {

View File

@@ -54,6 +54,8 @@ type Index struct {
ResolveUndo *ResolveUndo
// EndOfIndexEntry represents the 'End of Index Entry' extension
EndOfIndexEntry *EndOfIndexEntry
// ModTime is the modification time of the index file
ModTime time.Time
}
// Add creates a new Entry and returns it. The caller should first check that

View File

@@ -208,6 +208,12 @@ func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
return nil, ErrRepositoryNotExists
}
cfg, err := s.Config()
if err != nil {
return nil, err
}
err = verifyExtensions(s, cfg)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,121 @@
package git
import (
"errors"
"fmt"
"strings"
"github.com/go-git/go-git/v5/config"
cfgformat "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/go-git/go-git/v5/storage"
)
var (
	// ErrUnsupportedExtensionRepositoryFormatVersion represents when an
	// extension being used is not compatible with the repository's
	// core.repositoryFormatVersion.
	ErrUnsupportedExtensionRepositoryFormatVersion = errors.New("core.repositoryformatversion does not support extension")

	// ErrUnsupportedRepositoryFormatVersion represents when a repository
	// is using a format version that is not supported.
	ErrUnsupportedRepositoryFormatVersion = errors.New("core.repositoryformatversion not supported")

	// ErrUnknownExtension represents when a repository has an extension
	// which is unknown or unsupported by go-git.
	ErrUnknownExtension = errors.New("unknown extension")

	// builtinExtensions defines the Git extensions that are supported by
	// the core go-git implementation.
	//
	// Some extensions are storage-specific; those are defined by the Storers
	// themselves by implementing the ExtensionChecker interface.
	builtinExtensions = map[string]struct{}{
		// noop does not change Git's behavior at all.
		// It is useful only for testing format-1 compatibility.
		//
		// This extension is respected regardless of the
		// core.repositoryFormatVersion setting.
		"noop": {},

		// noop-v1 does not change Git's behavior at all.
		// It is useful only for testing format-1 compatibility.
		"noop-v1": {},
	}

	// Some Git extensions were supported upstream before the introduction
	// of repositoryformatversion. These are the only extensions that can be
	// enabled while core.repositoryformatversion is unset or set to 0.
	extensionsValidForV0 = map[string]struct{}{
		"noop":            {},
		"partialClone":    {},
		"preciousObjects": {},
		"worktreeConfig":  {},
	}
)
// extension is a single parsed entry from the repository configuration's
// [extensions] section. Both fields are stored lowercased by extensions().
type extension struct {
	name  string
	value string
}
// extensions returns the lowercased key/value pairs found in the
// configuration's [extensions] section. It returns nil when cfg, its raw
// representation, or the section itself is absent.
func extensions(cfg *config.Config) []extension {
	if cfg == nil || cfg.Raw == nil || !cfg.Raw.HasSection("extensions") {
		return nil
	}

	opts := cfg.Raw.Section("extensions").Options
	result := make([]extension, 0, len(opts))
	for _, o := range opts {
		result = append(result, extension{
			name:  strings.ToLower(o.Key),
			value: strings.ToLower(o.Value),
		})
	}
	return result
}
// verifyExtensions validates the repository's configured extensions against
// its core.repositoryformatversion: the version itself must be supported,
// V0 repositories may only use the historical pre-versioning extensions,
// and every configured extension must be one go-git implements.
//
// NOTE(review): st is not used in this function body — presumably reserved
// for storage-specific extension checks; confirm against callers.
func verifyExtensions(st storage.Storer, cfg *config.Config) error {
	needed := extensions(cfg)

	// Only format versions 0 (possibly implicit, i.e. "") and 1 are accepted.
	switch cfg.Core.RepositoryFormatVersion {
	case "", cfgformat.Version_0, cfgformat.Version_1:
	default:
		return fmt.Errorf("%w: %q",
			ErrUnsupportedRepositoryFormatVersion,
			cfg.Core.RepositoryFormatVersion)
	}

	if len(needed) > 0 {
		// Under format version 0 (explicit or unset) only the extensions
		// listed in extensionsValidForV0 may be enabled.
		if cfg.Core.RepositoryFormatVersion == cfgformat.Version_0 ||
			cfg.Core.RepositoryFormatVersion == "" {
			var unsupported []string
			for _, ext := range needed {
				if _, ok := extensionsValidForV0[ext.name]; !ok {
					unsupported = append(unsupported, ext.name)
				}
			}
			if len(unsupported) > 0 {
				return fmt.Errorf("%w: %s",
					ErrUnsupportedExtensionRepositoryFormatVersion,
					strings.Join(unsupported, ", "))
			}
		}

		// Every configured extension must be known to go-git.
		var missing []string
		for _, ext := range needed {
			if _, ok := builtinExtensions[ext.name]; ok {
				continue
			}
			missing = append(missing, ext.name)
		}
		if len(missing) > 0 {
			return fmt.Errorf("%w: %s", ErrUnknownExtension, strings.Join(missing, ", "))
		}
	}
	return nil
}

View File

@@ -3,6 +3,7 @@ package dotgit
import (
"fmt"
"io"
"os"
"sync/atomic"
"github.com/go-git/go-git/v5/plumbing"
@@ -131,20 +132,62 @@ func (w *PackWriter) clean() error {
func (w *PackWriter) save() error {
base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum))
idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base))
// Pack files are content addressable. Each file is checked
// individually — if it already exists on disk, skip creating it.
idxPath := fmt.Sprintf("%s.idx", base)
exists, err := fileExists(w.fs, idxPath)
if err != nil {
return err
}
if !exists {
idx, err := w.fs.Create(idxPath)
if err != nil {
return err
}
if err := w.encodeIdx(idx); err != nil {
return err
if err := w.encodeIdx(idx); err != nil {
_ = idx.Close()
return err
}
if err := idx.Close(); err != nil {
return err
}
fixPermissions(w.fs, idxPath)
}
if err := idx.Close(); err != nil {
packPath := fmt.Sprintf("%s.pack", base)
exists, err = fileExists(w.fs, packPath)
if err != nil {
return err
}
if !exists {
if err := w.fs.Rename(w.fw.Name(), packPath); err != nil {
return err
}
fixPermissions(w.fs, packPath)
} else {
// Pack already exists, clean up the temp file.
return w.clean()
}
return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base))
return nil
}
// fileExists checks whether path already exists as a regular file.
// It returns (true, nil) for an existing regular file, (false, nil) when the
// path does not exist, and (false, err) if the path exists but is not a
// regular file (e.g. a directory or symlink) or cannot be inspected.
func fileExists(fs billy.Filesystem, path string) (bool, error) {
	fi, err := fs.Lstat(path)
	if err != nil {
		// Only a missing path means "does not exist". Any other Lstat
		// failure (e.g. permission denied) must be surfaced, otherwise
		// the caller would wrongly try to create an existing file.
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	if !fi.Mode().IsRegular() {
		return false, fmt.Errorf("unexpected file type for %q: %s", path, fi.Mode().Type())
	}
	return true, nil
}
func (w *PackWriter) encodeIdx(writer io.Writer) error {
@@ -226,7 +269,6 @@ func (s *syncedReader) sleep() {
atomic.StoreUint32(&s.blocked, 1)
<-s.news
}
}
func (s *syncedReader) Seek(offset int64, whence int) (int64, error) {
@@ -281,5 +323,17 @@ func (w *ObjectWriter) save() error {
hex := w.Hash().String()
file := w.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize])
return w.fs.Rename(w.f.Name(), file)
// Loose objects are content addressable, if they already exist
// we can safely delete the temporary file and short-circuit the
// operation.
if _, err := w.fs.Lstat(file); err == nil || os.IsExist(err) {
return w.fs.Remove(w.f.Name())
}
if err := w.fs.Rename(w.f.Name(), file); err != nil {
return err
}
fixPermissions(w.fs, file)
return nil
}

View File

@@ -0,0 +1,29 @@
//go:build !windows
package dotgit
import (
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-git/v5/utils/trace"
)
// fixPermissions makes path read-only (0444) when the underlying
// filesystem supports chmod; any failure is only traced, never returned.
func fixPermissions(fs billy.Filesystem, path string) {
	chmodFS, ok := fs.(billy.Chmod)
	if !ok {
		return
	}
	if err := chmodFS.Chmod(path, 0o444); err != nil {
		trace.General.Printf("failed to chmod %s: %v", path, err)
	}
}
// isReadOnly reports whether path carries exactly the read-only
// permission bits (0444).
func isReadOnly(fs billy.Filesystem, path string) (bool, error) {
	info, err := fs.Stat(path)
	if err != nil {
		return false, err
	}
	return info.Mode().Perm() == 0o444, nil
}

View File

@@ -0,0 +1,58 @@
//go:build windows
package dotgit
import (
"fmt"
"path/filepath"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-git/v5/utils/trace"
"golang.org/x/sys/windows"
)
// fixPermissions marks path as read-only on Windows by setting the
// FILE_ATTRIBUTE_READONLY attribute. Failures are traced via
// trace.General rather than returned.
func fixPermissions(fs billy.Filesystem, path string) {
	fullpath := filepath.Join(fs.Root(), path)
	p, err := windows.UTF16PtrFromString(fullpath)
	if err != nil {
		trace.General.Printf("failed to chmod %s: %v", fullpath, err)
		return
	}
	attrs, err := windows.GetFileAttributes(p)
	if err != nil {
		trace.General.Printf("failed to chmod %s: %v", fullpath, err)
		return
	}
	// Already read-only: nothing to do.
	if attrs&windows.FILE_ATTRIBUTE_READONLY != 0 {
		return
	}
	err = windows.SetFileAttributes(p,
		attrs|windows.FILE_ATTRIBUTE_READONLY,
	)
	if err != nil {
		trace.General.Printf("failed to chmod %s: %v", fullpath, err)
	}
}
// isReadOnly reports whether the file at path has the Windows read-only
// attribute set. Errors are wrapped with the full path for context.
func isReadOnly(fs billy.Filesystem, path string) (bool, error) {
	abs := filepath.Join(fs.Root(), path)

	namePtr, err := windows.UTF16PtrFromString(abs)
	if err != nil {
		return false, fmt.Errorf("%w: %q", err, abs)
	}

	attrs, err := windows.GetFileAttributes(namePtr)
	if err != nil {
		return false, fmt.Errorf("%w: %q", err, abs)
	}

	return attrs&windows.FILE_ATTRIBUTE_READONLY != 0, nil
}

View File

@@ -48,6 +48,11 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {
defer ioutil.CheckClose(f, &err)
fi, statErr := s.dir.Fs().Stat(f.Name())
if statErr == nil {
idx.ModTime = fi.ModTime()
}
d := index.NewDecoder(f)
err = d.Decode(idx)
return idx, err

View File

@@ -69,7 +69,11 @@ type IndexStorage struct {
index *index.Index
}
// SetIndex stores the given index.
// Note: this method sets idx.ModTime to simulate filesystem storage behavior.
func (c *IndexStorage) SetIndex(idx *index.Index) error {
// Set ModTime to enable racy git detection in the metadata optimization.
idx.ModTime = time.Now()
c.index = idx
return nil
}

View File

@@ -4,9 +4,11 @@ import (
"io"
"os"
"path"
"time"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/format/index"
"github.com/go-git/go-git/v5/utils/merkletrie/noder"
"github.com/go-git/go-billy/v5"
@@ -16,6 +18,14 @@ var ignore = map[string]bool{
".git": true,
}
// Options contains configuration for the filesystem node.
type Options struct {
// Index is used to enable the metadata-first comparison optimization while
// correctly handling the "racy git" condition. If no index is provided,
// the function works without the optimization.
Index *index.Index
}
// The node represents a file or a directory in a billy.Filesystem. It
// implements the interface noder.Noder of merkletrie package.
//
@@ -24,6 +34,8 @@ var ignore = map[string]bool{
type node struct {
fs billy.Filesystem
submodules map[string]plumbing.Hash
idx *index.Index
idxMap map[string]*index.Entry
path string
hash []byte
@@ -31,6 +43,7 @@ type node struct {
isDir bool
mode os.FileMode
size int64
modTime time.Time
}
// NewRootNode returns the root node based on a given billy.Filesystem.
@@ -42,7 +55,41 @@ func NewRootNode(
fs billy.Filesystem,
submodules map[string]plumbing.Hash,
) noder.Noder {
return &node{fs: fs, submodules: submodules, isDir: true}
return NewRootNodeWithOptions(fs, submodules, Options{})
}
// NewRootNodeWithOptions returns the root node based on a given billy.Filesystem
// with options to set an index. Providing an index enables the metadata-first
// comparison optimization while correctly handling the "racy git" condition. If
// no index is provided, the function works without the optimization.
//
// The index's ModTime field is used to detect the racy git condition. When a file's
// mtime equals or is newer than the index ModTime, we must hash the file content
// even if other metadata matches, because the file may have been modified in the
// same second that the index was written.
//
// Reference: https://git-scm.com/docs/racy-git
func NewRootNodeWithOptions(
fs billy.Filesystem,
submodules map[string]plumbing.Hash,
options Options,
) noder.Noder {
var idxMap map[string]*index.Entry
if options.Index != nil {
idxMap = make(map[string]*index.Entry, len(options.Index.Entries))
for _, entry := range options.Index.Entries {
idxMap[entry.Name] = entry
}
}
return &node{
fs: fs,
submodules: submodules,
idx: options.Index,
idxMap: idxMap,
isDir: true,
}
}
// Hash the hash of a filesystem is the result of concatenating the computed
@@ -133,11 +180,14 @@ func (n *node) newChildNode(file os.FileInfo) (*node, error) {
node := &node{
fs: n.fs,
submodules: n.submodules,
idx: n.idx,
idxMap: n.idxMap,
path: path,
isDir: file.IsDir(),
size: file.Size(),
mode: file.Mode(),
path: path,
isDir: file.IsDir(),
size: file.Size(),
mode: file.Mode(),
modTime: file.ModTime(),
}
if _, isSubmodule := n.submodules[path]; isSubmodule {
@@ -161,6 +211,16 @@ func (n *node) calculateHash() {
n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...)
return
}
if n.idxMap != nil {
if entry, ok := n.idxMap[n.path]; ok {
if n.metadataMatches(entry) {
n.hash = append(entry.Hash[:], mode.Bytes()...)
return
}
}
}
var hash plumbing.Hash
if n.mode&os.ModeSymlink != 0 {
hash = n.doCalculateHashForSymlink()
@@ -170,6 +230,44 @@ func (n *node) calculateHash() {
n.hash = append(hash[:], mode.Bytes()...)
}
// metadataMatches reports whether the filesystem metadata of this node is
// identical to the given index entry AND whether that metadata can be
// trusted, i.e. the "racy git" condition does not apply.
func (n *node) metadataMatches(entry *index.Entry) bool {
	if entry == nil {
		return false
	}

	// Without a usable index modification time the racy-git check cannot
	// run (e.g. in-memory storage where the index file timestamp is
	// unavailable), so metadata alone can never be trusted — force
	// content hashing by reporting a mismatch.
	if n.idx == nil || n.idx.ModTime.IsZero() {
		return false
	}

	if uint32(n.size) != entry.Size {
		return false
	}
	if !n.modTime.IsZero() && !n.modTime.Equal(entry.ModifiedAt) {
		return false
	}

	mode, err := filemode.NewFromOSFileMode(n.mode)
	if err != nil || mode != entry.Mode {
		return false
	}

	// Racy git: a file modified in the same second the index was written
	// may differ without the metadata showing it, so anything with an
	// mtime at or after the index ModTime must be hashed.
	if !n.modTime.IsZero() && !n.modTime.Before(n.idx.ModTime) {
		return false
	}

	return true
}
func (n *node) doCalculateHashForRegular() plumbing.Hash {
f, err := n.fs.Open(n.path)
if err != nil {

View File

@@ -385,7 +385,8 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string, files []string) ([]
return nil, err
}
var removedFiles []string
removedFiles := make([]string, 0, len(changes))
filesMap := buildFilePathMap(files)
for _, ch := range changes {
a, err := ch.Action()
if err != nil {
@@ -407,7 +408,7 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string, files []string) ([]
}
if len(files) > 0 {
contains := inFiles(files, name)
contains := inFiles(filesMap, name)
if !contains {
continue
}
@@ -436,15 +437,11 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string, files []string) ([]
return removedFiles, w.r.Storer.SetIndex(idx)
}
func inFiles(files []string, v string) bool {
// inFiles checks if the given file is in the list of files. The incoming filepaths in files should be cleaned before calling this function.
func inFiles(files map[string]struct{}, v string) bool {
	if _, ok := files[filepath.Clean(v)]; ok {
		return true
	}
	return false
}
func (w *Worktree) resetWorktree(t *object.Tree, files []string) error {
@@ -459,6 +456,7 @@ func (w *Worktree) resetWorktree(t *object.Tree, files []string) error {
}
b := newIndexBuilder(idx)
filesMap := buildFilePathMap(files)
for _, ch := range changes {
if err := w.validChange(ch); err != nil {
return err
@@ -476,7 +474,7 @@ func (w *Worktree) resetWorktree(t *object.Tree, files []string) error {
continue
}
contains := inFiles(files, file)
contains := inFiles(filesMap, file)
if !contains {
continue
}
@@ -1206,3 +1204,16 @@ func (b *indexBuilder) Add(e *index.Entry) {
func (b *indexBuilder) Remove(name string) {
delete(b.entries, filepath.ToSlash(name))
}
// buildFilePathMap creates a map of cleaned file paths for efficient lookup.
// Returns nil if the input slice is empty.
func buildFilePathMap(files []string) map[string]struct{} {
	if len(files) == 0 {
		return nil
	}

	out := make(map[string]struct{}, len(files))
	for _, name := range files {
		out[filepath.Clean(name)] = struct{}{}
	}
	return out
}

View File

@@ -141,7 +141,7 @@ func (w *Worktree) diffStagingWithWorktree(reverse, excludeIgnoredChanges bool)
return nil, err
}
to := filesystem.NewRootNode(w.Filesystem, submodules)
to := filesystem.NewRootNodeWithOptions(w.Filesystem, submodules, filesystem.Options{Index: idx})
var c merkletrie.Changes
if reverse {