Merge pull request #808 from astef/tar
feat: replace tar (cli) with code-only packing
Commit d3c0242115
1 go.mod
@@ -97,6 +97,7 @@ require (
 	go.uber.org/zap v1.19.0 // indirect
 	golang.org/x/crypto v0.32.0 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+	golang.org/x/mod v0.22.0
 	golang.org/x/oauth2 v0.23.0 // indirect
 	golang.org/x/sync v0.10.0 // indirect
 	golang.org/x/sys v0.29.0 // indirect
2 go.sum
@@ -457,6 +457,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
pkg/nfs/controllerserver.go
@@ -404,10 +404,11 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
 
 	srcPath := getInternalVolumePath(cs.Driver.workingMountDir, srcVol)
 	dstPath := filepath.Join(snapInternalVolPath, snapshot.archiveName())
 
 	klog.V(2).Infof("tar %v -> %v", srcPath, dstPath)
-	out, err := exec.Command("tar", "-C", srcPath, "-czvf", dstPath, ".").CombinedOutput()
+	err = TarPack(srcPath, dstPath, true)
 	if err != nil {
-		return nil, status.Errorf(codes.Internal, "failed to create archive for snapshot: %v: %v", err, string(out))
+		return nil, status.Errorf(codes.Internal, "failed to create archive for snapshot: %v", err)
 	}
 	klog.V(2).Infof("tar %s -> %s complete", srcPath, dstPath)
@@ -571,9 +572,10 @@ func (cs *ControllerServer) copyFromSnapshot(ctx context.Context, req *csi.Creat
 	snapPath := filepath.Join(getInternalVolumePath(cs.Driver.workingMountDir, snapVol), snap.archiveName())
 	dstPath := getInternalVolumePath(cs.Driver.workingMountDir, dstVol)
 	klog.V(2).Infof("copy volume from snapshot %v -> %v", snapPath, dstPath)
-	out, err := exec.Command("tar", "-xzvf", snapPath, "-C", dstPath).CombinedOutput()
+
+	err = TarUnpack(snapPath, dstPath, true)
 	if err != nil {
-		return status.Errorf(codes.Internal, "failed to copy volume for snapshot: %v: %v", err, string(out))
+		return status.Errorf(codes.Internal, "failed to copy volume for snapshot: %v", err)
 	}
 	klog.V(2).Infof("volume copied from snapshot %v -> %v", snapPath, dstPath)
 	return nil
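The two hunks above are the only call sites of the new helpers: CreateSnapshot now packs the snapshot source directory with TarPack instead of shelling out to `tar -czvf`, and copyFromSnapshot restores it with TarUnpack instead of `tar -xzvf`. As a minimal round-trip sketch of the same API (the test name, temporary directories, and layout here are illustrative only, not part of the PR; tar_test.go below carries the real coverage):

package nfs

import (
	"path/filepath"
	"testing"
)

// Illustrative sketch only; not part of the commit.
func TestTarRoundTripSketch(t *testing.T) {
	srcDir := t.TempDir()                                // directory to archive
	archive := filepath.Join(t.TempDir(), "snap.tar.gz") // must not live under srcDir
	dstDir := filepath.Join(t.TempDir(), "restored")     // created by TarUnpack from the archive's directory entries

	// gzip-compressed pack/unpack, mirroring the old `tar -czvf` / `tar -xzvf` calls
	if err := TarPack(srcDir, archive, true); err != nil {
		t.Fatalf("TarPack: %v", err)
	}
	if err := TarUnpack(archive, dstDir, true); err != nil {
		t.Fatalf("TarUnpack: %v", err)
	}
}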
246 pkg/nfs/tar.go Normal file
@@ -0,0 +1,246 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nfs

import (
	"archive/tar"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

func TarPack(srcDirPath string, dstPath string, enableCompression bool) error {
	// normalize all paths to be absolute and clean
	dstPath, err := filepath.Abs(dstPath)
	if err != nil {
		return fmt.Errorf("normalizing destination path: %w", err)
	}

	srcDirPath, err = filepath.Abs(srcDirPath)
	if err != nil {
		return fmt.Errorf("normalizing source path: %w", err)
	}

	if strings.HasPrefix(filepath.Dir(dstPath), srcDirPath) {
		return fmt.Errorf("destination file %s cannot be under source directory %s", dstPath, srcDirPath)
	}

	tarFile, err := os.Create(dstPath)
	if err != nil {
		return fmt.Errorf("creating destination file: %w", err)
	}
	defer func() {
		err = errors.Join(err, closeAndWrapErr(tarFile, "closing destination file %s: %w", dstPath))
	}()

	var tarDst io.Writer = tarFile
	if enableCompression {
		gzipWriter := gzip.NewWriter(tarFile)
		defer func() {
			err = errors.Join(err, closeAndWrapErr(gzipWriter, "closing gzip writer"))
		}()
		tarDst = gzipWriter
	}

	tarWriter := tar.NewWriter(tarDst)
	defer func() {
		err = errors.Join(err, closeAndWrapErr(tarWriter, "closing tar writer"))
	}()

	// recursively visit every file and write it
	if err = filepath.Walk(
		srcDirPath,
		func(srcSubPath string, fileInfo fs.FileInfo, walkErr error) error {
			return tarVisitFileToPack(tarWriter, srcDirPath, srcSubPath, fileInfo, walkErr)
		},
	); err != nil {
		return fmt.Errorf("walking source directory: %w", err)
	}

	return nil
}

func tarVisitFileToPack(
	tarWriter *tar.Writer,
	srcPath string,
	srcSubPath string,
	fileInfo os.FileInfo,
	walkErr error,
) (err error) {
	if walkErr != nil {
		return walkErr
	}

	linkTarget := ""
	if fileInfo.Mode()&fs.ModeSymlink != 0 {
		linkTarget, err = os.Readlink(srcSubPath)
		if err != nil {
			return fmt.Errorf("reading link %s: %w", srcSubPath, err)
		}
	}

	tarHeader, err := tar.FileInfoHeader(fileInfo, linkTarget)
	if err != nil {
		return fmt.Errorf("creating tar header for %s: %w", srcSubPath, err)
	}

	// srcSubPath always starts with srcPath and both are absolute
	tarHeader.Name, err = filepath.Rel(srcPath, srcSubPath)
	if err != nil {
		return fmt.Errorf("making tar header name for file %s: %w", srcSubPath, err)
	}

	if err = tarWriter.WriteHeader(tarHeader); err != nil {
		return fmt.Errorf("writing tar header for file %s: %w", srcSubPath, err)
	}

	if !fileInfo.Mode().IsRegular() {
		return nil
	}

	srcFile, err := os.Open(srcSubPath)
	if err != nil {
		return fmt.Errorf("opening file being packed %s: %w", srcSubPath, err)
	}
	defer func() {
		err = errors.Join(err, closeAndWrapErr(srcFile, "closing file being packed %s: %w", srcSubPath))
	}()
	_, err = io.Copy(tarWriter, srcFile)
	if err != nil {
		return fmt.Errorf("packing file %s: %w", srcSubPath, err)
	}
	return nil
}

func TarUnpack(srcPath, dstDirPath string, enableCompression bool) (err error) {
	// normalize all paths to be absolute and clean
	srcPath, err = filepath.Abs(srcPath)
	if err != nil {
		return fmt.Errorf("normalizing archive path: %w", err)
	}

	dstDirPath, err = filepath.Abs(dstDirPath)
	if err != nil {
		return fmt.Errorf("normalizing archive destination path: %w", err)
	}

	tarFile, err := os.Open(srcPath)
	if err != nil {
		return fmt.Errorf("opening archive %s: %w", srcPath, err)
	}
	defer func() {
		err = errors.Join(err, closeAndWrapErr(tarFile, "closing archive %s: %w", srcPath))
	}()

	var tarDst io.Reader = tarFile
	if enableCompression {
		var gzipReader *gzip.Reader
		gzipReader, err = gzip.NewReader(tarFile)
		if err != nil {
			return fmt.Errorf("creating gzip reader: %w", err)
		}
		defer func() {
			err = errors.Join(err, closeAndWrapErr(gzipReader, "closing gzip reader: %w"))
		}()

		tarDst = gzipReader
	}

	tarReader := tar.NewReader(tarDst)

	for {
		var tarHeader *tar.Header
		tarHeader, err = tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("reading tar header of %s: %w", srcPath, err)
		}

		fileInfo := tarHeader.FileInfo()

		filePath := filepath.Join(dstDirPath, tarHeader.Name)

		// protect against "Zip Slip"
		if !strings.HasPrefix(filePath, dstDirPath) {
			// mimic standard error, which will be returned in future versions of Go by default
			// more info can be found by "tarinsecurepath" variable name
			return tar.ErrInsecurePath
		}

		fileDirPath := filePath
		if !fileInfo.Mode().IsDir() {
			fileDirPath = filepath.Dir(fileDirPath)
		}

		if err = os.MkdirAll(fileDirPath, 0755); err != nil {
			return fmt.Errorf("making dirs for path %s: %w", fileDirPath, err)
		}

		if fileInfo.Mode().IsDir() {
			continue
		}

		if fileInfo.Mode()&fs.ModeSymlink != 0 {
			if err := os.Symlink(tarHeader.Linkname, filePath); err != nil {
				return fmt.Errorf("creating symlink %s: %w", filePath, err)
			}
			continue
		}

		if err = tarUnpackFile(filePath, tarReader, fileInfo); err != nil {
			return fmt.Errorf("unpacking file %s: %w", filePath, err)
		}
	}
	return nil
}

func tarUnpackFile(dstFileName string, src io.Reader, srcFileInfo fs.FileInfo) (err error) {
	var dstFile *os.File
	dstFile, err = os.OpenFile(dstFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcFileInfo.Mode().Perm())
	if err != nil {
		return fmt.Errorf("opening destination file %s: %w", dstFileName, err)
	}
	defer func() {
		err = errors.Join(err, closeAndWrapErr(dstFile, "closing destination file %s: %w", dstFile))
	}()

	n, err := io.Copy(dstFile, src)
	if err != nil {
		return fmt.Errorf("copying to destination file %s: %w", dstFileName, err)
	}

	if srcFileInfo.Mode().IsRegular() && n != srcFileInfo.Size() {
		return fmt.Errorf("written size check failed for %s: wrote %d, want %d", dstFileName, n, srcFileInfo.Size())
	}

	return nil
}

func closeAndWrapErr(closer io.Closer, errFormat string, a ...any) error {
	if err := closer.Close(); err != nil {
		a = append(a, err)
		return fmt.Errorf(errFormat, a...)
	}
	return nil
}
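A note on the "Zip Slip" guard in TarUnpack above: filepath.Join cleans ".." elements, so an entry that tries to escape the destination resolves to a path that no longer carries the destination directory as a prefix, and the check rejects it. A small standalone sketch of that check (the destination path and entry names are made up for illustration):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	dstDirPath := "/data/restore"

	for _, name := range []string{"ok/file.txt", "../escape.txt"} {
		// filepath.Join cleans ".." elements, so an escaping entry
		// resolves to a path outside dstDirPath.
		filePath := filepath.Join(dstDirPath, name)
		insecure := !strings.HasPrefix(filePath, dstDirPath)
		fmt.Printf("%s -> %s (insecure=%v)\n", name, filePath, insecure)
	}
}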
350 pkg/nfs/tar_test.go Normal file
@@ -0,0 +1,350 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nfs

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"errors"
	"fmt"
	"maps"
	"math"
	"os"
	"os/exec"
	"path/filepath"
	"slices"
	"strings"
	"testing"
	"time"

	"golang.org/x/mod/sumdb/dirhash"
)

const (
	code producedFrom = '0'
	cli  producedFrom = '1'
)

type producedFrom byte

const archiveFileExt = ".tar.gz"

func TestPackUnpack(t *testing.T) {
	inputPath := t.TempDir()
	generateFileSystem(t, inputPath)

	outputPath := t.TempDir()

	// produced file names (without extensions) have a suffix,
	// which determine the last operation:
	// "0" means that it was produced from code
	// "1" means that it was produced from CLI
	// e.g.: "testdata011.tar.gz" - was packed from code,
	// then unpacked from cli and packed again from cli

	pathsBySuffix := make(map[string]string)

	// number of pack/unpack operations
	opNum := 4

	// generate all operation combinations
	fileNum := int(math.Pow(2, float64(opNum)))
	for i := 0; i < fileNum; i++ {
		binStr := fmt.Sprintf("%b", i)

		// left-pad with zeroes
		binStr = strings.Repeat("0", opNum-len(binStr)) + binStr

		// copy slices to satisfy type system
		ops := make([]producedFrom, opNum)
		for opIdx := 0; opIdx < opNum; opIdx++ {
			ops[opIdx] = producedFrom(binStr[opIdx])
		}

		// produce folders and archives
		produce(t, pathsBySuffix, inputPath, outputPath, ops...)
	}

	// compare all unpacked directories
	paths := slices.Collect(maps.Values(pathsBySuffix))
	assertUnpackedFilesEqual(t, inputPath, paths)
}

func produce(
	t *testing.T,
	results map[string]string,
	inputDirPath string,
	outputDirPath string,
	ops ...producedFrom,
) {
	const baseName = "testdata"

	for i := 0; i < len(ops); i++ {
		packing := i%2 == 0

		srcPath := inputDirPath
		if i > 0 {
			prevSuffix := string(ops[:i])
			srcPath = filepath.Join(outputDirPath, baseName+prevSuffix)
			if !packing {
				srcPath += archiveFileExt
			}
		}

		suffix := string(ops[:i+1])
		dstPath := filepath.Join(outputDirPath, baseName+suffix)
		if packing {
			dstPath += archiveFileExt
		}

		if _, ok := results[suffix]; ok {
			continue
		}

		switch {
		case packing && ops[i] == code:
			// packing from code
			if err := TarPack(srcPath, dstPath, true); err != nil {
				t.Fatalf("packing '%s' with TarPack into '%s': %v", srcPath, dstPath, err)
			}
		case packing && ops[i] == cli:
			// packing from CLI
			if out, err := exec.Command("tar", "-C", srcPath, "-czvf", dstPath, ".").CombinedOutput(); err != nil {
				t.Log("TAR OUTPUT:", string(out))
				t.Fatalf("packing '%s' with tar into '%s': %v", srcPath, dstPath, err)
			}
		case !packing && ops[i] == code:
			// unpacking from code
			if err := TarUnpack(srcPath, dstPath, true); err != nil {
				t.Fatalf("unpacking '%s' with TarUnpack into '%s': %v", srcPath, dstPath, err)
			}
		case !packing && ops[i] == cli:
			// unpacking from CLI
			// tar requires destination directory to exist
			if err := os.MkdirAll(dstPath, 0755); err != nil {
				t.Fatalf("making dir '%s' for unpacking with tar: %v", dstPath, err)
			}
			if out, err := exec.Command("tar", "-xzvf", srcPath, "-C", dstPath).CombinedOutput(); err != nil {
				t.Log("TAR OUTPUT:", string(out))
				t.Fatalf("unpacking '%s' with tar into '%s': %v", srcPath, dstPath, err)
			}
		default:
			t.Fatalf("unknown suffix: %s", string(ops[i]))
		}

		results[suffix] = dstPath
	}
}

func assertUnpackedFilesEqual(t *testing.T, originalDir string, paths []string) {
	originalDirHash, err := dirhash.HashDir(originalDir, "_", dirhash.DefaultHash)
	if err != nil {
		t.Fatal("failed hashing original dir ", err)
	}

	for _, p := range paths {
		if strings.HasSuffix(p, archiveFileExt) {
			// archive, not a directory
			continue
		}

		// unpacked directory
		hs, err := dirhash.HashDir(p, "_", dirhash.DefaultHash)
		if err != nil {
			t.Fatal("failed hashing dir ", err)
		}

		if hs != originalDirHash {
			t.Errorf("expected '%s' to have the same hash as '%s', got different", originalDir, p)
		}
	}
}

func generateFileSystem(t *testing.T, inputPath string) {
	// empty directory
	if err := os.MkdirAll(filepath.Join(inputPath, "empty_dir"), 0755); err != nil {
		t.Fatalf("generating empty directory: %v", err)
	}

	// deep empty directories
	deepEmptyDirPath := filepath.Join(inputPath, "deep_empty_dir", strings.Repeat("/0/1/2", 20))
	if err := os.MkdirAll(deepEmptyDirPath, 0755); err != nil {
		t.Fatalf("generating deep empty directory '%s': %v", deepEmptyDirPath, err)
	}

	// empty file
	f, err := os.Create(filepath.Join(inputPath, "empty_file"))
	if err != nil {
		t.Fatalf("generating empty file: %v", err)
	}
	f.Close()

	// big (100MB) file
	bigFilePath := filepath.Join(inputPath, "big_file")
	for i := byte(0); i < 100; i++ {
		// write 1MB
		err := os.WriteFile(bigFilePath, bytes.Repeat([]byte{i}, 1024*1024), 0755)
		if err != nil {
			t.Fatalf("generating empty file: %v", err)
		}
	}
}

func TestUnpackZipSlip(t *testing.T) {
	// Arrange: produce malicious archive
	inputDir := t.TempDir()

	const mContent = "malicious content"
	const mFileName = "malicious.txt"
	const mHeaderPath = "../" + mFileName // attack: path traversal
	var mArchivePath = filepath.Join(inputDir, "malicious.tar.gz")

	// temp file to pack
	maliciousFile, err := os.Create(mArchivePath)
	if err != nil {
		t.Fatalf("failed to create temp file: %v", err)
	}

	gzWriter := gzip.NewWriter(maliciousFile)
	tarWriter := tar.NewWriter(gzWriter)

	// define a malicious file header
	maliciousHeader := &tar.Header{
		Name: mHeaderPath,
		Size: int64(len(mContent)),
		Mode: 0600,
	}

	err = tarWriter.WriteHeader(maliciousHeader)
	if err != nil {
		t.Fatalf("failed to write malicious header: %v", err)
	}

	// write malicious content
	_, err = tarWriter.Write([]byte(mContent))
	if err != nil {
		t.Fatalf("failed to write content: %v", err)
	}

	err = errors.Join(tarWriter.Close(), gzWriter.Close(), maliciousFile.Close())
	if err != nil {
		t.Fatalf("failed to close writers: %v", err)
	}

	// Act & Assert: unpack nearby, expect error
	var outputDir = filepath.Join(inputDir, "output")
	if err := TarUnpack(mArchivePath, outputDir, true); err != nil {
		if !errors.Is(err, tar.ErrInsecurePath) {
			t.Fatalf("expected error tar.ErrInsecurePath, got: %v", err)
		}
	} else {
		t.Error("unpack of malicious file succeeded, expected it to fail")
	}

	// Assert: check that file did not escape
	var attackPath = filepath.Join(inputDir, mFileName)
	if _, err := os.Stat(attackPath); err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			t.Fatalf("failed to check the existence of the malicious file: %v", err)
		}
	} else {
		t.Errorf("malicious file escaped the destination: %s", attackPath)
	}
}

func TestPackSameDir(t *testing.T) {
	inputDir := t.TempDir()

	err := TarPack(inputDir, filepath.Join(inputDir, "a.tar.gz"), false)

	const expectedErr = "cannot be under source directory"
	if err == nil {
		t.Errorf("expected error '%s', got success", expectedErr)
	} else if !strings.Contains(err.Error(), expectedErr) {
		t.Errorf("expected error '%s', got: %v", expectedErr, err)
	}
}

func TestSymlinks(t *testing.T) {
	inputDir := t.TempDir()

	testContent := []byte(time.Now().String())

	testFileName := "d.txt"
	testFilePath := filepath.Join(inputDir, testFileName)

	if err := os.WriteFile(testFilePath, testContent, 0644); err != nil {
		t.Fatalf("writing to %s: %v", testFilePath, err)
	}

	absSymlinkName := "abs_symlink_to_" + testFileName
	absSymlinkPath := filepath.Join(inputDir, absSymlinkName)
	if err := os.Symlink(testFilePath, absSymlinkPath); err != nil {
		t.Fatalf("creating absolute symlink %s: %v", absSymlinkPath, err)
	}

	relSymlinkName := "rel_symlink_to_" + testFileName
	relSymlinkPath := filepath.Join(inputDir, relSymlinkName)

	relSymlinkTgt := "." + string(filepath.Separator) + testFileName
	if err := os.Symlink(relSymlinkTgt, relSymlinkPath); err != nil {
		t.Fatalf("creating relative symlink %s: %v", relSymlinkPath, err)
	}

	outputDir := t.TempDir()

	archivePath := filepath.Join(outputDir, "output.tar.gz")
	if err := TarPack(inputDir, archivePath, true); err != nil {
		t.Fatalf("packing %s to %s: %v", inputDir, archivePath, err)
	}

	unpackedPath := filepath.Join(outputDir, "output")
	if err := TarUnpack(archivePath, unpackedPath, true); err != nil {
		t.Fatalf("unpacking %s to %s: %v", archivePath, unpackedPath, err)
	}

	// check absolute symlink
	outputAbsSymlinkPath := filepath.Join(unpackedPath, absSymlinkName)
	outputAbsSymlinkTgt, err := os.Readlink(outputAbsSymlinkPath)
	if err != nil {
		t.Fatalf("reading absolute link %s: %v", outputAbsSymlinkPath, err)
	}
	if outputAbsSymlinkTgt != testFilePath {
		t.Errorf("expected absolute symlink to point to %s, got %s", testFilePath, outputAbsSymlinkTgt)
	}
	if data, err := os.ReadFile(outputAbsSymlinkPath); err != nil {
		t.Fatalf("reading file %s: %v", outputAbsSymlinkPath, err)
	} else if !bytes.Equal(testContent, data) {
		t.Errorf("expected file %s to be: %X, got %X", outputAbsSymlinkPath, testContent, data)
	}

	// check relative symlink
	outputRelSymlinkPath := filepath.Join(unpackedPath, relSymlinkName)
	outputRelSymlinkTgt, err := os.Readlink(outputRelSymlinkPath)
	if err != nil {
		t.Fatalf("reading relative link %s: %v", outputRelSymlinkPath, err)
	}
	if outputRelSymlinkTgt != relSymlinkTgt {
		t.Errorf("expected relative symlink to point to %s, got %s", relSymlinkTgt, outputRelSymlinkTgt)
	}
	if data, err := os.ReadFile(outputRelSymlinkPath); err != nil {
		t.Fatalf("reading file %s: %v", outputRelSymlinkPath, err)
	} else if !bytes.Equal(testContent, data) {
		t.Errorf("expected file %s to be: %X, got %X", outputRelSymlinkPath, testContent, data)
	}
}
27 vendor/golang.org/x/mod/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 vendor/golang.org/x/mod/PATENTS generated vendored Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
135 vendor/golang.org/x/mod/sumdb/dirhash/hash.go generated vendored Normal file
@@ -0,0 +1,135 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package dirhash defines hashes over directory trees.
// These hashes are recorded in go.sum files and in the Go checksum database,
// to allow verifying that a newly-downloaded module has the expected content.
package dirhash

import (
	"archive/zip"
	"crypto/sha256"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// DefaultHash is the default hash function used in new go.sum entries.
var DefaultHash Hash = Hash1

// A Hash is a directory hash function.
// It accepts a list of files along with a function that opens the content of each file.
// It opens, reads, hashes, and closes each file and returns the overall directory hash.
type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error)

// Hash1 is the "h1:" directory hash function, using SHA-256.
//
// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary
// prepared as if by the Unix command:
//
//	sha256sum $(find . -type f | sort) | sha256sum
//
// More precisely, the hashed summary contains a single line for each file in the list,
// ordered by sort.Strings applied to the file names, where each line consists of
// the hexadecimal SHA-256 hash of the file content,
// two spaces (U+0020), the file name, and a newline (U+000A).
//
// File names with newlines (U+000A) are disallowed.
func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) {
	h := sha256.New()
	files = append([]string(nil), files...)
	sort.Strings(files)
	for _, file := range files {
		if strings.Contains(file, "\n") {
			return "", errors.New("dirhash: filenames with newlines are not supported")
		}
		r, err := open(file)
		if err != nil {
			return "", err
		}
		hf := sha256.New()
		_, err = io.Copy(hf, r)
		r.Close()
		if err != nil {
			return "", err
		}
		fmt.Fprintf(h, "%x  %s\n", hf.Sum(nil), file)
	}
	return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}

// HashDir returns the hash of the local file system directory dir,
// replacing the directory name itself with prefix in the file names
// used in the hash function.
func HashDir(dir, prefix string, hash Hash) (string, error) {
	files, err := DirFiles(dir, prefix)
	if err != nil {
		return "", err
	}
	osOpen := func(name string) (io.ReadCloser, error) {
		return os.Open(filepath.Join(dir, strings.TrimPrefix(name, prefix)))
	}
	return hash(files, osOpen)
}

// DirFiles returns the list of files in the tree rooted at dir,
// replacing the directory name dir with prefix in each name.
// The resulting names always use forward slashes.
func DirFiles(dir, prefix string) ([]string, error) {
	var files []string
	dir = filepath.Clean(dir)
	err := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		} else if file == dir {
			return fmt.Errorf("%s is not a directory", dir)
		}

		rel := file
		if dir != "." {
			rel = file[len(dir)+1:]
		}
		f := filepath.Join(prefix, rel)
		files = append(files, filepath.ToSlash(f))
		return nil
	})
	if err != nil {
		return nil, err
	}
	return files, nil
}

// HashZip returns the hash of the file content in the named zip file.
// Only the file names and their contents are included in the hash:
// the exact zip file format encoding, compression method,
// per-file modification times, and other metadata are ignored.
func HashZip(zipfile string, hash Hash) (string, error) {
	z, err := zip.OpenReader(zipfile)
	if err != nil {
		return "", err
	}
	defer z.Close()
	var files []string
	zfiles := make(map[string]*zip.File)
	for _, file := range z.File {
		files = append(files, file.Name)
		zfiles[file.Name] = file
	}
	zipOpen := func(name string) (io.ReadCloser, error) {
		f := zfiles[name]
		if f == nil {
			return nil, fmt.Errorf("file %q not found in zip", name) // should never happen
		}
		return f.Open()
	}
	return hash(files, zipOpen)
}
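The vendored dirhash package above is what tar_test.go uses to assert that every unpacked directory matches the original: HashDir hashes the tree under a common prefix, so two trees with identical relative names and file contents produce the same "h1:" string. A minimal comparison sketch (the directory paths are placeholders):

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hash both trees with the same prefix so only relative names and
	// file contents take part in the comparison.
	want, err := dirhash.HashDir("./original", "_", dirhash.DefaultHash)
	if err != nil {
		log.Fatal(err)
	}
	got, err := dirhash.HashDir("./unpacked", "_", dirhash.DefaultHash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("directories match:", want == got)
}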
3 vendor/modules.txt vendored
@@ -381,6 +381,9 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
+# golang.org/x/mod v0.22.0
+## explicit; go 1.22.0
+golang.org/x/mod/sumdb/dirhash
 # golang.org/x/net v0.34.0
 ## explicit; go 1.18
 golang.org/x/net/context