Diffstat (limited to 'external/meta-virtualization/recipes-containers/oci-image-tools/files')
-rw-r--r--  external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-config-make-Config.User-mapping-errors-a-warning.patch    |  30
-rw-r--r--  external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch |  78
-rw-r--r--  external/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch | 242
3 files changed, 350 insertions(+), 0 deletions(-)
diff --git a/external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-config-make-Config.User-mapping-errors-a-warning.patch b/external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-config-make-Config.User-mapping-errors-a-warning.patch
new file mode 100644
index 00000000..78d2b9d0
--- /dev/null
+++ b/external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-config-make-Config.User-mapping-errors-a-warning.patch
@@ -0,0 +1,30 @@
+From fbd62eff9ff2f447c2eb4634398110609fbf9d59 Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@windriver.com>
+Date: Thu, 16 Nov 2017 23:40:17 -0500
+Subject: [PATCH] config: make Config.User mapping errors a warning
+
+Rather than throwing an error if we can't map a user to a uid,
+output a warning. We aren't actually running the code, but are
+just extracting it, so the user not existing isn't an issue.
+
+Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+---
+ image/config.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/import/image/config.go b/src/import/image/config.go
+index d28b1bc4fe5f..37dfd1f14ef7 100644
+--- a/src/import/image/config.go
++++ b/src/import/image/config.go
+@@ -106,7 +106,7 @@ func (c *config) runtimeSpec(rootfs string) (*specs.Spec, error) {
+ s.Process.User.UID = uint32(uid)
+ s.Process.User.GID = uint32(gid)
+ } else if c.Config.User != "" {
+- return nil, errors.New("config.User: unsupported format")
++ fmt.Println("Warning: could not map UID for user:", c.Config.User)
+ }
+
+ s.Linux = &specs.Linux{}
+--
+2.4.0.53.g8440f74
+
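The hunk above only replaces the hard error with a warning. As a standalone illustration of the idea, a minimal sketch (the mapUser helper below is hypothetical, not the tool's actual code) that resolves a Config.User value of the form "uid[:gid]" and warns instead of failing when it cannot:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // mapUser tries to turn a Config.User value such as "1000:1000" into
    // numeric IDs. When the value cannot be parsed (for example a named user
    // that does not exist on the build host), it prints a warning and reports
    // failure instead of aborting, mirroring the patch above: the image is
    // only being extracted, never run, so an unmappable user is harmless.
    func mapUser(user string) (uid, gid uint32, ok bool) {
        parts := strings.SplitN(user, ":", 2)
        u, err := strconv.ParseUint(parts[0], 10, 32)
        if err != nil {
            fmt.Println("Warning: could not map UID for user:", user)
            return 0, 0, false
        }
        g := u // default the group to the uid when no group is given
        if len(parts) == 2 {
            if parsed, perr := strconv.ParseUint(parts[1], 10, 32); perr == nil {
                g = parsed
            }
        }
        return uint32(u), uint32(g), true
    }

    func main() {
        fmt.Println(mapUser("1000:100")) // 1000 100 true
        fmt.Println(mapUser("nginx"))    // prints the warning, returns 0 0 false
    }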
diff --git a/external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch b/external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch
new file mode 100644
index 00000000..5594f976
--- /dev/null
+++ b/external/meta-virtualization/recipes-containers/oci-image-tools/files/0001-image-manifest-Recursively-remove-pre-existing-entri.patch
@@ -0,0 +1,78 @@
+From 1f205c0aec5ea9e983d61a64e7ce871ae416bebd Mon Sep 17 00:00:00 2001
+From: "W. Trevor King" <wking@tremily.us>
+Date: Tue, 18 Oct 2016 02:16:46 -0700
+Subject: [PATCH 1/2] image/manifest: Recursively remove pre-existing entries
+ when unpacking
+
+Implementing the logic that is in-flight with [1], but using recursive
+removal [2]. GNU tar has a --recursive-unlink option that's not
+enabled by default, with the motivation being something like "folks
+would be mad if we blew away a full tree and replaced it with a broken
+symlink" [3]. That makes sense for working filesystems, but we're
+building the rootfs from scratch here so losing information is not a
+concern. This commit always uses recursive removal to get that old
+thing off the filesystem (whatever it takes ;).
+
+The exception to the removal is if both the tar entry and existing
+path occupant are directories. In this case we want to use GNU tar's
+default --overwrite-dir behavior, but unpackLayer's metadata handling
+is currently very weak so I've left it at "don't delete the old
+directory".
+
+The reworked directory case also fixes a minor bug from 44210d05
+(cmd/oci-image-tool: fix unpacking..., 2016-07-22, #177) where the:
+
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+
+block would not error out if the Lstat failed for a reason besides the
+acceptable IsNotExist. Instead, it would attempt to call MkdirAll,
+which would probably fail for the same reason that Lstat failed
+(e.g. ENOTDIR). But it's better to handle the Lstat errors directly.
+
+[1]: https://github.com/opencontainers/image-spec/pull/317
+[2]: https://github.com/opencontainers/image-spec/pull/317/files#r79214718
+[3]: https://www.gnu.org/software/tar/manual/html_node/Dealing-with-Old-Files.html
+
+Signed-off-by: W. Trevor King <wking@tremily.us>
+---
+ image/manifest.go | 22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+diff --git a/image/manifest.go b/image/manifest.go
+index 8834c1e5f2f0..144bd4f62219 100644
+--- a/src/import/image/manifest.go
++++ b/src/import/image/manifest.go
+@@ -253,11 +253,27 @@ loop:
+ continue loop
+ }
+
++ if hdr.Typeflag != tar.TypeDir {
++ err = os.RemoveAll(path)
++ if err != nil && !os.IsNotExist(err) {
++ return err
++ }
++ }
++
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+- if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+- if err2 := os.MkdirAll(path, info.Mode()); err2 != nil {
+- return errors.Wrap(err2, "error creating directory")
++ fi, err := os.Lstat(path)
++ if err != nil && !os.IsNotExist(err) {
++ return err
++ }
++ if os.IsNotExist(err) || !fi.IsDir() {
++ err = os.RemoveAll(path)
++ if err != nil && !os.IsNotExist(err) {
++ return err
++ }
++ err = os.MkdirAll(path, info.Mode())
++ if err != nil {
++ return err
+ }
+ }
+
+--
+2.4.0.53.g8440f74
+
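The removal rule this patch introduces is easier to read in isolation: whatever currently occupies the target path is deleted recursively before unpacking, unless both the tar entry and the existing occupant are directories, in which case the directory is kept (GNU tar's default --overwrite-dir behaviour). A minimal sketch, using a hypothetical prepareTarget helper rather than the tool's actual code:

    package main

    import (
        "archive/tar"
        "fmt"
        "os"
    )

    // prepareTarget clears the way for a tar entry at path. An existing
    // directory is kept when the entry is also a directory; anything else in
    // the way (file, symlink, or a whole subtree) is removed recursively,
    // since the rootfs is being assembled from scratch and nothing there is
    // worth preserving.
    func prepareTarget(path string, hdr *tar.Header) error {
        if hdr.Typeflag == tar.TypeDir {
            fi, err := os.Lstat(path)
            if err == nil && fi.IsDir() {
                return nil // keep the existing directory
            }
            if err != nil && !os.IsNotExist(err) {
                return err // surface real Lstat failures, e.g. ENOTDIR
            }
            if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) {
                return err
            }
            return os.MkdirAll(path, hdr.FileInfo().Mode())
        }
        if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) {
            return err
        }
        return nil
    }

    func main() {
        hdr := &tar.Header{Name: "usr/bin", Typeflag: tar.TypeDir, Mode: 0755}
        fmt.Println(prepareTarget("/tmp/rootfs/usr/bin", hdr))
    }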
diff --git a/external/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch b/external/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch
new file mode 100644
index 00000000..69bdcdb5
--- /dev/null
+++ b/external/meta-virtualization/recipes-containers/oci-image-tools/files/0002-image-manifest-Split-unpackLayerEntry-into-its-own-f.patch
@@ -0,0 +1,242 @@
+From 1e55f2a83b1f644803b640b72171b4ae0d95217b Mon Sep 17 00:00:00 2001
+From: "W. Trevor King" <wking@tremily.us>
+Date: Thu, 20 Oct 2016 23:30:22 -0700
+Subject: [PATCH 2/2] image/manifest: Split unpackLayerEntry into its own
+ function
+
+To help address:
+
+ $ make lint
+ checking lint
+ image/manifest.go:140::warning: cyclomatic complexity 39 of function unpackLayer() is high (> 35) (gocyclo)
+ ...
+
+Signed-off-by: W. Trevor King <wking@tremily.us>
+---
+ image/manifest.go | 185 +++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 100 insertions(+), 85 deletions(-)
+
+diff --git a/image/manifest.go b/image/manifest.go
+index 144bd4f62219..dfd5a83f70e4 100644
+--- a/src/import/image/manifest.go
++++ b/src/import/image/manifest.go
+@@ -218,116 +218,131 @@ loop:
+ return errors.Wrapf(err, "error advancing tar stream")
+ }
+
+- hdr.Name = filepath.Clean(hdr.Name)
+- if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+- // Not the root directory, ensure that the parent directory exists
+- parent := filepath.Dir(hdr.Name)
+- parentPath := filepath.Join(dest, parent)
+- if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) {
+- if err3 := os.MkdirAll(parentPath, 0755); err3 != nil {
+- return err3
+- }
+- }
+- }
+- path := filepath.Join(dest, hdr.Name)
+- if entries[path] {
+- return fmt.Errorf("duplicate entry for %s", path)
+- }
+- entries[path] = true
+- rel, err := filepath.Rel(dest, path)
++ var whiteout bool
++ whiteout, err = unpackLayerEntry(dest, hdr, tr, &entries)
+ if err != nil {
+ return err
+ }
+- info := hdr.FileInfo()
+- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+- return fmt.Errorf("%q is outside of %q", hdr.Name, dest)
++ if whiteout {
++ continue loop
+ }
+
+- if strings.HasPrefix(info.Name(), ".wh.") {
+- path = strings.Replace(path, ".wh.", "", 1)
++ // Directory mtimes must be handled at the end to avoid further
++ // file creation in them to modify the directory mtime
++ if hdr.Typeflag == tar.TypeDir {
++ dirs = append(dirs, hdr)
++ }
++ }
++ for _, hdr := range dirs {
++ path := filepath.Join(dest, hdr.Name)
+
+- if err := os.RemoveAll(path); err != nil {
+- return errors.Wrap(err, "unable to delete whiteout path")
++ finfo := hdr.FileInfo()
++ // I believe the old version was using time.Now().UTC() to overcome an
++ // invalid error from chtimes.....but here we lose hdr.AccessTime like this...
++ if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil {
++ return errors.Wrap(err, "error changing time")
++ }
++ }
++ return nil
++}
++
++// unpackLayerEntry unpacks a single entry from a layer.
++func unpackLayerEntry(dest string, header *tar.Header, reader io.Reader, entries *map[string]bool) (whiteout bool, err error) {
++ header.Name = filepath.Clean(header.Name)
++ if !strings.HasSuffix(header.Name, string(os.PathSeparator)) {
++ // Not the root directory, ensure that the parent directory exists
++ parent := filepath.Dir(header.Name)
++ parentPath := filepath.Join(dest, parent)
++ if _, err2 := os.Lstat(parentPath); err2 != nil && os.IsNotExist(err2) {
++ if err3 := os.MkdirAll(parentPath, 0755); err3 != nil {
++ return false, err3
+ }
++ }
++ }
++ path := filepath.Join(dest, header.Name)
++ if (*entries)[path] {
++ return false, fmt.Errorf("duplicate entry for %s", path)
++ }
++ (*entries)[path] = true
++ rel, err := filepath.Rel(dest, path)
++ if err != nil {
++ return false, err
++ }
++ info := header.FileInfo()
++ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
++ return false, fmt.Errorf("%q is outside of %q", header.Name, dest)
++ }
+
+- continue loop
++ if strings.HasPrefix(info.Name(), ".wh.") {
++ path = strings.Replace(path, ".wh.", "", 1)
++
++ if err = os.RemoveAll(path); err != nil {
++ return true, errors.Wrap(err, "unable to delete whiteout path")
+ }
+
+- if hdr.Typeflag != tar.TypeDir {
+- err = os.RemoveAll(path)
+- if err != nil && !os.IsNotExist(err) {
+- return err
+- }
++ return true, nil
++ }
++
++ if header.Typeflag != tar.TypeDir {
++ err = os.RemoveAll(path)
++ if err != nil && !os.IsNotExist(err) {
++ return false, err
+ }
++ }
+
+- switch hdr.Typeflag {
+- case tar.TypeDir:
+- fi, err := os.Lstat(path)
++ switch header.Typeflag {
++ case tar.TypeDir:
++ fi, err := os.Lstat(path)
++ if err != nil && !os.IsNotExist(err) {
++ return false, err
++ }
++ if os.IsNotExist(err) || !fi.IsDir() {
++ err = os.RemoveAll(path)
+ if err != nil && !os.IsNotExist(err) {
+- return err
+- }
+- if os.IsNotExist(err) || !fi.IsDir() {
+- err = os.RemoveAll(path)
+- if err != nil && !os.IsNotExist(err) {
+- return err
+- }
+- err = os.MkdirAll(path, info.Mode())
+- if err != nil {
+- return err
+- }
++ return false, err
+ }
+-
+- case tar.TypeReg, tar.TypeRegA:
+- f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode())
++ err = os.MkdirAll(path, info.Mode())
+ if err != nil {
+- return errors.Wrap(err, "unable to open file")
++ return false, err
+ }
++ }
+
+- if _, err := io.Copy(f, tr); err != nil {
+- f.Close()
+- return errors.Wrap(err, "unable to copy")
+- }
+- f.Close()
++ case tar.TypeReg, tar.TypeRegA:
++ f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, info.Mode())
++ if err != nil {
++ return false, errors.Wrap(err, "unable to open file")
++ }
+
+- case tar.TypeLink:
+- target := filepath.Join(dest, hdr.Linkname)
++ if _, err := io.Copy(f, reader); err != nil {
++ f.Close()
++ return false, errors.Wrap(err, "unable to copy")
++ }
++ f.Close()
+
+- if !strings.HasPrefix(target, dest) {
+- return fmt.Errorf("invalid hardlink %q -> %q", target, hdr.Linkname)
+- }
++ case tar.TypeLink:
++ target := filepath.Join(dest, header.Linkname)
+
+- if err := os.Link(target, path); err != nil {
+- return err
+- }
++ if !strings.HasPrefix(target, dest) {
++ return false, fmt.Errorf("invalid hardlink %q -> %q", target, header.Linkname)
++ }
+
+- case tar.TypeSymlink:
+- target := filepath.Join(filepath.Dir(path), hdr.Linkname)
++ if err := os.Link(target, path); err != nil {
++ return false, err
++ }
+
+- if !strings.HasPrefix(target, dest) {
+- return fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)
+- }
++ case tar.TypeSymlink:
++ target := filepath.Join(filepath.Dir(path), header.Linkname)
+
+- if err := os.Symlink(hdr.Linkname, path); err != nil {
+- return err
+- }
+- case tar.TypeXGlobalHeader:
+- return nil
++ if !strings.HasPrefix(target, dest) {
++ return false, fmt.Errorf("invalid symlink %q -> %q", path, header.Linkname)
+ }
+- // Directory mtimes must be handled at the end to avoid further
+- // file creation in them to modify the directory mtime
+- if hdr.Typeflag == tar.TypeDir {
+- dirs = append(dirs, hdr)
+- }
+- }
+- for _, hdr := range dirs {
+- path := filepath.Join(dest, hdr.Name)
+
+- finfo := hdr.FileInfo()
+- // I believe the old version was using time.Now().UTC() to overcome an
+- // invalid error from chtimes.....but here we lose hdr.AccessTime like this...
+- if err := os.Chtimes(path, time.Now().UTC(), finfo.ModTime()); err != nil {
+- return errors.Wrap(err, "error changing time")
++ if err := os.Symlink(header.Linkname, path); err != nil {
++ return false, err
+ }
++ case tar.TypeXGlobalHeader:
++ return false, nil
+ }
+- return nil
++
++ return false, nil
+ }
+--
+2.4.0.53.g8440f74
+
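One branch of unpackLayerEntry worth calling out is the whiteout handling: an entry whose basename starts with ".wh." is never extracted; it marks a path from a lower layer that must be deleted from the rootfs, and the function returns whiteout=true so the caller skips straight to the next entry. A minimal sketch of that rule (the handleWhiteout helper is hypothetical and resolves the shadowed path from the basename, a slightly stricter variant of the strings.Replace used above):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    // handleWhiteout checks whether a layer entry is an OCI whiteout
    // (".wh.<name>"). If so, it deletes the shadowed path beneath dest and
    // reports true so the caller can skip extracting the entry itself.
    func handleWhiteout(dest, name string) (bool, error) {
        base := filepath.Base(name)
        if !strings.HasPrefix(base, ".wh.") {
            return false, nil
        }
        shadowed := filepath.Join(dest, filepath.Dir(name), strings.TrimPrefix(base, ".wh."))
        if err := os.RemoveAll(shadowed); err != nil {
            return true, fmt.Errorf("unable to delete whiteout path: %w", err)
        }
        return true, nil
    }

    func main() {
        // "etc/.wh.hostname" means: remove etc/hostname inherited from a lower layer.
        fmt.Println(handleWhiteout("/tmp/rootfs", "etc/.wh.hostname"))
    }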