LCOW: Mount to short container paths to avoid command-line length limit #37659
@@ -212,6 +212,17 @@ func (d *Driver) getVMID(id string) string {
	return id
}

// remapLongToShortContainerPath does the mapping of a long container path for a
// SCSI attached disk, to a short container path where it's actually mounted.
func remapLongToShortContainerPath(longContainerPath string, attachCounter uint64, svmName string) string {
	shortContainerPath := longContainerPath
	if shortContainerPath != "" && shortContainerPath != toolsScratchPath {
		shortContainerPath = fmt.Sprintf("/tmp/d%d", attachCounter)
		logrus.Debugf("lcowdriver: UVM %s: remapping %s --> %s", svmName, longContainerPath, shortContainerPath)
	}
	return shortContainerPath
}

Review thread on the logrus.Debugf line:
- Comment: We should rewrite logging for this driver to be in line with the other drivers, and log the driver name in a separate field; see moby/daemon/graphdriver/aufs/aufs.go, line 66 in 5fc1244.
- Reply: Sure, but it's consistent currently. For a follow-on perhaps.
- Reply: Yes; definitely.

// startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
// It can optionally be started with a mapped virtual disk. Returns an opengcs config structure
// representing the VM.
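To make the remapping concrete, here is a minimal, self-contained sketch of remapLongToShortContainerPath above. It is simplified: it omits the toolsScratchPath special case and the debug logging, and the long path shown is hypothetical.

package main

import "fmt"

// remapLongToShortContainerPath, simplified: any non-empty layer path is
// replaced by /tmp/d<attachCounter>, the short path the SCSI disk is actually
// mounted at inside the service VM.
func remapLongToShortContainerPath(longContainerPath string, attachCounter uint64) string {
	if longContainerPath == "" {
		return longContainerPath
	}
	return fmt.Sprintf("/tmp/d%d", attachCounter)
}

func main() {
	// Hypothetical long container path derived from a layer directory name.
	long := "/tmp/c8fa0ca1440d1e898d4e27b61a909862b1d77a370d47b4d2a09b483cba2003a7"
	fmt.Println(remapLongToShortContainerPath(long, 3)) // prints /tmp/d3
}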
@@ -239,6 +250,8 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.Mapped

	if exists {
		// Service VM is already up and running. In this case, just hot add the vhds.
		// Note that hotAddVHDs will remap long to short container paths, so no need
		// for us to do that here.
		logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
		if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
			logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
@@ -302,10 +315,23 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.Mapped
	logrus.Debugf("%s: releasing cachedScratchMutex", title)
	d.cachedScratchMutex.Unlock()

	// If requested to start it with a mapped virtual disk, add it now.
	svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...)
	for _, mvd := range svm.config.MappedVirtualDisks {
		svm.attachedVHDs[mvd.HostPath] = 1
	// Add mapped virtual disks. First those that are already in the configuration. Generally,
	// the only one that will be here is the service VM's scratch. The exception is when invoked
	// via the graphdriver's DiffGetter implementation.
	for i, mvd := range svm.config.MappedVirtualDisks {
		svm.attachCounter++
		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}

		// No-op for the service VM's scratch disk. Only applicable in the DiffGetter interface invocation.
		svm.config.MappedVirtualDisks[i].ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
	}

	// Then the remaining ones to add, adding them to the startup configuration.
	for _, mvd := range mvdToAdd {
		svm.attachCounter++
		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
		mvd.ContainerPath = remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
	}

	// Start it.
||||
// Start it. | ||||
|
@@ -351,6 +377,7 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.Mapped | |||
return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err) | ||||
}< 8000 /td> | ||||
svm.scratchAttached = true | ||||
// Don't need to ref-count here as it will be done via hotAddVHDsAtStart() call above. | ||||
} | ||||
|
||||
logrus.Debugf("%s: (%s) success", title, context) | ||||
|
@@ -787,8 +814,13 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
	}

	// Obtain the tar stream for it
	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox)
	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size)
	// The actual container path will have been remapped to a short name, so use that.
	actualContainerPath := svm.getShortContainerPath(&mvd)
	if actualContainerPath == "" {
		return nil, fmt.Errorf("failed to get short container path for %+v in SVM %s", mvd, svm.config.Name)
	}
	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, actualContainerPath, ld.size, ld.isSandbox)
	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, actualContainerPath, ld.isSandbox, ld.size)
	if err != nil {
		svm.hotRemoveVHDs(mvd)
		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
@@ -960,6 +992,17 @@ func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
}

func hostToGuest(hostpath string) string {
	// This is the "long" container path. At the point at which we are
	// calculating this, we don't know which service VM we're going to be
	// using, so we can't translate this to a short path yet, instead
	// deferring until the point at which it's added to an SVM. We don't
	// use long container paths in SVMs for SCSI disks, otherwise it can cause
	// command-line operations that we invoke to fail due to being over ~4200
	// characters when there are ~47 layers involved. An example of this is
	// the mount call to create the overlay across multiple SCSI-attached disks.
	// It doesn't affect VPMem-attached layers during container creation as
	// these get mapped by openGCS to /tmp/N/M where N is a container instance
	// number, and M is a layer number.
	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
}
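A rough, standalone illustration of the length problem the comment above describes: joining one long per-layer path per lower layer into a single mount option quickly approaches the quoted ~4200-character limit, while the short /tmp/dN form stays small. The layer-name length and layer count below are assumptions for illustration only.

package main

import (
	"fmt"
	"strings"
)

func main() {
	const layers = 47                              // roughly the layer count cited above
	longLayer := "/tmp/" + strings.Repeat("a", 64) // hypothetical 64-character layer ID

	var longPaths, shortPaths []string
	for i := 0; i < layers; i++ {
		longPaths = append(longPaths, longLayer)
		shortPaths = append(shortPaths, fmt.Sprintf("/tmp/d%d", i))
	}

	// The long form alone is several thousand characters before the rest of
	// the mount command is even added; the short form is a few hundred.
	fmt.Println("long lowerdir length: ", len(strings.Join(longPaths, ",")))
	fmt.Println("short lowerdir length:", len(strings.Join(shortPaths, ",")))
}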
@@ -1002,7 +1045,12 @@ func (fgc *fileGetCloserFromSVM) Close() error {
func (fgc *fileGetCloserFromSVM) Get(filename string) (io.ReadCloser, error) {
	errOut := &bytes.Buffer{}
	outOut := &bytes.Buffer{}
	file := path.Join(fgc.mvd.ContainerPath, filename)
	// Must map to the actual "short" container path where the SCSI disk was mounted
	actualContainerPath := fgc.svm.getShortContainerPath(fgc.mvd)
	if actualContainerPath == "" {
		return nil, fmt.Errorf("inconsistency detected: couldn't get short container path for %+v in utility VM %s", fgc.mvd, fgc.svm.config.Name)
	}
	file := path.Join(actualContainerPath, filename)
	if err := fgc.svm.runProcess(fmt.Sprintf("cat %s", file), nil, outOut, errOut); err != nil {
		logrus.Debugf("cat %s failed: %s", file, errOut.String())
		return nil, err
@@ -3,6 +3,7 @@
package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"

import (
	"bytes"
	"errors"
	"fmt"
	"io"
@@ -34,6 +35,13 @@ type serviceVMMapItem struct {
	refCount int // refcount for VM
}

// attachedVHD is for reference counting SCSI disks attached to a service VM,
// and for a counter used to generate a short path name for the container path.
type attachedVHD struct {
	refCount      int
	attachCounter uint64
}

type serviceVM struct {
	sync.Mutex           // Serialises operations being performed in this service VM.
	scratchAttached bool // Has a scratch been attached?
@@ -47,8 +55,9 @@ type serviceVM struct {
	stopStatus chan interface{}
	stopError  error

	attachedVHDs map[string]int // Map ref counting all the VHDS we've hot-added/hot-removed.
	unionMounts  map[string]int // Map ref counting all the union filesystems we mounted.
	attachCounter uint64                  // Increasing counter for each add
	attachedVHDs  map[string]*attachedVHD // Map ref counting all the VHDS we've hot-added/hot-removed.
	unionMounts   map[string]int          // Map ref counting all the union filesystems we mounted.
}

// add will add an id to the service vm map. There are three cases:
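A hedged, self-contained sketch of how the new attachCounter and per-VHD refCount fields are intended to interact, using simplified local types rather than the real serviceVM and hcsshim structures; the host paths are made up.

package main

import "fmt"

type attachedVHD struct {
	refCount      int
	attachCounter uint64
}

// fakeSVM stands in for the real serviceVM for illustration purposes only.
type fakeSVM struct {
	attachCounter uint64
	attachedVHDs  map[string]*attachedVHD
}

// hotAdd mirrors the bookkeeping pattern in this change: a repeat attach of the
// same host VHD only bumps the refcount, while a first attach records the
// monotonically increasing counter that later yields the /tmp/d<N> short path.
func (s *fakeSVM) hotAdd(hostPath string) {
	if v, ok := s.attachedVHDs[hostPath]; ok {
		v.refCount++
		return
	}
	s.attachCounter++
	s.attachedVHDs[hostPath] = &attachedVHD{refCount: 1, attachCounter: s.attachCounter}
}

func main() {
	svm := &fakeSVM{attachedVHDs: make(map[string]*attachedVHD)}
	svm.hotAdd(`C:\layers\a\layer.vhd`) // hypothetical host paths
	svm.hotAdd(`C:\layers\b\layer.vhd`)
	svm.hotAdd(`C:\layers\a\layer.vhd`) // second reference to the same VHD
	for p, v := range svm.attachedVHDs {
		fmt.Printf("%s -> refCount=%d shortPath=/tmp/d%d\n", p, v.refCount, v.attachCounter)
	}
}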
@@ -73,7 +82,7 @@ func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool,
	newSVM := &serviceVM{
		startStatus:  make(chan interface{}),
		stopStatus:   make(chan interface{}),
		attachedVHDs: make(map[string]int),
		attachedVHDs: make(map[string]*attachedVHD),
		unionMounts:  make(map[string]int),
		config:       &client.Config{},
	}
@@ -203,15 +212,18 @@ func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error
	defer svm.Unlock()
	for i, mvd := range mvds {
		if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
			svm.attachedVHDs[mvd.HostPath]++
			svm.attachedVHDs[mvd.HostPath].refCount++
			logrus.Debugf("lcowdriver: UVM %s: %s already present, refCount now %d", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
			continue
		}

		if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
		svm.attachCounter++
		shortContainerPath := remapLongToShortContainerPath(mvd.ContainerPath, svm.attachCounter, svm.config.Name)
		if err := svm.config.HotAddVhd(mvd.HostPath, shortContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
			svm.hotRemoveVHDsNoLock(mvds[:i]...)
			return err
		}
		svm.attachedVHDs[mvd.HostPath] = 1
		svm.attachedVHDs[mvd.HostPath] = &attachedVHD{refCount: 1, attachCounter: svm.attachCounter}
	}
	return nil
}
@@ -238,15 +250,17 @@ func (svm *serviceVM) hotRemoveVHDsNoLock(mvds ...hcsshim.MappedVirtualDisk) err
			// defers the VM start to the first operation, it's possible that nothing has been hot-added
			// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
			// don't find the vhd attached.
			logrus.Debugf("lcowdriver: UVM %s: %s is not attached, not doing anything", svm.config.Name, mvd.HostPath)
			continue
		}

		if svm.attachedVHDs[mvd.HostPath] > 1 {
			svm.attachedVHDs[mvd.HostPath]--
		if svm.attachedVHDs[mvd.HostPath].refCount > 1 {
			svm.attachedVHDs[mvd.HostPath].refCount--
			logrus.Debugf("lcowdriver: UVM %s: %s refCount dropped to %d. not removing from UVM", svm.config.Name, mvd.HostPath, svm.attachedVHDs[mvd.HostPath].refCount)
			continue
		}

		// last VHD, so remove from VM and map
		// last reference to VHD, so remove from VM and map
		if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
			delete(svm.attachedVHDs, mvd.HostPath)
		} else {
@@ -270,6 +284,19 @@ func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile s
	return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
}

// getShortContainerPath looks up where a SCSI disk was actually mounted
// in a service VM when we remapped a long path name to a short name.
func (svm *serviceVM) getShortContainerPath(mvd *hcsshim.MappedVirtualDisk) string {
	if mvd.ContainerPath == "" {
		return ""
	}
	avhd, ok := svm.attachedVHDs[mvd.HostPath]
	if !ok {
		return ""
	}
	return fmt.Sprintf("/tmp/d%d", avhd.attachCounter)
}

func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
	if len(mvds) == 0 {
		return fmt.Errorf("createUnionMount: error must have at least 1 layer")
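Callers of getShortContainerPath (Diff in the first file, and the DiffGetter's Get) treat an empty result as an inconsistency rather than a usable path. A small sketch of that guard, with a hypothetical helper standing in for the real method:

package main

import (
	"errors"
	"fmt"
)

// lookupShortPath is a hypothetical stand-in for svm.getShortContainerPath:
// it returns "" when the disk was never attached to this service VM.
func lookupShortPath(hostPath string, attached map[string]uint64) string {
	counter, ok := attached[hostPath]
	if !ok {
		return ""
	}
	return fmt.Sprintf("/tmp/d%d", counter)
}

func main() {
	attached := map[string]uint64{`C:\layers\a\layer.vhd`: 2} // hypothetical state
	short := lookupShortPath(`C:\layers\b\layer.vhd`, attached)
	if short == "" {
		// Mirrors the fail-fast behaviour of the callers in this change.
		fmt.Println(errors.New("inconsistency detected: disk is not attached to this service VM"))
		return
	}
	fmt.Println("cat", short+"/somefile")
}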
@@ -288,11 +315,11 @@ func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedV

	var lowerLayers []string
	if mvds[0].ReadOnly {
		lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
		lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[0]))
	}

	for i := 1; i < len(mvds); i++ {
		lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
		lowerLayers = append(lowerLayers, svm.getShortContainerPath(&mvds[i]))
	}

	logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
@@ -303,15 +330,15 @@ func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedV
	var cmd string
	if len(mvds) == 1 {
		// `FROM SCRATCH` case and the only layer. No overlay required.
		cmd = fmt.Sprintf("mount %s %s", mvds[0].ContainerPath, mountName)
		cmd = fmt.Sprintf("mount %s %s", svm.getShortContainerPath(&mvds[0]), mountName)
	} else if mvds[0].ReadOnly {
		// Readonly overlay
		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
			strings.Join(lowerLayers, ","),
			mountName)
	} else {
		upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
		work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)
		upper := fmt.Sprintf("%s/upper", svm.getShortContainerPath(&mvds[0]))
		work := fmt.Sprintf("%s/work", svm.getShortContainerPath(&mvds[0]))

		if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
			return err
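For reference, a standalone sketch of the command strings the single-layer and read-only branches above produce once short container paths are in use; the layer paths and union mount name are hypothetical, and the lowerdir join mirrors the code above.

package main

import (
	"fmt"
	"strings"
)

func main() {
	mountName := "/tmp/mnt-example" // hypothetical union directory

	// `FROM SCRATCH` case: a single layer, mounted directly.
	fmt.Printf("mount %s %s\n", "/tmp/d1", mountName)

	// Read-only overlay across several short-path layers.
	lowerLayers := []string{"/tmp/d1", "/tmp/d2", "/tmp/d3"}
	fmt.Printf("mount -t overlay overlay -olowerdir=%s %s\n",
		strings.Join(lowerLayers, ","), mountName)
}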
@@ -359,7 +386,15 @@ func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.Mapped
}

func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
	process, err := svm.config.RunProcess(command, stdin, stdout, stderr)
	var process hcsshim.Process
	var err error
	errOut := &bytes.Buffer{}

	if stderr != nil {
		process, err = svm.config.RunProcess(command, stdin, stdout, stderr)
	} else {
		process, err = svm.config.RunProcess(command, stdin, stdout, errOut)
	}

Review thread on this block:
- Comment: Could you assign …
- Reply: @johnstep did you want that in this PR, or in a follow-up? (CI looks green, so if a follow-up, feel free to merge.)
- Reply: Just a nit; a follow-up is fine.

	if err != nil {
		return err
	}
@@ -372,7 +407,12 @@ func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writ
	}

	if exitCode != 0 {
		return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
		// If the caller isn't explicitly capturing stderr output, then capture it here instead.
		e := fmt.Sprintf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
		if stderr == nil {
			e = fmt.Sprintf("%s. (%s)", e, errOut.String())
		}
		return fmt.Errorf(e)
	}
	return nil
}
Review thread on the error handling:
- Comment: Perhaps use errors.Wrapf(err, "failed to .....").
- Reply: I think that would make it nonsensical and put the actual error in the wrong place.
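The stderr handling added to runProcess above follows a common pattern: when the caller passes a nil stderr writer, substitute a local buffer so a failing command can still report its diagnostics in the returned error. A self-contained sketch of that pattern, with runCommand as a hypothetical stand-in for config.RunProcess:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// runCommand is a hypothetical stand-in for svm.config.RunProcess: it "fails",
// writes a diagnostic to stderr, and returns a non-zero exit code.
func runCommand(command string, stdout, stderr io.Writer) int {
	fmt.Fprintf(stderr, "%s: no such file or directory", command)
	return 1
}

// run mirrors the error path in the diff: capture stderr locally when the
// caller did not supply a writer, and fold it into the error message.
func run(command string, stdout, stderr io.Writer) error {
	errOut := &bytes.Buffer{}
	effectiveStderr := stderr
	if effectiveStderr == nil {
		effectiveStderr = errOut
	}
	if exitCode := runCommand(command, stdout, effectiveStderr); exitCode != 0 {
		msg := fmt.Sprintf("command %s failed with exit code %d", command, exitCode)
		if stderr == nil {
			msg = fmt.Sprintf("%s. (%s)", msg, errOut.String())
		}
		return errors.New(msg)
	}
	return nil
}

func main() {
	fmt.Println(run("cat /tmp/d3/missing", io.Discard, nil))
}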