from ganeti import ssconf
from ganeti import uidpool
from ganeti import compat
+from ganeti import masterd
+
+import ganeti.masterd.instance # pylint: disable-msg=W0611
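# Editorial note: a self-contained illustration of why the seemingly unused
# "import ganeti.masterd.instance" above is needed (and why pylint's
# unused-import warning W0611 is silenced). Importing a package does not
# import its submodules; "masterd.instance" only resolves as an attribute
# after the submodule itself has been imported. The stdlib "xml" package is
# used here purely as a stand-in (assumes a fresh interpreter):
import xml                  # binds the package; xml.dom is not loaded yet
assert not hasattr(xml, "dom")
import xml.dom              # loading the submodule binds it on the package
assert hasattr(xml, "dom")  # masterd.instance becomes usable the same way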
class LogicalUnit(object):
return faulty
-def _FormatTimestamp(secs):
- """Formats a Unix timestamp with the local timezone.
-
- """
- return time.strftime("%F %T %Z", time.gmtime(secs))
-
-
class LUPostInitCluster(LogicalUnit):
"""Logical unit for running hooks after cluster initialization.
return master
-def _VerifyCertificateInner(filename, expired, not_before, not_after, now,
- warn_days=constants.SSL_CERT_EXPIRATION_WARN,
- error_days=constants.SSL_CERT_EXPIRATION_ERROR):
- """Verifies certificate details for LUVerifyCluster.
-
- """
- if expired:
- msg = "Certificate %s is expired" % filename
-
- if not_before is not None and not_after is not None:
- msg += (" (valid from %s to %s)" %
- (_FormatTimestamp(not_before),
- _FormatTimestamp(not_after)))
- elif not_before is not None:
- msg += " (valid from %s)" % _FormatTimestamp(not_before)
- elif not_after is not None:
- msg += " (valid until %s)" % _FormatTimestamp(not_after)
-
- return (LUVerifyCluster.ETYPE_ERROR, msg)
-
- elif not_before is not None and not_before > now:
- return (LUVerifyCluster.ETYPE_WARNING,
- "Certificate %s not yet valid (valid from %s)" %
- (filename, _FormatTimestamp(not_before)))
-
- elif not_after is not None:
- remaining_days = int((not_after - now) / (24 * 3600))
-
- msg = ("Certificate %s expires in %d days" % (filename, remaining_days))
-
- if remaining_days <= error_days:
- return (LUVerifyCluster.ETYPE_ERROR, msg)
-
- if remaining_days <= warn_days:
- return (LUVerifyCluster.ETYPE_WARNING, msg)
-
- return (None, None)
-
-
def _VerifyCertificate(filename):
"""Verifies a certificate for LUVerifyCluster.
return (LUVerifyCluster.ETYPE_ERROR,
"Failed to load X509 certificate %s: %s" % (filename, err))
- # Depending on the pyOpenSSL version, this can just return (None, None)
- (not_before, not_after) = utils.GetX509CertValidity(cert)
+ (errcode, msg) = \
+ utils.VerifyX509Certificate(cert, constants.SSL_CERT_EXPIRATION_WARN,
+ constants.SSL_CERT_EXPIRATION_ERROR)
+
+ if msg:
+ fnamemsg = "While verifying %s: %s" % (filename, msg)
+ else:
+ fnamemsg = None
+
+ if errcode is None:
+ return (None, fnamemsg)
+ elif errcode == utils.CERT_WARNING:
+ return (LUVerifyCluster.ETYPE_WARNING, fnamemsg)
+ elif errcode == utils.CERT_ERROR:
+ return (LUVerifyCluster.ETYPE_ERROR, fnamemsg)
- return _VerifyCertificateInner(filename, cert.has_expired(),
- not_before, not_after, time.time())
+ raise errors.ProgrammerError("Unhandled certificate error code %r" % errcode)
class LUVerifyCluster(LogicalUnit):
" node %s: %s" %
(instance.name, instance.primary_node, msg))
- logging.info("Removing block devices for instance %s", instance.name)
+ _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
- if not _RemoveDisks(self, instance):
- if self.op.ignore_failures:
- feedback_fn("Warning: can't remove instance's disks")
- else:
- raise errors.OpExecError("Can't remove instance's disks")
- logging.info("Removing instance %s out of cluster config", instance.name)
+def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
+ """Utility function to remove an instance.
+
+ """
+ logging.info("Removing block devices for instance %s", instance.name)
+
+ if not _RemoveDisks(lu, instance):
+ if not ignore_failures:
+ raise errors.OpExecError("Can't remove instance's disks")
+ feedback_fn("Warning: can't remove instance's disks")
- self.cfg.RemoveInstance(instance.name)
- self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
+ logging.info("Removing instance %s out of cluster config", instance.name)
+
+ lu.cfg.RemoveInstance(instance.name)
+
+ assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
+ "Instance lock removal conflict"
+
+ # Remove lock for the instance
+ lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
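# Editorial note: a standalone sketch of the failure policy the new
# _RemoveInstance helper gives both of its call sites (LURemoveInstance and
# the export path further below). RuntimeError stands in for
# errors.OpExecError; everything here is illustrative.
def _remove_with_policy(disks_removed, ignore_failures, feedback_fn):
  """Mirror the helper: abort on disk-removal failure unless told to warn."""
  if not disks_removed:
    if not ignore_failures:
      raise RuntimeError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")
  # ...config removal and instance-lock bookkeeping follow unconditionally

# With ignore_failures=True a disk-removal failure degrades to a warning:
_remove_with_policy(False, True, feedback_fn=lambda msg: None)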
class LUQueryInstances(NoHooksLU):
else:
file_storage_dir = ""
-
disks = _GenerateDiskTemplate(self,
self.op.disk_template,
instance, pnode_name,
elif self.op.mode == constants.INSTANCE_IMPORT:
feedback_fn("* running the instance OS import scripts...")
- src_node = self.op.src_node
- src_images = self.src_images
- cluster_name = self.cfg.GetClusterName()
- # FIXME: pass debug option from opcode to backend
- import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
- src_node, src_images,
- cluster_name,
- self.op.debug_level)
- msg = import_result.fail_msg
- if msg:
- self.LogWarning("Error while importing the disk images for instance"
- " %s on node %s: %s" % (instance, pnode_name, msg))
+
+ transfers = []
+
+ for idx, image in enumerate(self.src_images):
+ if not image:
+ continue
+
+ # FIXME: pass debug option from opcode to backend
+ dt = masterd.instance.DiskTransfer("disk/%s" % idx,
+ constants.IEIO_FILE, (image, ),
+ constants.IEIO_SCRIPT,
+ (iobj.disks[idx], idx),
+ None)
+ transfers.append(dt)
+
+ import_result = \
+ masterd.instance.TransferInstanceData(self, feedback_fn,
+ self.op.src_node, pnode_name,
+ self.pnode.secondary_ip,
+ iobj, transfers)
+ if not compat.all(import_result):
+ self.LogWarning("Some disks for instance %s on node %s were not"
+ " imported successfully" % (instance, pnode_name))
+
else:
# also checked in the prereq part
raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
"""Check the arguments.
"""
+ _CheckBooleanOpField(self.op, "remove_instance")
+ _CheckBooleanOpField(self.op, "ignore_remove_failures")
+
self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
constants.DEFAULT_SHUTDOWN_TIMEOUT)
+ self.remove_instance = getattr(self.op, "remove_instance", False)
+ self.ignore_remove_failures = getattr(self.op, "ignore_remove_failures",
+ False)
+
+ if self.remove_instance and not self.op.shutdown:
+ raise errors.OpPrereqError("Can not remove instance without shutting it"
+ " down before")
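# Editorial note: a small sketch of the defaulted-boolean opcode field
# pattern used above. _CheckBooleanOpField comes from this patch; the helper
# below is a hypothetical condensation of the check-then-getattr pair.
def _GetBooleanOpField(op, name, default=False):
  """Validate an optional boolean opcode attribute and return it."""
  val = getattr(op, name, default)
  if not isinstance(val, bool):
    raise ValueError("Invalid boolean value for parameter %s" % name)
  return val

class _FakeOp(object):  # illustrative opcode stand-in
  remove_instance = True

assert _GetBooleanOpField(_FakeOp(), "remove_instance") is True
assert _GetBooleanOpField(_FakeOp(), "ignore_remove_failures") is False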
def ExpandNames(self):
self._ExpandAndLockInstance()
+
# FIXME: lock only instance primary and destination node
#
# Sad but true, for now we have do lock all nodes, as we don't know where
"EXPORT_NODE": self.op.target_node,
"EXPORT_DO_SHUTDOWN": self.op.shutdown,
"SHUTDOWN_TIMEOUT": self.shutdown_timeout,
+ # TODO: Generic function for boolean env variables
+ "REMOVE_INSTANCE": str(bool(self.remove_instance)),
}
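# Editorial note: one possible shape for the generic helper the TODO above
# asks for; hook environment values are strings, so booleans must be
# stringified consistently. The function name is hypothetical.
def _BoolHookEnv(value):
  """Convert a (possibly unset) boolean into a hook environment string."""
  return str(bool(value))

assert _BoolHookEnv(True) == "True"
assert _BoolHookEnv(None) == "False"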
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
nl = [self.cfg.GetMasterNode(), self.instance.primary_node,
_CheckNodeNotDrained(self, self.dst_node.name)
# instance disk type verification
+ # TODO: Implement export support for file-based disks
for disk in self.instance.disks:
if disk.dev_type == constants.LD_FILE:
raise errors.OpPrereqError("Export not supported for instances with"
feedback_fn("Shutting down instance %s" % instance.name)
result = self.rpc.call_instance_shutdown(src_node, instance,
self.shutdown_timeout)
+ # TODO: Maybe ignore failures if ignore_remove_failures is set
result.Raise("Could not shutdown instance %s on"
" node %s" % (instance.name, src_node))
try:
# per-disk results
- dresults = []
removed_snaps = [False] * len(instance.disks)
snap_disks = None
try:
snap_disks = self._CreateSnapshots(feedback_fn)
finally:
- if self.op.shutdown and instance.admin_up:
+ if (self.op.shutdown and instance.admin_up and
+ not self.remove_instance):
feedback_fn("Starting instance %s" % instance.name)
result = self.rpc.call_instance_start(src_node, instance,
None, None)
# TODO: check for size
- cluster_name = self.cfg.GetClusterName()
+ def _TransferFinished(idx):
+ logging.debug("Transfer %s finished", idx)
+ if self._RemoveSnapshot(feedback_fn, snap_disks, idx):
+ removed_snaps[idx] = True
+
+ transfers = []
+
for idx, dev in enumerate(snap_disks):
- feedback_fn("Exporting snapshot %s from %s to %s" %
- (idx, src_node, dst_node.name))
- if dev:
- # FIXME: pass debug from opcode to backend
- result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
- instance, cluster_name,
- idx, self.op.debug_level)
- msg = result.fail_msg
- if msg:
- self.LogWarning("Could not export disk/%s from node %s to"
- " node %s: %s", idx, src_node, dst_node.name, msg)
- dresults.append(False)
- else:
- dresults.append(True)
+ if not dev:
+ transfers.append(None)
+ continue
- # Remove snapshot
- if self._RemoveSnapshot(feedback_fn, snap_disks, idx):
- removed_snaps[idx] = True
- else:
- dresults.append(False)
+ path = utils.PathJoin(constants.EXPORT_DIR, "%s.new" % instance.name,
+ dev.physical_id[1])
+
+ finished_fn = compat.partial(_TransferFinished, idx)
+
+ # FIXME: pass debug option from opcode to backend
+ dt = masterd.instance.DiskTransfer("snapshot/%s" % idx,
+ constants.IEIO_SCRIPT, (dev, idx),
+ constants.IEIO_FILE, (path, ),
+ finished_fn)
+ transfers.append(dt)
+
+ # Actually export data
+ dresults = \
+ masterd.instance.TransferInstanceData(self, feedback_fn,
+ src_node, dst_node.name,
+ dst_node.secondary_ip,
+ instance, transfers)
assert len(dresults) == len(instance.disks)
feedback_fn("Deactivating disks for %s" % instance.name)
_ShutdownInstanceDisks(self, instance)
+ # Remove instance if requested
+ if self.remove_instance:
+ feedback_fn("Removing instance %s" % instance.name)
+ _RemoveInstance(self, feedback_fn, instance, self.ignore_remove_failures)
+
self._CleanupExports(feedback_fn)
return fin_resu, dresults
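# Editorial note: a standalone sketch of the per-disk completion callback
# wiring used in the export hunk above. It assumes compat.partial is an
# alias for functools.partial; binding idx at creation time avoids the
# classic late-binding closure bug in loops.
import functools

removed = [False, False, False]

def _transfer_finished(removed_flags, idx):
  removed_flags[idx] = True  # e.g. the snapshot for this disk was cleaned up

callbacks = [functools.partial(_transfer_finished, removed, idx)
             for idx in range(len(removed))]
callbacks[1]()  # the transfer for disk 1 completes
assert removed == [False, True, False]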