Modify two mirror-device-related rpc calls
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index ea56345..04612d7 100644
@@ -528,6 +528,18 @@ def _InitGanetiServerSetup(ss):
                              (result.cmd, result.exit_code, result.output))
 
 
+def _CheckInstanceBridgesExist(instance):
+  """Check that the brigdes needed by an instance exist.
+
+  """
+  # check bridges existence
+  brlist = [nic.bridge for nic in instance.nics]
+  if not rpc.call_bridges_exist(instance.primary_node, brlist):
+    raise errors.OpPrereqError("one or more target bridges %s does not"
+                               " exist on destination node '%s'" %
+                               (brlist, instance.primary_node))
+
+
 class LUInitCluster(LogicalUnit):
   """Initialise the cluster.
 
@@ -798,7 +810,7 @@ class LUVerifyCluster(NoHooksLU):
                           (instance, node))
           bad = True
 
-    return not bad
+    return bad
 
   def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
     """Verify if there are any unknown volumes in the cluster.
@@ -1824,6 +1836,12 @@ def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
     device_info.append((instance.primary_node, inst_disk.iv_name,
                         master_result))
 
+  # leave the disks configured for the primary node
+  # this is a workaround that would be fixed better by
+  # improving the logical/physical id handling
+  for disk in instance.disks:
+    cfg.SetDiskID(disk, instance.primary_node)
+
   return disks_ok, device_info
 
 
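The loop appended above leaves every disk's ID pointing at the primary node;
per the in-code comment this is a workaround pending better logical/physical
ID handling. A toy model of what the loop guarantees (set_disk_id is an
invented stand-in; the real cfg.SetDiskID derives node-specific physical IDs
and recurses into child devices):

    def set_disk_id(disk, node):
        disk["physical_id"] = node  # stand-in for the real per-node mapping

    disks = [{"physical_id": None}, {"physical_id": None}]
    for disk in disks:              # mirrors the loop added in this hunk
        set_disk_id(disk, "primary-node")
    assert all(d["physical_id"] == "primary-node" for d in disks)
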
@@ -1934,11 +1952,7 @@ class LUStartupInstance(LogicalUnit):
                                  self.op.instance_name)
 
     # check bridges existance
-    brlist = [nic.bridge for nic in instance.nics]
-    if not rpc.call_bridges_exist(instance.primary_node, brlist):
-      raise errors.OpPrereqError("one or more target bridges %s does not"
-                                 " exist on destination node '%s'" %
-                                 (brlist, instance.primary_node))
+    _CheckInstanceBridgesExist(instance)
 
     self.instance = instance
     self.op.instance_name = instance.name
@@ -1976,6 +1990,82 @@ class LUStartupInstance(LogicalUnit):
     self.cfg.MarkInstanceUp(instance.name)
 
 
+class LURebootInstance(LogicalUnit):
+  """Reboot an instance.
+
+  """
+  HPATH = "instance-reboot"
+  HTYPE = constants.HTYPE_INSTANCE
+  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
+
+  def BuildHooksEnv(self):
+    """Build hooks env.
+
+    This runs on master, primary and secondary nodes of the instance.
+
+    """
+    env = {
+      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
+      }
+    env.update(_BuildInstanceHookEnvByObject(self.instance))
+    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
+          list(self.instance.secondary_nodes))
+    return env, nl, nl
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This checks that the instance is in the cluster.
+
+    """
+    instance = self.cfg.GetInstanceInfo(
+      self.cfg.ExpandInstanceName(self.op.instance_name))
+    if instance is None:
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
+
+    # check bridges existence
+    _CheckInstanceBridgesExist(instance)
+
+    self.instance = instance
+    self.op.instance_name = instance.name
+
+  def Exec(self, feedback_fn):
+    """Reboot the instance.
+
+    """
+    instance = self.instance
+    ignore_secondaries = self.op.ignore_secondaries
+    reboot_type = self.op.reboot_type
+    extra_args = getattr(self.op, "extra_args", "")
+
+    node_current = instance.primary_node
+
+    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
+                           constants.INSTANCE_REBOOT_HARD,
+                           constants.INSTANCE_REBOOT_FULL]:
+      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
+                                  (constants.INSTANCE_REBOOT_SOFT,
+                                   constants.INSTANCE_REBOOT_HARD,
+                                   constants.INSTANCE_REBOOT_FULL))
+
+    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
+                       constants.INSTANCE_REBOOT_HARD]:
+      if not rpc.call_instance_reboot(node_current, instance,
+                                      reboot_type, extra_args):
+        raise errors.OpExecError("Could not reboot instance")
+    else:
+      if not rpc.call_instance_shutdown(node_current, instance):
+        raise errors.OpExecError("could not shutdown instance for full reboot")
+      _ShutdownInstanceDisks(instance, self.cfg)
+      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
+      if not rpc.call_instance_start(node_current, instance, extra_args):
+        _ShutdownInstanceDisks(instance, self.cfg)
+        raise errors.OpExecError("Could not start instance for full reboot")
+
+    self.cfg.MarkInstanceUp(instance.name)
+
+
 class LUShutdownInstance(LogicalUnit):
   """Shutdown an instance.
 
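LURebootInstance above distinguishes three reboot types: soft and hard are
delegated to a single rpc on the primary node, while full tears the instance
down (shutdown, deactivate disks) and brings it back up. A sketch mirroring
the Exec() control flow with the rpc layer injected as callables; the
constants' string values are assumptions, the patch only names them:

    INSTANCE_REBOOT_SOFT = "soft"   # assumed values
    INSTANCE_REBOOT_HARD = "hard"
    INSTANCE_REBOOT_FULL = "full"

    def reboot_instance(reboot_type, rpc_reboot, rpc_shutdown, rpc_start,
                        stop_disks, start_disks):
        if reboot_type in (INSTANCE_REBOOT_SOFT, INSTANCE_REBOOT_HARD):
            if not rpc_reboot(reboot_type):
                raise RuntimeError("Could not reboot instance")
        elif reboot_type == INSTANCE_REBOOT_FULL:
            if not rpc_shutdown():
                raise RuntimeError("Could not shutdown instance"
                                   " for full reboot")
            stop_disks()
            start_disks()
            if not rpc_start():
                stop_disks()  # don't leave the disks active on failure
                raise RuntimeError("Could not start instance for full reboot")
        else:
            raise ValueError("reboot type not in [soft, hard, full]")
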
@@ -2193,8 +2283,7 @@ class LURemoveInstance(LogicalUnit):
 
     """
     env = _BuildInstanceHookEnvByObject(self.instance)
-    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
-          list(self.instance.secondary_nodes))
+    nl = [self.sstore.GetMasterNode()]
     return env, nl, nl
 
   def CheckPrereq(self):
@@ -2219,12 +2308,19 @@ class LURemoveInstance(LogicalUnit):
                 (instance.name, instance.primary_node))
 
     if not rpc.call_instance_shutdown(instance.primary_node, instance):
-      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
-                               (instance.name, instance.primary_node))
+      if self.op.ignore_failures:
+        feedback_fn("Warning: can't shutdown instance")
+      else:
+        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
+                                 (instance.name, instance.primary_node))
 
     logger.Info("removing block devices for instance %s" % instance.name)
 
-    _RemoveDisks(instance, self.cfg)
+    if not _RemoveDisks(instance, self.cfg):
+      if self.op.ignore_failures:
+        feedback_fn("Warning: can't remove instance's disks")
+      else:
+        raise errors.OpExecError("Can't remove instance's disks")
 
     logger.Info("removing instance %s out of cluster config" % instance.name)
 
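With the new ignore_failures flag set, instance removal downgrades shutdown
and disk-removal failures to warnings, so a wedged instance can still be
dropped from the cluster configuration. The distilled control flow (all
callables are injected stand-ins for the rpc and _RemoveDisks calls):

    def remove_instance(shutdown_ok, remove_disks_ok, ignore_failures,
                        feedback_fn):
        if not shutdown_ok():
            if ignore_failures:
                feedback_fn("Warning: can't shutdown instance")
            else:
                raise RuntimeError("Could not shutdown instance")
        if not remove_disks_ok():
            if ignore_failures:
                feedback_fn("Warning: can't remove instance's disks")
            else:
                raise RuntimeError("Can't remove instance's disks")
        # only after both steps is the instance dropped from the config
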
@@ -2364,9 +2460,9 @@ class LUFailoverInstance(LogicalUnit):
       raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
 
-    if instance.disk_template != constants.DT_REMOTE_RAID1:
+    if instance.disk_template not in constants.DTS_NET_MIRROR:
       raise errors.OpPrereqError("Instance's disk layout is not"
-                                 " remote_raid1.")
+                                 " network mirrored, cannot failover.")
 
     secondary_nodes = instance.secondary_nodes
     if not secondary_nodes:
@@ -2530,16 +2626,32 @@ def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
   """
   port = cfg.AllocatePort()
   vgname = cfg.GetVGName()
-  dev_data = objects.Disk(dev_type="lvm", size=size,
+  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                           logical_id=(vgname, names[0]))
-  dev_meta = objects.Disk(dev_type="lvm", size=128,
+  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                           logical_id=(vgname, names[1]))
-  drbd_dev = objects.Disk(dev_type="drbd", size=size,
+  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                           logical_id = (primary, secondary, port),
                           children = [dev_data, dev_meta])
   return drbd_dev
 
 
+def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
+  """Generate a drbd8 device complete with its children.
+
+  """
+  port = cfg.AllocatePort()
+  vgname = cfg.GetVGName()
+  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
+                          logical_id=(vgname, names[0]))
+  dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
+                          logical_id=(vgname, names[1]))
+  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
+                          logical_id = (primary, secondary, port),
+                          children = [dev_data, dev_meta],
+                          iv_name=iv_name)
+  return drbd_dev
+
 def _GenerateDiskTemplate(cfg, template_name,
                           instance_name, primary_node,
                           secondary_nodes, disk_sz, swap_sz):
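
_GenerateDRBD8Branch builds the same data-LV-plus-metadata-LV pair under a
DRBD device as the DRBD7 variant above, but carries its own iv_name because
no MD device will sit on top of it. A bare sketch of the resulting structure
(Disk is a namedtuple stand-in for objects.Disk; the dev_type strings stand
in for the constants.LD_* values):

    from collections import namedtuple

    Disk = namedtuple("Disk", "dev_type size logical_id children iv_name")

    def drbd8_branch(primary, secondary, port, vg, size, names, iv_name):
        data = Disk("lv", size, (vg, names[0]), [], None)
        meta = Disk("lv", 128, (vg, names[1]), [], None)  # fixed 128MB meta
        return Disk("drbd8", size, (primary, secondary, port),
                    [data, meta], iv_name)

    sda = drbd8_branch("node1", "node2", 11000, "xenvg", 10240,
                       ["inst.sda_data", "inst.sda_meta"], "sda")
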
@@ -2556,10 +2668,10 @@ def _GenerateDiskTemplate(cfg, template_name,
       raise errors.ProgrammerError("Wrong template configuration")
 
     names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
-    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
+    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                            logical_id=(vgname, names[0]),
                            iv_name = "sda")
-    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
+    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                            logical_id=(vgname, names[1]),
                            iv_name = "sdb")
     disks = [sda_dev, sdb_dev]
@@ -2570,18 +2682,18 @@ def _GenerateDiskTemplate(cfg, template_name,
 
     names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                        ".sdb_m1", ".sdb_m2"])
-    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
+    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                               logical_id=(vgname, names[0]))
-    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
+    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                               logical_id=(vgname, names[1]))
-    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
+    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                               size=disk_sz,
                               children = [sda_dev_m1, sda_dev_m2])
-    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
+    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                               logical_id=(vgname, names[2]))
-    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
+    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                               logical_id=(vgname, names[3]))
-    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
+    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                               size=swap_sz,
                               children = [sdb_dev_m1, sdb_dev_m2])
     disks = [md_sda_dev, md_sdb_dev]
@@ -2593,13 +2705,24 @@ def _GenerateDiskTemplate(cfg, template_name,
                                        ".sdb_data", ".sdb_meta"])
     drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                          disk_sz, names[0:2])
-    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
+    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                               children = [drbd_sda_dev], size=disk_sz)
     drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                          swap_sz, names[2:4])
-    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
+    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                               children = [drbd_sdb_dev], size=swap_sz)
     disks = [md_sda_dev, md_sdb_dev]
+  elif template_name == constants.DT_DRBD8:
+    if len(secondary_nodes) != 1:
+      raise errors.ProgrammerError("Wrong template configuration")
+    remote_node = secondary_nodes[0]
+    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
+                                       ".sdb_data", ".sdb_meta"])
+    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+                                         disk_sz, names[0:2], "sda")
+    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
+                                         swap_sz, names[2:4], "sdb")
+    disks = [drbd_sda_dev, drbd_sdb_dev]
   else:
     raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
   return disks
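
Compared with remote_raid1, which layers an MD RAID1 device over each DRBD7
branch, the new drbd8 template exposes the DRBD8 devices directly as sda and
sdb. The resulting tree shapes as plain data, one of the two devices shown
per template (labels are illustrative only):

    remote_raid1_sda = {
        "dev_type": "md_raid1", "iv_name": "sda",
        "children": [
            {"dev_type": "drbd", "children": ["sda_data lv", "sda_meta lv"]},
        ],
    }
    drbd8_sda = {
        "dev_type": "drbd8", "iv_name": "sda",
        "children": ["sda_data lv", "sda_meta lv"],
    }
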
@@ -2649,7 +2772,7 @@ def _RemoveDisks(instance, cfg):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be remove, the removal will continue with the other ones (compare
+  be removed, the removal will continue with the other ones (compare
   with `_CreateDisks()`).
 
   Args:
@@ -2776,9 +2899,9 @@ class LUCreateInstance(LogicalUnit):
     if self.op.disk_template not in constants.DISK_TEMPLATES:
       raise errors.OpPrereqError("Invalid disk template name")
 
-    if self.op.disk_template == constants.DT_REMOTE_RAID1:
+    if self.op.disk_template in constants.DTS_NET_MIRROR:
       if getattr(self.op, "snode", None) is None:
-        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
+        raise errors.OpPrereqError("The networked disk templates need"
                                    " a mirror node")
 
       snode_name = self.cfg.ExpandNodeName(self.op.snode)
@@ -2801,6 +2924,7 @@ class LUCreateInstance(LogicalUnit):
       constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
       # 256 MB are added for drbd metadata, 128MB for each drbd device
       constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
+      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
     }
 
     if self.op.disk_template not in req_size_dict:
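
The new DT_DRBD8 entry reuses the remote_raid1 sizing rule spelled out in the
comment above it: 128MB of DRBD metadata per device, two devices (sda and
sdb), hence the extra 256MB. As a worked check (the function name is invented
for this note):

    def required_mb_drbd8(disk_size_mb, swap_size_mb, meta_mb=128, devices=2):
        return disk_size_mb + swap_size_mb + devices * meta_mb

    assert required_mb_drbd8(10240, 4096) == 10240 + 4096 + 256
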
@@ -2910,7 +3034,7 @@ class LUCreateInstance(LogicalUnit):
 
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self.cfg, iobj)
-    elif iobj.disk_template == constants.DT_REMOTE_RAID1:
+    elif iobj.disk_template in constants.DTS_NET_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
@@ -3099,8 +3223,8 @@ class LUAddMDDRBDComponent(LogicalUnit):
     # the device exists now
     # call the primary node to add the mirror to md
     logger.Info("adding new mirror component to md")
-    if not rpc.call_blockdev_addchild(instance.primary_node,
-                                           disk, new_drbd):
+    if not rpc.call_blockdev_addchildren(instance.primary_node,
+                                         disk, [new_drbd]):
       logger.Error("Can't add mirror compoment to md!")
       self.cfg.SetDiskID(new_drbd, remote_node)
       if not rpc.call_blockdev_remove(remote_node, new_drbd):
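
This hunk is one of the two rpc-call changes the commit title refers to; the
matching removechild -> removechildren rename follows below. The new calls
accept a list of child devices instead of a single one, so callers wrap the
device in a list. A stand-in showing the caller-side difference (FakeDev and
the function body are invented; the real call is proxied to the target node
and can fail):

    class FakeDev(object):
        def __init__(self):
            self.children = []

    def call_blockdev_addchildren(node, parent, children):
        parent.children.extend(children)  # children is now a list
        return True

    parent, new_drbd = FakeDev(), FakeDev()
    assert call_blockdev_addchildren("node1", parent, [new_drbd])
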
@@ -3166,7 +3290,8 @@ class LURemoveMDDRBDComponent(LogicalUnit):
       raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                  " instance." % self.op.disk_name)
     for child in disk.children:
-      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
+      if (child.dev_type == constants.LD_DRBD7 and
+          child.logical_id[2] == self.op.disk_id):
         break
     else:
       raise errors.OpPrereqError("Can't find the device with this port.")
@@ -3191,8 +3316,8 @@ class LURemoveMDDRBDComponent(LogicalUnit):
     child = self.child
     logger.Info("remove mirror component")
     self.cfg.SetDiskID(disk, instance.primary_node)
-    if not rpc.call_blockdev_removechild(instance.primary_node,
-                                              disk, child):
+    if not rpc.call_blockdev_removechildren(instance.primary_node,
+                                            disk, [child]):
       raise errors.OpExecError("Can't remove child from mirror.")
 
     for node in child.logical_id[:2]:
@@ -3302,8 +3427,8 @@ class LUReplaceDisks(LogicalUnit):
       # the device exists now
       # call the primary node to add the mirror to md
       logger.Info("adding new mirror component to md")
-      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
-                                        new_drbd):
+      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
+                                           [new_drbd]):
         logger.Error("Can't add mirror compoment to md!")
         cfg.SetDiskID(new_drbd, remote_node)
         if not rpc.call_blockdev_remove(remote_node, new_drbd):
@@ -3337,8 +3462,8 @@ class LUReplaceDisks(LogicalUnit):
       dev, child, new_drbd = iv_names[name]
       logger.Info("remove mirror %s component" % name)
       cfg.SetDiskID(dev, instance.primary_node)
-      if not rpc.call_blockdev_removechild(instance.primary_node,
-                                                dev, child):
+      if not rpc.call_blockdev_removechildren(instance.primary_node,
+                                              dev, [child]):
         logger.Error("Can't remove child from mirror, aborting"
                      " *this device cleanup*.\nYou need to cleanup manually!!")
         continue
@@ -3389,7 +3514,7 @@ class LUQueryInstanceData(NoHooksLU):
     """
     self.cfg.SetDiskID(dev, instance.primary_node)
     dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
-    if dev.dev_type == "drbd":
+    if dev.dev_type in constants.LDS_DRBD:
       # we change the snode then (otherwise we use the one passed in)
       if dev.logical_id[0] == instance.primary_node:
         snode = dev.logical_id[1]
@@ -3448,6 +3573,7 @@ class LUQueryInstanceData(NoHooksLU):
         "memory": instance.memory,
         "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
         "disks": disks,
+        "vcpus": instance.vcpus,
         }
 
       result[instance.name] = idict
@@ -3650,7 +3776,7 @@ class LUExportInstance(LogicalUnit):
             logger.Error("could not snapshot block device %s on node %s" %
                          (disk.logical_id[1], src_node))
           else:
-            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
+            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                       logical_id=(vgname, new_dev_name),
                                       physical_id=(vgname, new_dev_name),
                                       iv_name=disk.iv_name)