Revision 46c936d6

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -3447,23 +3447,23 @@
     assert self.op.power_delay >= 0.0
 
     if self.op.node_names:
-      if self.op.command in self._SKIP_MASTER:
-        if self.master_node in self.op.node_names:
-          master_node_obj = self.cfg.GetNodeInfo(self.master_node)
-          master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
-
-          if master_oob_handler:
-            additional_text = ("Run '%s %s %s' if you want to operate on the"
-                               " master regardless") % (master_oob_handler,
-                                                        self.op.command,
-                                                        self.master_node)
-          else:
-            additional_text = "The master node does not support out-of-band"
+      if (self.op.command in self._SKIP_MASTER and
+          self.master_node in self.op.node_names):
+        master_node_obj = self.cfg.GetNodeInfo(self.master_node)
+        master_oob_handler = _SupportsOob(self.cfg, master_node_obj)
+
+        if master_oob_handler:
+          additional_text = ("run '%s %s %s' if you want to operate on the"
+                             " master regardless") % (master_oob_handler,
+                                                      self.op.command,
+                                                      self.master_node)
+        else:
+          additional_text = "it does not support out-of-band operations"
 
-          raise errors.OpPrereqError(("Operating on the master node %s is not"
-                                      " allowed for %s\n%s") %
-                                     (self.master_node, self.op.command,
-                                      additional_text), errors.ECODE_INVAL)
+        raise errors.OpPrereqError(("Operating on the master node %s is not"
+                                    " allowed for %s; %s") %
+                                   (self.master_node, self.op.command,
+                                    additional_text), errors.ECODE_INVAL)
     else:
       self.op.node_names = self.cfg.GetNodeList()
       if self.op.command in self._SKIP_MASTER:
@@ -3526,14 +3526,14 @@
                                      self.op.timeout)
 
       if result.fail_msg:
-        self.LogWarning("On node '%s' out-of-band RPC failed with: %s",
+        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                         node.name, result.fail_msg)
         node_entry.append((constants.RS_NODATA, None))
       else:
         try:
           self._CheckPayload(result)
         except errors.OpExecError, err:
-          self.LogWarning("The payload returned by '%s' is not valid: %s",
+          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                           node.name, err)
           node_entry.append((constants.RS_NODATA, None))
         else:
@@ -3542,8 +3542,8 @@
             for item, status in result.payload:
               if status in [constants.OOB_STATUS_WARNING,
                             constants.OOB_STATUS_CRITICAL]:
-                self.LogWarning("On node '%s' item '%s' has status '%s'",
-                                node.name, item, status)
+                self.LogWarning("Item '%s' on node '%s' has status '%s'",
+                                item, node.name, status)
 
           if self.op.command == constants.OOB_POWER_ON:
             node.powered = True
@@ -3813,15 +3813,14 @@
 
     masternode = self.cfg.GetMasterNode()
     if node.name == masternode:
-      raise errors.OpPrereqError("Node is the master node,"
-                                 " you need to failover first.",
-                                 errors.ECODE_INVAL)
+      raise errors.OpPrereqError("Node is the master node, failover to another"
+                                 " node is required", errors.ECODE_INVAL)
 
     for instance_name in instance_list:
       instance = self.cfg.GetInstanceInfo(instance_name)
       if node.name in instance.all_nodes:
         raise errors.OpPrereqError("Instance %s is still running on the node,"
-                                   " please remove first." % instance_name,
+                                   " please remove first" % instance_name,
                                    errors.ECODE_INVAL)
     self.op.node_name = node.name
     self.node = node
@@ -4708,7 +4707,7 @@
 
     self.old_flags = old_flags = (node.master_candidate,
                                   node.drained, node.offline)
-    assert old_flags in self._F2R, "Un-handled old flags  %s" % str(old_flags)
+    assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
     self.old_role = old_role = self._F2R[old_flags]
 
     # Check for ineffective changes
@@ -4724,12 +4723,12 @@
     if _SupportsOob(self.cfg, node):
       if self.op.offline is False and not (node.powered or
                                            self.op.powered == True):
-        raise errors.OpPrereqError(("Please power on node %s first before you"
-                                    " can reset offline state") %
+        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
+                                    " offline status can be reset") %
                                    self.op.node_name)
     elif self.op.powered is not None:
       raise errors.OpPrereqError(("Unable to change powered state for node %s"
-                                  " which does not support out-of-band"
+                                  " as it does not support out-of-band"
                                   " handling") % self.op.node_name)
 
     # If we're being deofflined/drained, we'll MC ourself if needed
@@ -5740,7 +5739,7 @@
     else:
       for idx in self.op.disks:
         if idx >= len(instance.disks):
-          raise errors.OpPrereqError("Invalid disk index passed '%s'" % idx,
+          raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
                                      errors.ECODE_INVAL)
 
     self.instance = instance
@@ -5771,7 +5770,7 @@
     """
     if self.op.ip_check and not self.op.name_check:
       # TODO: make the ip check more flexible and not depend on the name check
-      raise errors.OpPrereqError("Cannot do ip check without a name check",
+      raise errors.OpPrereqError("IP address check requires a name check",
                                  errors.ECODE_INVAL)
 
   def BuildHooksEnv(self):
@@ -6677,15 +6676,15 @@
 
     if runningon_source and runningon_target:
       raise errors.OpExecError("Instance seems to be running on two nodes,"
-                               " or the hypervisor is confused. You will have"
+                               " or the hypervisor is confused; you will have"
                                " to ensure manually that it runs only on one"
-                               " and restart this operation.")
+                               " and restart this operation")
 
     if not (runningon_source or runningon_target):
-      raise errors.OpExecError("Instance does not seem to be running at all."
-                               " In this case, it's safer to repair by"
+      raise errors.OpExecError("Instance does not seem to be running at all;"
+                               " in this case it's safer to repair by"
                                " running 'gnt-instance stop' to ensure disk"
-                               " shutdown, and then restarting it.")
+                               " shutdown, and then restarting it")
 
     if runningon_target:
       # the migration has actually succeeded, we need to update the config
@@ -6727,10 +6726,9 @@
       self._GoReconnect(False)
       self._WaitUntilSync()
     except errors.OpExecError, err:
-      self.lu.LogWarning("Migration failed and I can't reconnect the"
-                         " drives: error '%s'\n"
-                         "Please look and recover the instance status" %
-                         str(err))
+      self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
+                         " please try to recover the instance manually;"
+                         " error '%s'" % str(err))
 
   def _AbortMigration(self):
     """Call the hypervisor code to abort a started migration.
@@ -6772,7 +6770,7 @@
       if not _CheckDiskConsistency(self.lu, dev, target_node, False):
         raise errors.OpExecError("Disk %s is degraded or not fully"
                                  " synchronized on target node,"
-                                 " aborting migrate." % dev.iv_name)
+                                 " aborting migration" % dev.iv_name)
 
     # First get the migration information from the remote node
     result = self.rpc.call_migration_info(source_node, instance)
@@ -6866,7 +6864,7 @@
         if not _CheckDiskConsistency(self, dev, target_node, False):
           if not self.ignore_consistency:
             raise errors.OpExecError("Disk %s is degraded on target node,"
-                                     " aborting failover." % dev.iv_name)
+                                     " aborting failover" % dev.iv_name)
     else:
       self.feedback_fn("* not checking disk consistency as instance is not"
                        " running")
@@ -6880,9 +6878,9 @@
     msg = result.fail_msg
     if msg:
      if self.ignore_consistency or primary_node.offline:
-        self.lu.LogWarning("Could not shutdown instance %s on node %s."
-                           " Proceeding anyway. Please make sure node"
-                           " %s is down. Error details: %s",
+        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
+                           " proceeding anyway; please make sure node"
+                           " %s is down; error details: %s",
                            instance.name, source_node, source_node, msg)
       else:
         raise errors.OpExecError("Could not shutdown instance %s on"
@@ -7243,8 +7241,8 @@
 
     for idx, success in enumerate(result.payload):
       if not success:
-        lu.LogWarning("Warning: Resume sync of disk %d failed. Please have a"
-                      " look at the status and troubleshoot the issue.", idx)
+        lu.LogWarning("Resume sync of disk %d failed, please have a"
+                      " look at the status and troubleshoot the issue", idx)
         logging.warn("resume-sync of instance %s for disks %d failed",
                      instance.name, idx)
 
@@ -7493,8 +7491,8 @@
 
     if self.op.ip_check and not self.op.name_check:
       # TODO: make the ip check more flexible and not depend on the name check
-      raise errors.OpPrereqError("Cannot do ip check without a name check",
-                                 errors.ECODE_INVAL)
+      raise errors.OpPrereqError("Cannot do IP address check without a name"
+                                 " check", errors.ECODE_INVAL)
 
     # check nics' parameter names
     for nic in self.op.nics:
@@ -7672,7 +7670,7 @@
         self.op.src_node = None
         if os.path.isabs(src_path):
           raise errors.OpPrereqError("Importing an instance from an absolute"
-                                     " path requires a source node option.",
+                                     " path requires a source node option",
                                      errors.ECODE_INVAL)
       else:
         self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
@@ -8121,7 +8119,7 @@
     if self.op.disk_template in constants.DTS_INT_MIRROR:
       if self.op.snode == pnode.name:
         raise errors.OpPrereqError("The secondary node cannot be the"
-                                   " primary node.", errors.ECODE_INVAL)
+                                   " primary node", errors.ECODE_INVAL)
       _CheckNodeOnline(self, self.op.snode)
       _CheckNodeNotDrained(self, self.op.snode)
       _CheckNodeVmCapable(self, self.op.snode)
@@ -8731,11 +8729,11 @@
 
     if remote_node == self.instance.primary_node:
       raise errors.OpPrereqError("The specified node is the primary node of"
-                                 " the instance.", errors.ECODE_INVAL)
+                                 " the instance", errors.ECODE_INVAL)
 
     if remote_node == secondary_node:
       raise errors.OpPrereqError("The specified node is already the"
-                                 " secondary node of the instance.",
+                                 " secondary node of the instance",
                                  errors.ECODE_INVAL)
 
     if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
@@ -9448,7 +9446,7 @@
 
     if instance.disk_template not in constants.DTS_GROWABLE:
       raise errors.OpPrereqError("Instance's disk layout does not support"
-                                 " growing.", errors.ECODE_INVAL)
+                                 " growing", errors.ECODE_INVAL)
 
     self.disk = instance.FindDisk(self.op.disk)
 
@@ -9487,14 +9485,14 @@
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self, instance, disks=[disk])
       if disk_abort:
-        self.proc.LogWarning("Warning: disk sync-ing has not returned a good"
-                             " status.\nPlease check the instance.")
+        self.proc.LogWarning("Disk sync-ing has not returned a good"
+                             " status; please check the instance")
       if not instance.admin_up:
         _SafeShutdownInstanceDisks(self, instance, disks=[disk])
     elif not instance.admin_up:
       self.proc.LogWarning("Not shutting down the disk even if the instance is"
                            " not supposed to be running because no wait for"
-                           " sync mode was requested.")
+                           " sync mode was requested")
 
 
 class LUInstanceQueryData(NoHooksLU):
