Revision 5bfac263

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -69,7 +69,7 @@
     validity.

     """
-    self.processor = processor
+    self.proc = processor
     self.op = op
     self.cfg = cfg
     self.sstore = sstore
@@ -1015,7 +1015,7 @@
                      "please restart manually.")


-def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
+def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
   """Sleep and poll for an instance's disk to sync.

   """
@@ -1023,7 +1023,7 @@
     return True

   if not oneshot:
-    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)
+    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

   node = instance.primary_node

@@ -1037,7 +1037,7 @@
     cumul_degraded = False
     rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
     if not rstats:
-      logger.ToStderr("Can't get any data from node %s" % node)
+      proc.LogWarning("Can't get any data from node %s" % node)
       retries += 1
       if retries >= 10:
         raise errors.RemoteError("Can't contact node %s for mirror data,"
@@ -1048,7 +1048,7 @@
     for i in range(len(rstats)):
       mstat = rstats[i]
       if mstat is None:
-        logger.ToStderr("Can't compute data for node %s/%s" %
+        proc.LogWarning("Can't compute data for node %s/%s" %
                         (node, instance.disks[i].iv_name))
         continue
       # we ignore the ldisk parameter
@@ -1061,8 +1061,8 @@
           max_time = est_time
         else:
           rem_time = "no time estimate"
-        logger.ToStdout("- device %s: %5.2f%% done, %s" %
-                        (instance.disks[i].iv_name, perc_done, rem_time))
+        proc.LogInfo("- device %s: %5.2f%% done, %s" %
+                     (instance.disks[i].iv_name, perc_done, rem_time))
     if done or oneshot:
       break

@@ -1075,7 +1075,7 @@
         utils.Lock('cmd')

   if done:
-    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
+    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
   return not cumul_degraded

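In the hunks above, _WaitForSync only ever calls LogInfo and LogWarning on its new proc argument; the logical units pass their mcpu processor for it, as the call sites further down show. A minimal stand-in that satisfies the same interface, sketched here purely as an illustration and not part of this revision:

# Illustrative stand-in for the new "proc" argument (not part of this
# revision): _WaitForSync only needs LogInfo and LogWarning, which the real
# mcpu.Processor also provides.
class FakeProc:
  def LogInfo(self, message):
    # show an informational message to the user
    print "INFO: %s" % message

  def LogWarning(self, message, hint=None):
    # show a warning, plus an optional hint line
    print "WARNING: %s" % message
    if hint:
      print "      Hint: %s" % hint

Anything exposing these two methods can be handed to _WaitForSync(cfg, instance, proc), which keeps the helper independent of how feedback actually reaches the user.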
@@ -3050,12 +3050,12 @@
     self.cfg.AddInstance(iobj)

     if self.op.wait_for_sync:
-      disk_abort = not _WaitForSync(self.cfg, iobj)
+      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
     elif iobj.disk_template in constants.DTS_NET_MIRROR:
       # make sure the disks are not degraded (still sync-ing is ok)
       time.sleep(15)
       feedback_fn("* checking mirrors status")
-      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
+      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
     else:
       disk_abort = False

@@ -3257,7 +3257,7 @@

     self.cfg.AddInstance(instance)

-    _WaitForSync(self.cfg, instance)
+    _WaitForSync(self.cfg, instance, self.proc)

     return 0

@@ -3513,7 +3513,7 @@
     # this can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its
     # return value
-    _WaitForSync(cfg, instance, unlock=True)
+    _WaitForSync(cfg, instance, self.proc, unlock=True)

     # so check manually all the devices
     for name in iv_names:
@@ -3566,7 +3566,7 @@

     """
     steps_total = 6
-    warning, info = (self.processor.LogWarning, self.processor.LogInfo)
+    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
     instance = self.instance
     iv_names = {}
     vgname = self.cfg.GetVGName()
@@ -3576,7 +3576,7 @@
     oth_node = self.oth_node

     # Step: check device activation
-    self.processor.LogStep(1, steps_total, "check device existence")
+    self.proc.LogStep(1, steps_total, "check device existence")
     info("checking volume groups")
     my_vg = cfg.GetVGName()
     results = rpc.call_vg_list([oth_node, tgt_node])
@@ -3598,7 +3598,7 @@
                                    (dev.iv_name, node))

     # Step: check other node consistency
-    self.processor.LogStep(2, steps_total, "check peer consistency")
+    self.proc.LogStep(2, steps_total, "check peer consistency")
     for dev in instance.disks:
       if not dev.iv_name in self.op.disks:
         continue
@@ -3610,7 +3610,7 @@
                                  (oth_node, tgt_node))

     # Step: create new storage
-    self.processor.LogStep(3, steps_total, "allocate new storage")
+    self.proc.LogStep(3, steps_total, "allocate new storage")
     for dev in instance.disks:
       if not dev.iv_name in self.op.disks:
         continue
@@ -3638,7 +3638,7 @@
                                    (new_lv.logical_id[1], tgt_node))

     # Step: for each lv, detach+rename*2+attach
-    self.processor.LogStep(4, steps_total, "change drbd configuration")
+    self.proc.LogStep(4, steps_total, "change drbd configuration")
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       info("detaching %s drbd from local storage" % dev.iv_name)
       if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
@@ -3698,8 +3698,8 @@
     # this can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its
     # return value
-    self.processor.LogStep(5, steps_total, "sync devices")
-    _WaitForSync(cfg, instance, unlock=True)
+    self.proc.LogStep(5, steps_total, "sync devices")
+    _WaitForSync(cfg, instance, self.proc, unlock=True)

     # so check manually all the devices
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
@@ -3709,7 +3709,7 @@
         raise errors.OpExecError("DRBD device %s is degraded!" % name)

     # Step: remove old storage
-    self.processor.LogStep(6, steps_total, "removing old storage")
+    self.proc.LogStep(6, steps_total, "removing old storage")
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
@@ -3738,7 +3738,7 @@

     """
     steps_total = 6
-    warning, info = (self.processor.LogWarning, self.processor.LogInfo)
+    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
     instance = self.instance
     iv_names = {}
     vgname = self.cfg.GetVGName()
@@ -3749,7 +3749,7 @@
     pri_node = instance.primary_node

     # Step: check device activation
-    self.processor.LogStep(1, steps_total, "check device existence")
+    self.proc.LogStep(1, steps_total, "check device existence")
     info("checking volume groups")
     my_vg = cfg.GetVGName()
     results = rpc.call_vg_list([pri_node, new_node])
@@ -3770,7 +3770,7 @@
                                  (dev.iv_name, pri_node))

     # Step: check other node consistency
-    self.processor.LogStep(2, steps_total, "check peer consistency")
+    self.proc.LogStep(2, steps_total, "check peer consistency")
     for dev in instance.disks:
       if not dev.iv_name in self.op.disks:
         continue
@@ -3781,7 +3781,7 @@
                                  pri_node)

     # Step: create new storage
-    self.processor.LogStep(3, steps_total, "allocate new storage")
+    self.proc.LogStep(3, steps_total, "allocate new storage")
     for dev in instance.disks:
       size = dev.size
       info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
@@ -3797,7 +3797,7 @@

       iv_names[dev.iv_name] = (dev, dev.children)

-    self.processor.LogStep(4, steps_total, "changing drbd configuration")
+    self.proc.LogStep(4, steps_total, "changing drbd configuration")
     for dev in instance.disks:
       size = dev.size
       info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
@@ -3839,8 +3839,8 @@
     # this can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its
     # return value
-    self.processor.LogStep(5, steps_total, "sync devices")
-    _WaitForSync(cfg, instance, unlock=True)
+    self.proc.LogStep(5, steps_total, "sync devices")
+    _WaitForSync(cfg, instance, self.proc, unlock=True)

     # so check manually all the devices
     for name, (dev, old_lvs) in iv_names.iteritems():
@@ -3849,7 +3849,7 @@
       if is_degr:
         raise errors.OpExecError("DRBD device %s is degraded!" % name)

-    self.processor.LogStep(6, steps_total, "removing old storage")
+    self.proc.LogStep(6, steps_total, "removing old storage")
     for name, (dev, old_lvs) in iv_names.iteritems():
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
@@ -4157,7 +4157,7 @@
     # shutdown the instance, unless requested not to do so
     if self.op.shutdown:
       op = opcodes.OpShutdownInstance(instance_name=instance.name)
-      self.processor.ChainOpCode(op)
+      self.proc.ChainOpCode(op)

     vgname = self.cfg.GetVGName()

@@ -4183,7 +4183,7 @@
       if self.op.shutdown:
         op = opcodes.OpStartupInstance(instance_name=instance.name,
                                        force=False)
-        self.processor.ChainOpCode(op)
+        self.proc.ChainOpCode(op)

     # TODO: check for size

@@ -4209,7 +4209,7 @@
     # substitutes an empty list with the full cluster node list.
     if nodelist:
       op = opcodes.OpQueryExports(nodes=nodelist)
-      exportlist = self.processor.ChainOpCode(op)
+      exportlist = self.proc.ChainOpCode(op)
       for node in exportlist:
         if instance.name in exportlist[node]:
           if not rpc.call_export_remove(node, instance.name):
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -173,13 +173,14 @@
     logger.Debug("Step %d/%d %s" % (current, total, message))
     self._feedback_fn("STEP %d/%d %s" % (current, total, message))

-  def LogWarning(self, message, hint):
+  def LogWarning(self, message, hint=None):
     """Log a warning to the logs and the user.

     """
     logger.Error(message)
     self._feedback_fn(" - WARNING: %s" % message)
-    self._feedback_fn("      Hint: %s" % hint)
+    if hint:
+      self._feedback_fn("      Hint: %s" % hint)

   def LogInfo(self, message):
     """Log an informational message to the logs and the user.
