Revision 4c4e4e1e

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
             if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
   if brlist:
     result = lu.rpc.call_bridges_exist(target_node, brlist)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking bridges on destination node"
-                                 " '%s': %s" % (target_node, msg))
+    result.Raise("Error checking bridges on destination node '%s'" %
+                 target_node, prereq=True)


 def _CheckInstanceBridgesExist(lu, instance, node=None):
......
     """
     master = self.cfg.GetMasterNode()
     result = self.rpc.call_node_stop_master(master, False)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Could not disable the master role: %s" % msg)
+    result.Raise("Could not disable the master role")
     priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
     utils.CreateBackup(priv_key)
     utils.CreateBackup(pub_key)
......
         ntype = "regular"
       feedback_fn("* Verifying node %s (%s)" % (node, ntype))

-      msg = all_nvinfo[node].RemoteFailMsg()
+      msg = all_nvinfo[node].fail_msg
       if msg:
         feedback_fn("  - ERROR: while contacting node %s: %s" % (node, msg))
         bad = True
......
         for node_name in hooks_results:
           show_node_header = True
           res = hooks_results[node_name]
-          msg = res.RemoteFailMsg()
+          msg = res.fail_msg
           if msg:
             if res.offline:
               # no need to warn or set fail return value
......
       node_res = node_lvs[node]
       if node_res.offline:
         continue
-      msg = node_res.RemoteFailMsg()
+      msg = node_res.fail_msg
       if msg:
         logging.warning("Error enumerating LVs on node %s: %s", node, msg)
         res_nodes[node] = msg
......
     # shutdown the master IP
     master = self.cfg.GetMasterNode()
     result = self.rpc.call_node_stop_master(master, False)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Could not disable the master role: %s" % msg)
+    result.Raise("Could not disable the master role")

     try:
       cluster = self.cfg.GetClusterInfo()
......
       result = self.rpc.call_upload_file(node_list,
                                          constants.SSH_KNOWN_HOSTS_FILE)
       for to_node, to_result in result.iteritems():
-         msg = to_result.RemoteFailMsg()
+         msg = to_result.fail_msg
          if msg:
            msg = ("Copy of file %s to node %s failed: %s" %
                    (constants.SSH_KNOWN_HOSTS_FILE, to_node, msg))
......

     finally:
       result = self.rpc.call_node_start_master(master, False)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         self.LogWarning("Could not re-enable the master role on"
                         " the master, please restart manually: %s", msg)
......
     if self.op.vg_name:
       vglist = self.rpc.call_vg_list(node_list)
       for node in node_list:
-        msg = vglist[node].RemoteFailMsg()
+        msg = vglist[node].fail_msg
         if msg:
           # ignoring down node
           self.LogWarning("Error while gathering data on node %s"
......
     if os.path.exists(fname):
       result = lu.rpc.call_upload_file(dist_nodes, fname)
       for to_node, to_result in result.items():
-         msg = to_result.RemoteFailMsg()
+         msg = to_result.fail_msg
          if msg:
            msg = ("Copy of file %s to node %s failed: %s" %
                    (fname, to_node, msg))
......
     done = True
     cumul_degraded = False
     rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
-    msg = rstats.RemoteFailMsg()
+    msg = rstats.fail_msg
     if msg:
       lu.LogWarning("Can't get any data from node %s: %s", node, msg)
       retries += 1
......
   result = True
   if on_primary or dev.AssembleOnSecondary():
     rstats = lu.rpc.call_blockdev_find(node, dev)
-    msg = rstats.RemoteFailMsg()
+    msg = rstats.fail_msg
     if msg:
       lu.LogWarning("Can't find disk on node %s: %s", node, msg)
       result = False
......
     # level), so that nodes with a non-responding node daemon don't
     # make all OSes invalid
     good_nodes = [node_name for node_name in rlist
-                  if not rlist[node_name].RemoteFailMsg()]
+                  if not rlist[node_name].fail_msg]
     for node_name, nr in rlist.items():
-      if nr.RemoteFailMsg() or not nr.payload:
+      if nr.fail_msg or not nr.payload:
         continue
       for name, path, status, diagnose in nr.payload:
         if name not in all_os:
......
     self.context.RemoveNode(node.name)

     result = self.rpc.call_node_leave_cluster(node.name)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       self.LogWarning("Errors encountered on the remote node while leaving"
                       " the cluster: %s", msg)
......
                                           self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data[name]
-        if not nodeinfo.RemoteFailMsg() and nodeinfo.payload:
+        if not nodeinfo.fail_msg and nodeinfo.payload:
           nodeinfo = nodeinfo.payload
           fn = utils.TryConvert
           live_data[name] = {
......
       nresult = volumes[node]
       if nresult.offline:
         continue
-      msg = nresult.RemoteFailMsg()
+      msg = nresult.fail_msg
       if msg:
         self.LogWarning("Can't compute volume data on node %s: %s", node, msg)
         continue
......

     # check connectivity
     result = self.rpc.call_version([node])[node]
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Can't get version information from"
-                               " node %s: %s" % (node, msg))
+    result.Raise("Can't get version information from node %s" % node)
     if constants.PROTOCOL_VERSION == result.payload:
       logging.info("Communication to node %s fine, sw version %s match",
                    node, result.payload)
......
     result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
                                     keyarray[2],
                                     keyarray[3], keyarray[4], keyarray[5])
-
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Cannot transfer ssh keys to the"
-                               " new node: %s" % msg)
+    result.Raise("Cannot transfer ssh keys to the new node")

     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
......
     if new_node.secondary_ip != new_node.primary_ip:
       result = self.rpc.call_node_has_ip_address(new_node.name,
                                                  new_node.secondary_ip)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Failure checking secondary ip"
-                                   " on node %s: %s" % (new_node.name, msg))
+      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
+                   prereq=True)
       if not result.payload:
         raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                  " you gave (%s). Please fix and re-run this"
......
     result = self.rpc.call_node_verify(node_verify_list, node_verify_param,
                                        self.cfg.GetClusterName())
     for verifier in node_verify_list:
-      msg = result[verifier].RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Cannot communicate with node %s: %s" %
-                                 (verifier, msg))
+      result[verifier].Raise("Cannot communicate with node %s" % verifier)
       nl_payload = result[verifier].payload['nodelist']
       if nl_payload:
         for failed in nl_payload:
......
       result.append(("master_candidate", str(self.op.master_candidate)))
       if self.op.master_candidate == False:
         rrc = self.rpc.call_node_demote_from_mc(node.name)
-        msg = rrc.RemoteFailMsg()
+        msg = rrc.fail_msg
         if msg:
           self.LogWarning("Node failed to demote itself: %s" % msg)

......
     """
     result = self.rpc.call_node_powercycle(self.op.node_name,
                                            self.cfg.GetHypervisorType())
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Failed to schedule the reboot: %s" % msg)
+    result.Raise("Failed to schedule the reboot")
     return result.payload


......
     for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
                            " (is_primary=False, pass=1): %s",
......
         continue
       lu.cfg.SetDiskID(node_disk, node)
       result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
                            " (is_primary=True, pass=2): %s",
......

   """
   pnode = instance.primary_node
-  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])
-  ins_l = ins_l[pnode]
-  msg = ins_l.RemoteFailMsg()
-  if msg:
-    raise errors.OpExecError("Can't contact node %s: %s" % (pnode, msg))
+  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])[pnode]
+  ins_l.Raise("Can't contact node %s" % pnode)

   if instance.name in ins_l.payload:
     raise errors.OpExecError("Instance is running, can't shutdown"
......
     for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(top_disk, node)
       result = lu.rpc.call_blockdev_shutdown(node, top_disk)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                       disk.iv_name, node, msg)
......

   """
   nodeinfo = lu.rpc.call_node_info([node], lu.cfg.GetVGName(), hypervisor_name)
-  msg = nodeinfo[node].RemoteFailMsg()
-  if msg:
-    raise errors.OpPrereqError("Can't get data from node %s: %s" % (node, msg))
+  nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True)
   free_mem = nodeinfo[node].payload.get('memory_free', None)
   if not isinstance(free_mem, int):
     raise errors.OpPrereqError("Can't compute free memory on node %s, result"
......
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if not remote_info.payload: # not running already
       _CheckNodeFreeMemory(self, instance.primary_node,
                            "starting instance %s" % instance.name,
......

     result = self.rpc.call_instance_start(node_current, instance,
                                           self.hvparams, self.beparams)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       _ShutdownInstanceDisks(self, instance)
       raise errors.OpExecError("Could not start instance: %s" % msg)
......
         self.cfg.SetDiskID(disk, node_current)
       result = self.rpc.call_instance_reboot(node_current, instance,
                                              reboot_type)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not reboot instance: %s" % msg)
+      result.Raise("Could not reboot instance")
     else:
       result = self.rpc.call_instance_shutdown(node_current, instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not shutdown instance for"
-                                 " full reboot: %s" % msg)
+      result.Raise("Could not shutdown instance for full reboot")
       _ShutdownInstanceDisks(self, instance)
       _StartInstanceDisks(self, instance, ignore_secondaries)
       result = self.rpc.call_instance_start(node_current, instance, None, None)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance for"
......
     node_current = instance.primary_node
     self.cfg.MarkInstanceDown(instance.name)
     result = self.rpc.call_instance_shutdown(node_current, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       self.proc.LogWarning("Could not shutdown instance: %s" % msg)

......
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
......
         raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                    self.op.pnode)
       result = self.rpc.call_os_get(pnode.name, self.op.os_type)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
-                                   " primary node %s: %s"  %
-                                   (self.op.os_type, pnode.pname, msg))
+      result.Raise("OS '%s' not in supported OS list for primary node %s" %
+                   (self.op.os_type, pnode.name), prereq=True)

     self.instance = instance

......
     try:
       feedback_fn("Running the instance OS create scripts...")
       result = self.rpc.call_instance_os_add(inst.primary_node, inst, True)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not install OS for instance %s"
-                                 " on node %s: %s" %
-                                 (inst.name, inst.primary_node, msg))
+      result.Raise("Could not install OS for instance %s on node %s" %
+                   (inst.name, inst.primary_node))
     finally:
       _ShutdownInstanceDisks(self, inst)

......
     remote_info = self.rpc.call_instance_info(instance.primary_node,
                                               instance.name,
                                               instance.hypervisor)
-    msg = remote_info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Error checking node %s: %s" %
-                                 (instance.primary_node, msg))
+    remote_info.Raise("Error checking node %s" % instance.primary_node,
+                      prereq=True)
     if remote_info.payload:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
......
       result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                      old_file_storage_dir,
                                                      new_file_storage_dir)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not rename on node %s"
-                                 " directory '%s' to '%s' (but the instance"
-                                 " has been renamed in Ganeti): %s" %
-                                 (inst.primary_node, old_file_storage_dir,
-                                  new_file_storage_dir, msg))
+      result.Raise("Could not rename on node %s directory '%s' to '%s'"
+                   " (but the instance has been renamed in Ganeti)" %
+                   (inst.primary_node, old_file_storage_dir,
+                    new_file_storage_dir))

     _StartInstanceDisks(self, inst, None)
     try:
       result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                  old_name)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         msg = ("Could not run OS rename script for instance %s on node %s"
                " (but the instance has been renamed in Ganeti): %s" %
......
                  instance.name, instance.primary_node)

     result = self.rpc.call_instance_shutdown(instance.primary_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       if self.op.ignore_failures:
         feedback_fn("Warning: can't shutdown instance: %s" % msg)
......
         if result.offline:
           # offline nodes will be in both lists
           off_nodes.append(name)
-        if result.failed or result.RemoteFailMsg():
+        if result.failed or result.fail_msg:
           bad_nodes.append(name)
         else:
           if result.payload:
......
                  instance.name, source_node)

     result = self.rpc.call_instance_shutdown(source_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       if self.op.ignore_consistency:
         self.proc.LogWarning("Could not shutdown instance %s on node %s."
......

       feedback_fn("* starting the instance on the target node")
       result = self.rpc.call_instance_start(target_node, instance, None, None)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         _ShutdownInstanceDisks(self, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
......
       _CheckNodeNotDrained(self, target_node)
       result = self.rpc.call_instance_migratable(instance.primary_node,
                                                  instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Can't migrate: %s - please use failover" %
-                                   msg)
+      result.Raise("Can't migrate, please use failover", prereq=True)

     self.instance = instance

......
                                             self.instance.disks)
       min_percent = 100
       for node, nres in result.items():
-        msg = nres.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Cannot resync disks on node %s: %s" %
-                                   (node, msg))
+        nres.Raise("Cannot resync disks on node %s" % node)
         node_done, node_percent = nres.payload
         all_done = all_done and node_done
         if node_percent is not None:
......

     result = self.rpc.call_blockdev_close(node, self.instance.name,
                                           self.instance.disks)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Cannot change disk to secondary on node %s,"
-                               " error %s" % (node, msg))
+    result.Raise("Cannot change disk to secondary on node %s" % node)

   def _GoStandalone(self):
     """Disconnect from the network.
......
     result = self.rpc.call_drbd_disconnect_net(self.all_nodes, self.nodes_ip,
                                                self.instance.disks)
     for node, nres in result.items():
-      msg = nres.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Cannot disconnect disks node %s,"
-                                 " error %s" % (node, msg))
+      nres.Raise("Cannot disconnect disks node %s" % node)

   def _GoReconnect(self, multimaster):
     """Reconnect to the network.
......
                                            self.instance.disks,
                                            self.instance.name, multimaster)
     for node, nres in result.items():
-      msg = nres.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Cannot change disks config on node %s,"
-                                 " error: %s" % (node, msg))
+      nres.Raise("Cannot change disks config on node %s" % node)

   def _ExecCleanup(self):
     """Try to cleanup after a failed migration.
......
                      " a bad state)")
     ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
     for node, result in ins_l.items():
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't contact node %s: %s" % (node, msg))
+      result.Raise("Can't contact node %s" % node)

     runningon_source = instance.name in ins_l[source_node].payload
     runningon_target = instance.name in ins_l[target_node].payload
......
                                                     instance,
                                                     migration_info,
                                                     False)
-    abort_msg = abort_result.RemoteFailMsg()
+    abort_msg = abort_result.fail_msg
     if abort_msg:
       logging.error("Aborting migration failed on target node %s: %s" %
                     (target_node, abort_msg))
......

     # First get the migration information from the remote node
     result = self.rpc.call_migration_info(source_node, instance)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       log_err = ("Failed fetching source migration information from %s: %s" %
                  (source_node, msg))
......
                                            migration_info,
                                            self.nodes_ip[target_node])

-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance pre-migration failed, trying to revert"
                     " disk status: %s", msg)
......
     result = self.rpc.call_instance_migrate(source_node, instance,
                                             self.nodes_ip[target_node],
                                             self.op.live)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance migration failed, trying to revert"
                     " disk status: %s", msg)
......
                                               instance,
                                               migration_info,
                                               True)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       logging.error("Instance migration succeeded, but finalization failed:"
                     " %s" % msg)
......
   lu.cfg.SetDiskID(device, node)
   result = lu.rpc.call_blockdev_create(node, device, device.size,
                                        instance.name, force_open, info)
-  msg = result.RemoteFailMsg()
-  if msg:
-    raise errors.OpExecError("Can't create block device %s on"
-                             " node %s for instance %s: %s" %
-                             (device, node, instance.name, msg))
+  result.Raise("Can't create block device %s on"
+               " node %s for instance %s" % (device, node, instance.name))
   if device.physical_id is None:
     device.physical_id = result.payload

......
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

-    msg = result.RemoteFailMsg()
-
-    if msg:
-      raise errors.OpExecError("Failed to create directory '%s' on"
-                               " node %s: %s" % (file_storage_dir, msg))
+    result.Raise("Failed to create directory '%s' on"
+                 " node %s" % (file_storage_dir, pnode))

   # Note: this needs to be kept in sync with adding of disks in
   # LUSetInstanceParams
......
   for device in instance.disks:
     for node, disk in device.ComputeNodeTree(instance.primary_node):
       lu.cfg.SetDiskID(disk, node)
-      msg = lu.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+      msg = lu.rpc.call_blockdev_remove(node, disk).fail_msg
       if msg:
         lu.LogWarning("Could not remove block device %s on node %s,"
                       " continuing anyway: %s", device.iv_name, node, msg)
......
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     result = lu.rpc.call_file_storage_dir_remove(instance.primary_node,
                                                  file_storage_dir)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                     file_storage_dir, instance.primary_node, msg)
......
     info = hvinfo[node]
     if info.offline:
       continue
-    msg = info.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("Hypervisor parameter validation"
-                                 " failed on node %s: %s" % (node, msg))
+    info.Raise("Hypervisor parameter validation failed on node %s" % node)


 class LUCreateInstance(LogicalUnit):
......
         exp_list = self.rpc.call_export_list(locked_nodes)
         found = False
         for node in exp_list:
-          if exp_list[node].RemoteFailMsg():
+          if exp_list[node].fail_msg:
             continue
           if src_path in exp_list[node].payload:
             found = True
......

       _CheckNodeOnline(self, src_node)
       result = self.rpc.call_export_info(src_node, src_path)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("No export or invalid export found in"
-                                   " dir %s: %s" % (src_path, msg))
+      result.Raise("No export or invalid export found in dir %s" % src_path)

       export_info = objects.SerializableConfigParser.Loads(str(result.payload))
       if not export_info.has_section(constants.INISECT_EXP):
......
                                          self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo[node]
-        msg = info.RemoteFailMsg()
-        if msg:
-          raise errors.OpPrereqError("Cannot get current information"
-                                     " from node %s: %s" % (node, msg))
+        info.Raise("Cannot get current information from node %s" % node)
         info = info.payload
         vg_free = info.get('vg_free', None)
         if not isinstance(vg_free, int):
......

     # os verification
     result = self.rpc.call_os_get(pnode.name, self.op.os_type)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpPrereqError("OS '%s' not in supported os list for"
-                                 " primary node %s: %s"  %
-                                 (self.op.os_type, pnode.name, msg))
+    result.Raise("OS '%s' not in supported os list for primary node %s" %
+                 (self.op.os_type, pnode.name), prereq=True)

     _CheckNicsBridgesExist(self, self.nics, self.pnode.name)

......
       if self.op.mode == constants.INSTANCE_CREATE:
         feedback_fn("* running the instance OS create scripts...")
         result = self.rpc.call_instance_os_add(pnode_name, iobj, False)
-        msg = result.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Could not add os for instance %s"
-                                   " on node %s: %s" %
-                                   (instance, pnode_name, msg))
+        result.Raise("Could not add os for instance %s"
+                     " on node %s" % (instance, pnode_name))

       elif self.op.mode == constants.INSTANCE_IMPORT:
         feedback_fn("* running the instance OS import scripts...")
......
         import_result = self.rpc.call_instance_os_import(pnode_name, iobj,
                                                          src_node, src_images,
                                                          cluster_name)
-        msg = import_result.RemoteFailMsg()
+        msg = import_result.fail_msg
         if msg:
           self.LogWarning("Error while importing the disk images for instance"
                           " %s on node %s: %s" % (instance, pnode_name, msg))
......
       logging.info("Starting instance %s on node %s", instance, pnode_name)
       feedback_fn("* starting instance...")
       result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not start instance: %s" % msg)
+      result.Raise("Could not start instance")


 class LUConnectConsole(NoHooksLU):
......

     node_insts = self.rpc.call_instance_list([node],
                                              [instance.hypervisor])[node]
-    msg = node_insts.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Can't get node information from %s: %s" %
-                               (node, msg))
+    node_insts.Raise("Can't get node information from %s" % node)

     if instance.name not in node_insts.payload:
       raise errors.OpExecError("Instance %s is not running." % instance.name)
......
       raise errors.OpExecError("Can't list volume groups on the nodes")
     for node in oth_node, tgt_node:
       res = results[node]
-      msg = res.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      res.Raise("Error checking node %s" % node)
       if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
......
         info("checking disk/%d on %s" % (idx, node))
         cfg.SetDiskID(dev, node)
         result = self.rpc.call_blockdev_find(node, dev)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if not msg and not result.payload:
           msg = "disk not found"
         if msg:
......
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       info("detaching %s drbd from local storage" % dev.iv_name)
       result = self.rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't detach drbd from local storage on node"
-                                 " %s for device %s: %s" %
-                                 (tgt_node, dev.iv_name, msg))
+      result.Raise("Can't detach drbd from local storage on node"
+                   " %s for device %s" % (tgt_node, dev.iv_name))
       #dev.children = []
       #cfg.Update(instance)

......
       rlist = []
       for to_ren in old_lvs:
         result = self.rpc.call_blockdev_find(tgt_node, to_ren)
-        if not result.RemoteFailMsg() and result.payload:
+        if not result.fail_msg and result.payload:
           # device exists
           rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

       info("renaming the old LVs on the target node")
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't rename old LVs on node %s: %s" %
-                                 (tgt_node, msg))
+      result.Raise("Can't rename old LVs on node %s" % tgt_node)
       # now we rename the new LVs to the old LVs
       info("renaming the new LVs on the target node")
       rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
       result = self.rpc.call_blockdev_rename(tgt_node, rlist)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Can't rename new LVs on node %s: %s" %
-                                 (tgt_node, msg))
+      result.Raise("Can't rename new LVs on node %s" % tgt_node)

       for old, new in zip(old_lvs, new_lvs):
         new.logical_id = old.logical_id
......
       # now that the new lvs have the old name, we can add them to the device
       info("adding new mirror component on %s" % tgt_node)
       result = self.rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if msg:
         for new_lv in new_lvs:
-          msg = self.rpc.call_blockdev_remove(tgt_node, new_lv).RemoteFailMsg()
-          if msg:
-            warning("Can't rollback device %s: %s", dev, msg,
+          msg2 = self.rpc.call_blockdev_remove(tgt_node, new_lv).fail_msg
+          if msg2:
+            warning("Can't rollback device %s: %s", dev, msg2,
                     hint="cleanup manually the unused logical volumes")
         raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

......
     for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
       cfg.SetDiskID(dev, instance.primary_node)
       result = self.rpc.call_blockdev_find(instance.primary_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
......
       info("remove logical volumes for %s" % name)
       for lv in old_lvs:
         cfg.SetDiskID(lv, tgt_node)
-        msg = self.rpc.call_blockdev_remove(tgt_node, lv).RemoteFailMsg()
+        msg = self.rpc.call_blockdev_remove(tgt_node, lv).fail_msg
         if msg:
           warning("Can't remove old LV: %s" % msg,
                   hint="manually remove unused LVs")
......
     results = self.rpc.call_vg_list([pri_node, new_node])
     for node in pri_node, new_node:
       res = results[node]
-      msg = res.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Error checking node %s: %s" % (node, msg))
+      res.Raise("Error checking node %s" % node)
       if my_vg not in res.payload:
         raise errors.OpExecError("Volume group '%s' not found on %s" %
                                  (my_vg, node))
......
       info("checking disk/%d on %s" % (idx, pri_node))
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
......
       # we have new devices, shutdown the drbd on the old secondary
       info("shutting down drbd for disk/%d on old node" % idx)
       cfg.SetDiskID(dev, old_node)
-      msg = self.rpc.call_blockdev_shutdown(old_node, dev).RemoteFailMsg()
+      msg = self.rpc.call_blockdev_shutdown(old_node, dev).fail_msg
       if msg:
         warning("Failed to shutdown drbd for disk/%d on old node: %s" %
                 (idx, msg),
......
     result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
                                                instance.disks)[pri_node]

-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       # detaches didn't succeed (unlikely)
       self.cfg.ReleaseDRBDMinors(instance.name)
......
                                            instance.disks, instance.name,
                                            False)
     for to_node, to_result in result.items():
-      msg = to_result.RemoteFailMsg()
+      msg = to_result.fail_msg
       if msg:
         warning("can't attach drbd disks on node %s: %s", to_node, msg,
                 hint="please do a gnt-instance info to see the"
......
     for idx, (dev, old_lvs, _) in iv_names.iteritems():
       cfg.SetDiskID(dev, pri_node)
       result = self.rpc.call_blockdev_find(pri_node, dev)
-      msg = result.RemoteFailMsg()
+      msg = result.fail_msg
       if not msg and not result.payload:
         msg = "disk not found"
       if msg:
......
       info("remove logical volumes for disk/%d" % idx)
       for lv in old_lvs:
         cfg.SetDiskID(lv, old_node)
-        msg = self.rpc.call_blockdev_remove(old_node, lv).RemoteFailMsg()
+        msg = self.rpc.call_blockdev_remove(old_node, lv).fail_msg
         if msg:
           warning("Can't remove LV on old secondary: %s", msg,
                   hint="Cleanup stale volumes by hand")
......
                                        instance.hypervisor)
     for node in nodenames:
       info = nodeinfo[node]
-      msg = info.RemoteFailMsg()
-      if msg:
-        raise errors.OpPrereqError("Cannot get current information"
-                                   " from node %s:" % (node, msg))
+      info.Raise("Cannot get current information from node %s" % node)
       vg_free = info.payload.get('vg_free', None)
       if not isinstance(vg_free, int):
         raise errors.OpPrereqError("Can't compute free disk space on"
......
     for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
       result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Grow request failed to node %s: %s" %
-                                 (node, msg))
+      result.Raise("Grow request failed to node %s" % node)
     disk.RecordGrow(self.op.amount)
     self.cfg.Update(instance)
     if self.op.wait_for_sync:
......
       if dev_pstatus.offline:
         dev_pstatus = None
       else:
-        msg = dev_pstatus.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't compute disk status for %s: %s" %
-                                   (instance.name, msg))
+        dev_pstatus.Raise("Can't compute disk status for %s" % instance.name)
         dev_pstatus = dev_pstatus.payload
     else:
       dev_pstatus = None
......
       if dev_sstatus.offline:
         dev_sstatus = None
       else:
-        msg = dev_sstatus.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't compute disk status for %s: %s" %
-                                   (instance.name, msg))
+        dev_sstatus.Raise("Can't compute disk status for %s" % instance.name)
         dev_sstatus = dev_sstatus.payload
     else:
       dev_sstatus = None
......
         remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                   instance.name,
                                                   instance.hypervisor)
-        msg = remote_info.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Error checking node %s: %s" %
-                                   (instance.primary_node, msg))
+        remote_info.Raise("Error checking node %s" % instance.primary_node)
         remote_info = remote_info.payload
         if remote_info and "state" in remote_info:
           remote_state = "up"
......
       nodeinfo = self.rpc.call_node_info(mem_check_list, self.cfg.GetVGName(),
                                          instance.hypervisor)
       pninfo = nodeinfo[pnode]
-      msg = pninfo.RemoteFailMsg()
+      msg = pninfo.fail_msg
       if msg:
         # Assume the primary node is unreachable and go ahead
         self.warn.append("Can't get info from primary node %s: %s" %
......
       elif not isinstance(pninfo.payload.get('memory_free', None), int):
         self.warn.append("Node data from primary node %s doesn't contain"
                          " free memory information" % pnode)
-      elif instance_info.RemoteFailMsg():
+      elif instance_info.fail_msg:
         self.warn.append("Can't get instance runtime information: %s" %
-                        instance_info.RemoteFailMsg())
+                        instance_info.fail_msg)
       else:
         if instance_info.payload:
           current_mem = int(instance_info.payload['memory'])
......
         for node, nres in nodeinfo.items():
           if node not in instance.secondary_nodes:
             continue
-          msg = nres.RemoteFailMsg()
+          msg = nres.fail_msg
           if msg:
             self.warn.append("Can't get info from secondary node %s: %s" %
                              (node, msg))
......

       if new_nic_mode == constants.NIC_MODE_BRIDGED:
         nic_bridge = new_filled_nic_params[constants.NIC_LINK]
-        result = self.rpc.call_bridges_exist(pnode, [nic_bridge])
-        msg = result.RemoteFailMsg()
+        msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
         if msg:
           msg = "Error checking bridges on node %s: %s" % (pnode, msg)
           if self.force:
......
                                      " an instance")
         ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
         ins_l = ins_l[pnode]
-        msg = ins_l.RemoteFailMsg()
+        msg = ins_l.fail_msg
         if msg:
           raise errors.OpPrereqError("Can't contact node %s: %s" %
                                      (pnode, msg))
......
         device_idx = len(instance.disks)
         for node, disk in device.ComputeNodeTree(instance.primary_node):
           self.cfg.SetDiskID(disk, node)
-          msg = self.rpc.call_blockdev_remove(node, disk).RemoteFailMsg()
+          msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
           if msg:
             self.LogWarning("Could not remove disk/%d on node %s: %s,"
                             " continuing anyway", device_idx, node, msg)
......
     rpcresult = self.rpc.call_export_list(self.nodes)
     result = {}
     for node in rpcresult:
-      if rpcresult[node].RemoteFailMsg():
+      if rpcresult[node].fail_msg:
         result[node] = False
       else:
         result[node] = rpcresult[node].payload
......
     if self.op.shutdown:
       # shutdown the instance, but not the disks
       result = self.rpc.call_instance_shutdown(src_node, instance)
-      msg = result.RemoteFailMsg()
-      if msg:
-        raise errors.OpExecError("Could not shutdown instance %s on"
-                                 " node %s: %s" %
-                                 (instance.name, src_node, msg))
+      result.Raise("Could not shutdown instance %s on"
+                   " node %s" % (instance.name, src_node))

     vgname = self.cfg.GetVGName()

......
       for disk in instance.disks:
         # result.payload will be a snapshot of an lvm leaf of the one we passed
         result = self.rpc.call_blockdev_snapshot(src_node, disk)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           self.LogWarning("Could not snapshot block device %s on node %s: %s",
                           disk.logical_id[1], src_node, msg)
......
     finally:
       if self.op.shutdown and instance.admin_up:
         result = self.rpc.call_instance_start(src_node, instance, None, None)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           _ShutdownInstanceDisks(self, instance)
           raise errors.OpExecError("Could not start instance: %s" % msg)
......
       if dev:
         result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                                instance, cluster_name, idx)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           self.LogWarning("Could not export block device %s from node %s to"
                           " node %s: %s", dev.logical_id[1], src_node,
                           dst_node.name, msg)
-        msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+        msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
         if msg:
           self.LogWarning("Could not remove snapshot block device %s from node"
                           " %s: %s", dev.logical_id[1], src_node, msg)

     result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
-    msg = result.RemoteFailMsg()
+    msg = result.fail_msg
     if msg:
       self.LogWarning("Could not finalize export for instance %s"
                       " on node %s: %s", instance.name, dst_node.name, msg)
......
     if nodelist:
       exportlist = self.rpc.call_export_list(nodelist)
       for node in exportlist:
-        if exportlist[node].RemoteFailMsg():
+        if exportlist[node].fail_msg:
           continue
         if iname in exportlist[node].payload:
-          msg = self.rpc.call_export_remove(node, iname).RemoteFailMsg()
+          msg = self.rpc.call_export_remove(node, iname).fail_msg
           if msg:
             self.LogWarning("Could not remove older export for instance %s"
                             " on node %s: %s", iname, node, msg)
......
     exportlist = self.rpc.call_export_list(locked_nodes)
     found = False
     for node in exportlist:
-      msg = exportlist[node].RemoteFailMsg()
+      msg = exportlist[node].fail_msg
       if msg:
         self.LogWarning("Failed to query node %s (continuing): %s", node, msg)
         continue
       if instance_name in exportlist[node].payload:
         found = True
         result = self.rpc.call_export_remove(node, instance_name)
-        msg = result.RemoteFailMsg()
+        msg = result.fail_msg
         if msg:
           logging.error("Could not remove export for instance %s"
                         " on node %s: %s", instance_name, node, msg)
......
     if self.op.on_nodes:
       result = self.rpc.call_test_delay(self.op.on_nodes, self.op.duration)
       for node, node_result in result.items():
-        msg = node_result.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Failure during rpc call to node %s: %s"
-                                   % (node, msg))
+        node_result.Raise("Failure during rpc call to node %s" % node)


 class IAllocator(object):
......
         }

       if not ninfo.offline:
-        msg = nresult.RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't get data for node %s: %s" %
-                                   (nname, msg))
-        msg = node_iinfo[nname].RemoteFailMsg()
-        if msg:
-          raise errors.OpExecError("Can't get node instance info"
-                                   " from node %s: %s" % (nname, msg))
+        nresult.Raise("Can't get data for node %s" % nname)
+        node_iinfo[nname].Raise("Can't get node instance info from node %s" %
+                                nname)
         remote_info = nresult.payload
         for attr in ['memory_total', 'memory_free', 'memory_dom0',
                      'vg_size', 'vg_free', 'cpu_total']:
......
     data = self.in_text

     result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
-    msg = result.RemoteFailMsg()
-    if msg:
-      raise errors.OpExecError("Failure while running the iallocator"
-                               " script: %s" % msg)
+    result.Raise("Failure while running the iallocator script")

     self.out_text = result.payload
     if validate:
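
Every cmdlib.py hunk above applies the same mechanical transformation. A condensed before/after sketch of the pattern (illustrative only; "result" stands for any RpcResult returned by an rpc.call_* method):

# Old pattern: unpack the failure message by hand at every call site.
result = self.rpc.call_instance_reboot(node_current, instance, reboot_type)
msg = result.RemoteFailMsg()
if msg:
  raise errors.OpExecError("Could not reboot instance: %s" % msg)

# New pattern: one call. Raise() is a no-op on success; on failure it appends
# the node's error text to the caller's message, and prereq=True makes it
# raise OpPrereqError instead of OpExecError for CheckPrereq-phase checks
# (e.g. remote_info.Raise("Error checking node %s" % ..., prereq=True) above).
result = self.rpc.call_instance_reboot(node_current, instance, reboot_type)
result.Raise("Could not reboot instance")
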
--- a/lib/rpc.py
+++ b/lib/rpc.py
       offline, as opposed to actual failure; offline=True will always
       imply failed=True, in order to allow simpler checking if
       the user doesn't care about the exact failure mode
-  @ivar error: the error message if the call failed
+  @ivar fail_msg: the error message if the call failed

   """
   def __init__(self, data=None, failed=False, offline=False,
......
     self.node = node
     if offline:
       self.failed = True
-      self.error = "Node is marked offline"
+      self.fail_msg = "Node is marked offline"
       self.data = self.payload = None
     elif failed:
-      self.error = self._EnsureErr(data)
+      self.fail_msg = self._EnsureErr(data)
       self.data = self.payload = None
     else:
       self.data = data
       if not isinstance(self.data, (tuple, list)):
-        self.error = ("RPC layer error: invalid result type (%s)" %
-                      type(self.data))
+        self.fail_msg = ("RPC layer error: invalid result type (%s)" %
+                         type(self.data))
       elif len(data) != 2:
-        self.error = ("RPC layer error: invalid result length (%d), "
-                      "expected 2" % len(self.data))
+        self.fail_msg = ("RPC layer error: invalid result length (%d), "
+                         "expected 2" % len(self.data))
       elif not self.data[0]:
-        self.error = self._EnsureErr(self.data[1])
+        self.fail_msg = self._EnsureErr(self.data[1])
       else:
         # finally success
-        self.error = None
+        self.fail_msg = None
         self.payload = data[1]

   @staticmethod
......
     else:
       return "No error information"

-  def Raise(self):
+  def Raise(self, msg, prereq=False):
     """If the result has failed, raise an OpExecError.

     This is used so that LU code doesn't have to check for each
     result, but instead can call this function.

     """
-    if self.failed:
-      raise errors.OpExecError("Call '%s' to node '%s' has failed: %s" %
-                               (self.call, self.node, self.error))
+    if not self.fail_msg:
+      return
+
+    if not msg: # one could pass None for default message
+      msg = ("Call '%s' to node '%s' has failed: %s" %
+             (self.call, self.node, self.fail_msg))
+    else:
+      msg = "%s: %s" % (msg, self.fail_msg)
+    if prereq:
+      ec = errors.OpPrereqError
+    else:
+      ec = errors.OpExecError
+    raise ec(msg)

   def RemoteFailMsg(self):
     """Check if the remote procedure failed.
......
     @return: the fail_msg attribute

     """
-    return self.error
+    return self.fail_msg


 class Client:
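
For reference, a minimal self-contained sketch of the new result-checking contract, distilled from the rpc.py hunks above. OpExecError and OpPrereqError are simplified stand-ins for the classes in ganeti.errors, and the constructor is reduced to just the attributes the sketch needs:

class OpExecError(Exception):
  pass

class OpPrereqError(Exception):
  pass

class RpcResult(object):
  """Reduced RpcResult: only the pieces the new contract relies on."""

  def __init__(self, call, node, fail_msg=None, payload=None):
    self.call = call          # name of the RPC procedure that was made
    self.node = node          # node the call was sent to
    self.fail_msg = fail_msg  # None on success, error text on failure
    self.payload = payload    # result data, only valid on success

  def Raise(self, msg, prereq=False):
    """No-op on success; raise OpExecError (or OpPrereqError) on failure."""
    if not self.fail_msg:
      return
    if not msg:  # one could pass None for the default message
      msg = ("Call '%s' to node '%s' has failed: %s" %
             (self.call, self.node, self.fail_msg))
    else:
      msg = "%s: %s" % (msg, self.fail_msg)
    if prereq:
      ec = OpPrereqError
    else:
      ec = OpExecError
    raise ec(msg)

# A failed call raises with the caller's context plus the node's error text
# appended automatically:
res = RpcResult("instance_shutdown", "node1.example.com",
                fail_msg="instance not running")
try:
  res.Raise("Could not shutdown instance")
except OpExecError as err:
  print(err)  # Could not shutdown instance: instance not running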
