Revision c26a6bd2 lib/backend.py

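Every hunk in this diff applies the same conversion: the RPC backend functions in lib/backend.py stop wrapping their results in (status, payload) tuples and now return the payload directly (or nothing at all), signalling failure only by raising RPCFail through the _Fail helper, and the docstrings are updated accordingly ("@rtype: None", "@raise RPCFail: in case of errors"). As a reading aid, here is a minimal, hypothetical sketch of that convention; _Fail and RPCFail are names used by the diff, but the definitions below are an assumption, not code taken from this revision:

  import logging

  class RPCFail(Exception):
    """Assumed error type raised by backend functions instead of
    returning (False, message) to the caller."""

  def _Fail(msg, *args, **kwargs):
    """Format the message, optionally log it, and raise RPCFail.

    The diff shows calls such as _Fail("... %s", err, exc=True, log=False);
    this sketch only honours the log flag and ignores exc, which is assumed
    to control traceback logging in the real helper.
    """
    if args:
      msg = msg % args
    if kwargs.get("log", True):
      logging.error(msg)
    raise RPCFail(msg)

  # Old convention (removed lines):       New convention (added lines):
  #   return True, payload           ->     return payload
  #   return (False, "message")      ->     _Fail("message")

Callers that used to unpack a (status, payload) pair are therefore expected to treat an RPCFail exception as the failure path, which is what the new "@raise RPCFail" docstring entries advertise.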
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -154 +154 @@
   """
   _CleanDirectory(constants.QUEUE_DIR, exclude=[constants.JOB_QUEUE_LOCK_FILE])
   _CleanDirectory(constants.JOB_QUEUE_ARCHIVE_DIR)
-  return True, None


 def GetMasterInfo():
@@ -164 +163 @@
   for consumption here or from the node daemon.

   @rtype: tuple
-  @return: True, (master_netdev, master_ip, master_name) in case of success
+  @return: master_netdev, master_ip, master_name
   @raise RPCFail: in case of errors

   """
@@ -175 +174 @@
     master_node = cfg.GetMasterNode()
   except errors.ConfigurationError, err:
     _Fail("Cluster configuration incomplete", exc=True)
-  return True, (master_netdev, master_ip, master_node)
+  return master_netdev, master_ip, master_node


 def StartMaster(start_daemons):
@@ -186 +185 @@
   based on the start_daemons parameter.

   @type start_daemons: boolean
-  @param start_daemons: whther to also start the master
+  @param start_daemons: whether to also start the master
       daemons (ganeti-masterd and ganeti-rapi)
   @rtype: None

@@ -228 +227 @@
   if payload:
     _Fail("; ".join(payload))

-  return True, None
-

 def StopMaster(stop_daemons):
   """Deactivate this node as master.
@@ -261 +258 @@
     for daemon in constants.RAPI_PID, constants.MASTERD_PID:
       utils.KillProcess(utils.ReadPidFile(utils.DaemonPidFileName(daemon)))

-  return True, None
-

 def AddNode(dsa, dsapub, rsa, rsapub, sshkey, sshpub):
   """Joins this node to the cluster.
@@ -308 +303 @@

   utils.RunCmd([constants.SSH_INITD_SCRIPT, "restart"])

-  return (True, "Node added successfully")
-

 def LeaveCluster():
   """Cleans up and remove the current node.
@@ -376 +369 @@
   finally:
     f.close()

-  return True, outputarray
+  return outputarray


 def VerifyNode(what, cluster_name):
@@ -478 +471 @@
       used_minors = str(err)
     result[constants.NV_DRBDLIST] = used_minors

-  return True, result
+  return result


 def GetVolumeList(vg_name):
@@ -528 +521 @@
       size of the volume

   """
-  return True, utils.ListVolumeGroups()
+  return utils.ListVolumeGroups()


 def NodeVolumes():
@@ -571 +564 @@
       'vg': line[3].strip(),
     }

-  return True, [map_line(line.split('|'))
-                for line in result.stdout.splitlines()
-                if line.count('|') >= 3]
+  return [map_line(line.split('|')) for line in result.stdout.splitlines()
+          if line.count('|') >= 3]


 def BridgesExist(bridges_list):
@@ -591 +583 @@
   if missing:
     _Fail("Missing bridges %s", ", ".join(missing))

-  return True, None
-

 def GetInstanceList(hypervisor_list):
   """Provides a list of instances.
@@ -641 +631 @@
     output['state'] = iinfo[4]
     output['time'] = iinfo[5]

-  return True, output
+  return output


 def GetInstanceMigratable(instance):
@@ -666 +656 @@
     if not os.path.islink(link_name):
       _Fail("Instance %s was not restarted since ganeti 1.2.5", iname)

-  return True, None
-

 def GetAllInstancesInfo(hypervisor_list):
   """Gather data about all instances.
@@ -709 +697 @@
                     " with different parameters", name)
         output[name] = value

-  return True, output
+  return output


 def InstanceOsAdd(instance, reinstall):
@@ -719 +707 @@
   @param instance: Instance whose OS is to be installed
   @type reinstall: boolean
   @param reinstall: whether this is an instance reinstall
-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   inst_os = OSFromDisk(instance.os)

-
   create_env = OSEnvironment(instance)
   if reinstall:
     create_env['INSTANCE_REINSTALL'] = "1"
@@ -744 +730 @@
     _Fail("OS create script failed (%s), last lines in the"
           " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)

-  return (True, "Successfully installed")
-

 def RunRenameInstance(instance, old_name):
   """Run the OS rename script for an instance.
@@ -778 +762 @@
     _Fail("OS rename script failed (%s), last lines in the"
           " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)

-  return (True, "Rename successful")
-

 def _GetVGInfo(vg_name):
   """Get informations about the volume group.
@@ -902 +884 @@

   @type instance: L{objects.Instance}
   @param instance: the instance object
-  @rtype: boolean
-  @return: whether the startup was successful or not
+  @rtype: None

   """
   running_instances = GetInstanceList([instance.hypervisor])

   if instance.name in running_instances:
-    return (True, "Already running")
+    logging.info("Instance %s already running, not starting", instance.name)
+    return

   try:
     block_devices = _GatherAndLinkBlockDevs(instance)
@@ -921 +903 @@
     _RemoveBlockDevLinks(instance.name, instance.disks)
     _Fail("Hypervisor error: %s", err, exc=True)

-  return (True, "Instance started successfully")
-

 def InstanceShutdown(instance):
   """Shut an instance down.
@@ -931 +911 @@

   @type instance: L{objects.Instance}
   @param instance: the instance object
-  @rtype: boolean
-  @return: whether the startup was successful or not
+  @rtype: None

   """
   hv_name = instance.hypervisor
   running_instances = GetInstanceList([hv_name])
+  iname = instance.name

-  if instance.name not in running_instances:
-    return (True, "Instance already stopped")
+  if iname not in running_instances:
+    logging.info("Instance %s not running, doing nothing", iname)
+    return

   hyper = hypervisor.GetHypervisor(hv_name)
   try:
     hyper.StopInstance(instance)
   except errors.HypervisorError, err:
-    _Fail("Failed to stop instance %s: %s", instance.name, err)
+    _Fail("Failed to stop instance %s: %s", iname, err)

   # test every 10secs for 2min

@@ -956 +937 @@
     time.sleep(10)
   else:
     # the shutdown did not succeed
-    logging.error("Shutdown of '%s' unsuccessful, using destroy",
-                  instance.name)
+    logging.error("Shutdown of '%s' unsuccessful, using destroy", iname)

     try:
       hyper.StopInstance(instance, force=True)
     except errors.HypervisorError, err:
-      _Fail("Failed to force stop instance %s: %s", instance.name, err)
+      _Fail("Failed to force stop instance %s: %s", iname, err)

     time.sleep(1)
     if instance.name in GetInstanceList([hv_name]):
-      _Fail("Could not shutdown instance %s even by destroy", instance.name)
-
-  _RemoveBlockDevLinks(instance.name, instance.disks)
+      _Fail("Could not shutdown instance %s even by destroy", iname)

-  return (True, "Instance has been shutdown successfully")
+  _RemoveBlockDevLinks(iname, instance.disks)


 def InstanceReboot(instance, reboot_type):
@@ -988 +966 @@
       - the other reboot type (L{constants.INSTANCE_REBOOT_HARD})
         is not accepted here, since that mode is handled
         differently
-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   running_instances = GetInstanceList([instance.hypervisor])
@@ -1005 +982 @@
       _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
   elif reboot_type == constants.INSTANCE_REBOOT_HARD:
     try:
-      stop_result = InstanceShutdown(instance)
-      if not stop_result[0]:
-        return stop_result
+      InstanceShutdown(instance)
       return StartInstance(instance)
     except errors.HypervisorError, err:
       _Fail("Failed to hard reboot instance %s: %s", instance.name, err)
   else:
     _Fail("Invalid reboot_type received: %s", reboot_type)

-  return (True, "Reboot successful")
-

 def MigrationInfo(instance):
   """Gather information about an instance to be migrated.
@@ -1029 +1002 @@
     info = hyper.MigrationInfo(instance)
   except errors.HypervisorError, err:
     _Fail("Failed to fetch migration information: %s", err, exc=True)
-  return (True, info)
+  return info


 def AcceptInstance(instance, info, target):
@@ -1048 +1021 @@
     hyper.AcceptInstance(instance, info, target)
   except errors.HypervisorError, err:
     _Fail("Failed to accept instance: %s", err, exc=True)
-  return (True, "Accept successfull")


 def FinalizeMigration(instance, info, success):
@@ -1067 +1039 @@
     hyper.FinalizeMigration(instance, info, success)
   except errors.HypervisorError, err:
     _Fail("Failed to finalize migration: %s", err, exc=True)
-  return (True, "Migration Finalized")


 def MigrateInstance(instance, target, live):
@@ -1092 +1063 @@
     hyper.MigrateInstance(instance.name, target, live)
   except errors.HypervisorError, err:
     _Fail("Failed to migrate instance: %s", err, exc=True)
-  return (True, "Migration successfull")


 def BlockdevCreate(disk, size, owner, on_primary, info):
@@ -1153 +1123 @@

   device.SetInfo(info)

-  physical_id = device.unique_id
-  return True, physical_id
+  return device.unique_id


 def BlockdevRemove(disk):
@@ -1169 +1138 @@

   """
   msgs = []
-  result = True
   try:
     rdev = _RecursiveFindBD(disk)
   except errors.BlockDeviceError, err:
@@ -1182 +1150 @@
       rdev.Remove()
     except errors.BlockDeviceError, err:
       msgs.append(str(err))
-      result = False
-    if result:
+    if not msgs:
       DevCacheManager.RemoveCache(r_path)

   if disk.children:
     for child in disk.children:
-      c_status, c_msg = BlockdevRemove(child)
-      result = result and c_status
-      if c_msg: # not an empty message
-        msgs.append(c_msg)
+      try:
+        BlockdevRemove(child)
+      except RPCFail, err:
+        msgs.append(str(err))

-  if not result:
+  if msgs:
     _Fail("; ".join(msgs))

-  return True, None
-

 def _RecursiveAssembleBD(disk, owner, as_primary):
   """Activate a block device for an instance.
@@ -1270 +1235 @@
   except errors.BlockDeviceError, err:
     _Fail("Error while assembling disk: %s", err, exc=True)

-  return True, result
+  return result


 def BlockdevShutdown(disk):
@@ -1287 +1252 @@
   @type disk: L{objects.Disk}
   @param disk: the description of the disk we should
       shutdown
-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   msgs = []
-  result = True
   r_dev = _RecursiveFindBD(disk)
   if r_dev is not None:
     r_path = r_dev.dev_path
@@ -1301 +1264 @@
       DevCacheManager.RemoveCache(r_path)
     except errors.BlockDeviceError, err:
       msgs.append(str(err))
-      result = False

   if disk.children:
     for child in disk.children:
-      c_status, c_msg = BlockdevShutdown(child)
-      result = result and c_status
-      if c_msg: # not an empty message
-        msgs.append(c_msg)
+      try:
+        BlockdevShutdown(child)
+      except RPCFail, err:
+        msgs.append(str(err))

-  if not result:
+  if msgs:
     _Fail("; ".join(msgs))
-  return (True, None)


 def BlockdevAddchildren(parent_cdev, new_cdevs):
@@ -1322 +1283 @@
   @param parent_cdev: the disk to which we should add children
   @type new_cdevs: list of L{objects.Disk}
   @param new_cdevs: the list of children which we should add
-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   parent_bdev = _RecursiveFindBD(parent_cdev)
@@ -1333 +1293 @@
   if new_bdevs.count(None) > 0:
     _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
   parent_bdev.AddChildren(new_bdevs)
-  return (True, None)


 def BlockdevRemovechildren(parent_cdev, new_cdevs):
@@ -1343 +1302 @@
   @param parent_cdev: the disk from which we should remove children
   @type new_cdevs: list of L{objects.Disk}
   @param new_cdevs: the list of children which we should remove
-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   parent_bdev = _RecursiveFindBD(parent_cdev)
@@ -1362 +1320 @@
     else:
       devs.append(rpath)
   parent_bdev.RemoveChildren(devs)
-  return (True, None)


 def BlockdevGetmirrorstatus(disks):
@@ -1384 +1341 @@
     if rbd is None:
       _Fail("Can't find device %s", dsk)
     stats.append(rbd.CombinedSyncStatus())
-  return True, stats
+  return stats


 def _RecursiveFindBD(disk):
@@ -1425 +1382 @@
   except errors.BlockDeviceError, err:
     _Fail("Failed to find device: %s", err, exc=True)
   if rbd is None:
-    return (True, None)
-  return (True, (rbd.dev_path, rbd.major, rbd.minor) + rbd.GetSyncStatus())
+    return None
+  return (rbd.dev_path, rbd.major, rbd.minor) + rbd.GetSyncStatus()


 def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
@@ -1449 +1406 @@
   @param atime: the atime to set on the file (can be None)
   @type mtime: float
   @param mtime: the mtime to set on the file (can be None)
-  @rtype: boolean
-  @return: the success of the operation; errors are logged
-      in the node daemon log
+  @rtype: None

   """
   if not os.path.isabs(file_name):
@@ -1478 +1433 @@

   utils.WriteFile(file_name, data=raw_data, mode=mode, uid=uid, gid=gid,
                   atime=atime, mtime=mtime)
-  return (True, "success")


 def WriteSsconfFiles(values):
@@ -1488 +1442 @@

   """
   ssconf.SimpleStore().WriteFiles(values)
-  return True, None


 def _ErrnoOrStr(err):
@@ -1592 +1545 @@
           diagnose = os_inst
         result.append((name, os_path, status, diagnose))

-  return True, result
+  return result


 def _TryOSFromDisk(name, base_dir=None):
@@ -1755 +1708 @@
   except errors.BlockDeviceError, err:
     _Fail("Failed to grow block device: %s", err, exc=True)

-  return True, None
-

 def BlockdevSnapshot(disk):
   """Create a snapshot copy of a block device.
@@ -1784 +1735 @@
     r_dev = _RecursiveFindBD(disk)
     if r_dev is not None:
       # let's stay on the safe side and ask for the full size, for now
-      return True, r_dev.Snapshot(disk.size)
+      return r_dev.Snapshot(disk.size)
     else:
       _Fail("Cannot find block device %s", disk)
   else:
@@ -1806 +1757 @@
   @type idx: int
   @param idx: the index of the disk in the instance's disk list,
       used to export to the OS scripts environment
-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   export_env = OSEnvironment(instance)
@@ -1854 +1804 @@
     _Fail("OS snapshot export command '%s' returned error: %s"
           " output: %s", command, result.fail_reason, result.output)

-  return (True, None)
-

 def FinalizeExport(instance, snap_disks):
   """Write out the export configuration information.
@@ -1867 +1815 @@
   @param snap_disks: list of snapshot block devices, which
       will be used to get the actual name of the dump file

-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   destdir = os.path.join(constants.EXPORT_DIR, instance.name + ".new")
@@ -1920 +1867 @@
   shutil.rmtree(finaldestdir, True)
   shutil.move(destdir, finaldestdir)

-  return True, None
-

 def ExportInfo(dest):
   """Get export configuration information.
@@ -1943 +1888 @@
       not config.has_section(constants.INISECT_INS)):
     _Fail("Export info file doesn't have the required fields")

-  return True, config.Dumps()
+  return config.Dumps()


 def ImportOSIntoInstance(instance, src_node, src_images, cluster_name):
@@ -1992 +1937 @@

   if final_result:
     _Fail("; ".join(final_result), log=False)
-  return True, None


 def ListExports():
@@ -2003 +1947 @@

   """
   if os.path.isdir(constants.EXPORT_DIR):
-    return True, utils.ListVisibleFiles(constants.EXPORT_DIR)
+    return utils.ListVisibleFiles(constants.EXPORT_DIR)
   else:
     _Fail("No exports directory")

@@ -2013 +1957 @@

   @type export: str
   @param export: the name of the export to remove
-  @rtype: boolean
-  @return: the success of the operation
+  @rtype: None

   """
   target = os.path.join(constants.EXPORT_DIR, export)
@@ -2024 +1967 @@
   except EnvironmentError, err:
     _Fail("Error while removing the export: %s", err, exc=True)

-  return True, None
-

 def BlockdevRename(devlist):
   """Rename a list of block devices.
@@ -2066 +2007 @@
       result = False
   if not result:
     _Fail("; ".join(msgs))
-  return True, None


 def _TransformFileStorageDir(file_storage_dir):
@@ -2114 +2054 @@
     except OSError, err:
       _Fail("Cannot create file storage directory '%s': %s",
             file_storage_dir, err, exc=True)
-  return True, None


 def RemoveFileStorageDir(file_storage_dir):
@@ -2141 +2080 @@
       _Fail("Cannot remove file storage directory '%s': %s",
             file_storage_dir, err)

-  return True, None
-

 def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
   """Rename the file storage directory.
@@ -2172 +2109 @@
     if os.path.exists(old_file_storage_dir):
       _Fail("Cannot rename '%s' to '%s': both locations exist",
             old_file_storage_dir, new_file_storage_dir)
-  return True, None


 def _EnsureJobQueueFile(file_name):
@@ -2211 +2147 @@
   # Write and replace the file atomically
   utils.WriteFile(file_name, data=_Decompress(content))

-  return True, None
-

 def JobQueueRename(old, new):
   """Renames a job queue file.
@@ -2232 +2166 @@

   utils.RenameFile(old, new, mkdir=True)

-  return True, None
-

 def JobQueueSetDrainFlag(drain_flag):
   """Set the drain flag for the queue.
@@ -2252 +2184 @@
   else:
     utils.RemoveFile(constants.JOB_QUEUE_DRAIN_FILE)

-  return True, None
-

 def BlockdevClose(instance_name, disks):
   """Closes the given block devices.
@@ -2290 +2220 @@
   else:
     if instance_name:
       _RemoveBlockDevLinks(instance_name, disks)
-    return (True, "All devices secondary")


 def ValidateHVParams(hvname, hvparams):
@@ -2300 +2229 @@
   @param hvname: the hypervisor name
   @type hvparams: dict
   @param hvparams: the hypervisor parameters to be validated
-  @rtype: tuple (success, message)
-  @return: a tuple of success and message, where success
-      indicates the succes of the operation, and message
-      which will contain the error details in case we
-      failed
+  @rtype: None

   """
   try:
     hv_type = hypervisor.GetHypervisor(hvname)
     hv_type.ValidateParameters(hvparams)
-    return (True, "Validation passed")
   except errors.HypervisorError, err:
     _Fail(str(err), log=False)

@@ -2332 +2256 @@
     if err.errno != errno.ENOENT:
       _Fail("Error while backing up cluster file: %s", err, exc=True)
   utils.RemoveFile(constants.CLUSTER_CONF_FILE)
-  return (True, "Done")


 def _FindDisks(nodes_ip, disks):
@@ -2367 +2290 @@
     except errors.BlockDeviceError, err:
       _Fail("Can't change network configuration to standalone mode: %s",
             err, exc=True)
-  return (True, "All disks are now disconnected")


 def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
@@ -2424 +2346 @@
         rd.Open()
       except errors.BlockDeviceError, err:
         _Fail("Can't change to primary mode: %s", err)
-  if multimaster:
-    msg = "multi-master and primary"
-  else:
-    msg = "single-master"
-  return (True, "Disks are now configured as %s" % msg)


 def DrbdWaitSync(nodes_ip, disks):
@@ -2448 +2365 @@
     if stats.sync_percent is not None:
       min_resync = min(min_resync, stats.sync_percent)

-  return (True, (alldone, min_resync))
+  return (alldone, min_resync)


 def PowercycleNode(hypervisor_type):
@@ -2465 +2382 @@
     # if we can't fork, we'll pretend that we're in the child process
     pid = 0
   if pid > 0:
-    return (True, "Reboot scheduled in 5 seconds")
+    return "Reboot scheduled in 5 seconds"
   time.sleep(5)
   hyper.PowercycleNode()

@@ -2578 +2495 @@
       dir_contents = utils.ListVisibleFiles(dir_name)
     except OSError, err:
       # FIXME: must log output in case of failures
-      return True, rr
+      return rr

     # we use the standard python sort order,
     # so 00name is the recommended naming scheme
@@ -2597 +2514 @@
           rrval = constants.HKR_SUCCESS
       rr.append(("%s/%s" % (subdir, relname), rrval, output))

-    return True, rr
+    return rr


 class IAllocatorRunner(object):
@@ -2637 +2554 @@
     finally:
       os.unlink(fin_name)

-    return True, result.stdout
+    return result.stdout


 class DevCacheManager(object):