Revision 3ecf6786

--- a/daemons/ganeti-master
+++ b/daemons/ganeti-master
@@ -39 +39 @@
 
 from optparse import OptionParser
 
-from ganeti import logger
 from ganeti import constants
 from ganeti import errors
 from ganeti import ssconf
@@ -111 +110 @@
   result = utils.RunCmd(["fping", "-q", master_ip])
   if not result.failed:
     r2 = utils.RunCmd(["fping", "-q", "-S127.0.0.1", master_ip])
-    if not result.failed:
+    if not r2.failed:
       # we already have the ip:
       if debug:
         sys.stderr.write("Notice: already started.\n")
@@ -168 +167 @@
   else:
     fn = StopMaster
 
-  return fn(master_netdev, master_ip, debug)
+  result = fn(master_netdev, master_ip, debug)
+  sys.exit(result)
 
 
-if __name__=='__main__':
-  exit_code = main()
-  sys.exit(exit_code)
+if __name__ == '__main__':
+  main()
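
Note: two behavioural fixes stand out in this file. The second fping probe is now tested through its own result object (r2) instead of re-testing the first probe, and main() now terminates the process via sys.exit() itself instead of returning an exit code to the __main__ block. A minimal, self-contained sketch of the corrected flow (the probe() helper is a hypothetical stand-in for utils.RunCmd, and fping must be installed for the probes to succeed):

    import subprocess
    import sys

    def probe(args):
      # Hypothetical stand-in for utils.RunCmd: True when the command succeeds.
      return subprocess.call(args) == 0

    def ip_already_configured(ip):
      # First check plain reachability, then re-check with a fixed source
      # address; the second check must test its *own* result, which is
      # exactly the bug fixed in the hunk above.
      if probe(["fping", "-q", ip]):
        return probe(["fping", "-q", "-S127.0.0.1", ip])
      return False

    def main():
      ok = ip_already_configured("127.0.0.1")
      sys.exit(0 if ok else 1)   # main() exits the process itself...

    if __name__ == '__main__':
      main()                     # ...so no exit code is forwarded here.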
--- a/daemons/ganeti-noded
+++ b/daemons/ganeti-noded
@@ -21 +21 @@
 
 """Ganeti node daemon"""
 
+# functions in this module need to have a given name structure, so:
+# pylint: disable-msg=C0103
+
 import os
 import sys
 import resource
@@ -43 +46 @@
 
 
 class ServerContextFactory:
-  def getContext(self):
+  """SSL context factory class that uses a given certificate.
+
+  """
+  @staticmethod
+  def getContext():
+    """Return a customized context.
+
+    The context will be set to use our certificate.
+
+    """
     ctx = SSL.Context(SSL.TLSv1_METHOD)
     ctx.use_certificate_file(constants.SSL_CERT_FILE)
     ctx.use_privatekey_file(constants.SSL_CERT_FILE)
     return ctx
 
 class ServerObject(pb.Avatar):
+  """The server implementation.
+
+  This class holds all methods exposed over the RPC interface.
+
+  """
   def __init__(self, name):
     self.name = name
 
   def perspectiveMessageReceived(self, broker, message, args, kw):
-    """This method is called when a network message is received.
-
-    I will call::
+    """Custom message dispatching function.
 
-      |  self.perspective_%(message)s(*broker.unserialize(args),
-      |                               **broker.unserialize(kw))
-
-    to handle the method; subclasses of Avatar are expected to
-    implement methods of this naming convention.
+    This function overrides the pb.Avatar function in order to provide
+    a simple form of exception passing (as text only).
 
     """
     args = broker.unserialize(args, self)
@@ -79 +91 @@
 
   # the new block devices  --------------------------
 
-  def perspective_blockdev_create(self, params):
+  @staticmethod
+  def perspective_blockdev_create(params):
+    """Create a block device.
+
+    """
     bdev_s, size, on_primary, info = params
     bdev = objects.ConfigObject.Loads(bdev_s)
     if bdev is None:
       raise ValueError("can't unserialize data!")
     return backend.CreateBlockDevice(bdev, size, on_primary, info)
 
-  def perspective_blockdev_remove(self, params):
+  @staticmethod
+  def perspective_blockdev_remove(params):
+    """Remove a block device.
+
+    """
     bdev_s = params[0]
     bdev = objects.ConfigObject.Loads(bdev_s)
     return backend.RemoveBlockDevice(bdev)
 
-  def perspective_blockdev_assemble(self, params):
+  @staticmethod
+  def perspective_blockdev_assemble(params):
+    """Assemble a block device.
+
+    """
     bdev_s, on_primary = params
     bdev = objects.ConfigObject.Loads(bdev_s)
     if bdev is None:
       raise ValueError("can't unserialize data!")
     return backend.AssembleBlockDevice(bdev, on_primary)
 
-  def perspective_blockdev_shutdown(self, params):
+  @staticmethod
+  def perspective_blockdev_shutdown(params):
+    """Shutdown a block device.
+
+    """
     bdev_s = params[0]
     bdev = objects.ConfigObject.Loads(bdev_s)
     if bdev is None:
       raise ValueError("can't unserialize data!")
     return backend.ShutdownBlockDevice(bdev)
 
-  def perspective_blockdev_addchild(self, params):
+  @staticmethod
+  def perspective_blockdev_addchild(params):
+    """Add a child to a mirror device.
+
+    Note: this is only valid for mirror devices. It's the caller's duty
+    to send a correct disk, otherwise we raise an error.
+
+    """
     bdev_s, ndev_s = params
     bdev = objects.ConfigObject.Loads(bdev_s)
     ndev = objects.ConfigObject.Loads(ndev_s)
@@ -113 +148 @@
       raise ValueError("can't unserialize data!")
     return backend.MirrorAddChild(bdev, ndev)
 
-  def perspective_blockdev_removechild(self, params):
+  @staticmethod
+  def perspective_blockdev_removechild(params):
+    """Remove a child from a mirror device.
+
+    This is only valid for mirror devices, of course. It's the callers
+    duty to send a correct disk, otherwise we raise an error.
+
+    """
     bdev_s, ndev_s = params
     bdev = objects.ConfigObject.Loads(bdev_s)
     ndev = objects.ConfigObject.Loads(ndev_s)
@@ -121 +163 @@
       raise ValueError("can't unserialize data!")
     return backend.MirrorRemoveChild(bdev, ndev)
 
-  def perspective_blockdev_getmirrorstatus(self, params):
+  @staticmethod
+  def perspective_blockdev_getmirrorstatus(params):
+    """Return the mirror status for a list of disks.
+
+    """
     disks = [objects.ConfigObject.Loads(dsk_s)
             for dsk_s in params]
     return backend.GetMirrorStatus(disks)
 
-  def perspective_blockdev_find(self, params):
+  @staticmethod
+  def perspective_blockdev_find(params):
+    """Expose the FindBlockDevice functionality for a disk.
+
+    This will try to find but not activate a disk.
+
+    """
     disk = objects.ConfigObject.Loads(params[0])
     return backend.FindBlockDevice(disk)
 
-  def perspective_blockdev_snapshot(self, params):
+  @staticmethod
+  def perspective_blockdev_snapshot(params):
+    """Create a snapshot device.
+
+    Note that this is only valid for LVM disks, if we get passed
+    something else we raise an exception. The snapshot device can be
+    remove by calling the generic block device remove call.
+
+    """
     cfbd = objects.ConfigObject.Loads(params[0])
     return backend.SnapshotBlockDevice(cfbd)
 
   # export/import  --------------------------
 
-  def perspective_snapshot_export(self, params):
+  @staticmethod
+  def perspective_snapshot_export(params):
+    """Export a given snapshot.
+
+    """
     disk = objects.ConfigObject.Loads(params[0])
     dest_node = params[1]
     instance = objects.ConfigObject.Loads(params[2])
-    return backend.ExportSnapshot(disk,dest_node,instance)
+    return backend.ExportSnapshot(disk, dest_node, instance)
+
+  @staticmethod
+  def perspective_finalize_export(params):
+    """Expose the finalize export functionality.
 
-  def perspective_finalize_export(self, params):
+    """
     instance = objects.ConfigObject.Loads(params[0])
     snap_disks = [objects.ConfigObject.Loads(str_data)
                   for str_data in params[1]]
     return backend.FinalizeExport(instance, snap_disks)
 
-  def perspective_export_info(self, params):
-    dir = params[0]
-    einfo = backend.ExportInfo(dir)
+  @staticmethod
+  def perspective_export_info(params):
+    """Query information about an existing export on this node.
+
+    The given path may not contain an export, in which case we return
+    None.
+
+    """
+    path = params[0]
+    einfo = backend.ExportInfo(path)
     if einfo is None:
       return einfo
     return einfo.Dumps()
 
-  def perspective_export_list(self, params):
+  @staticmethod
+  def perspective_export_list(params):
+    """List the available exports on this node.
+
+    Note that as opposed to export_info, which may query data about an
+    export in any path, this only queries the standard Ganeti path
+    (constants.EXPORT_DIR).
+
+    """
     return backend.ListExports()
 
-  def perspective_export_remove(self, params):
+  @staticmethod
+  def perspective_export_remove(params):
+    """Remove an export.
+
+    """
     export = params[0]
     return backend.RemoveExport(export)
 
   # volume  --------------------------
 
-  def perspective_volume_list(self, params):
+  @staticmethod
+  def perspective_volume_list(params):
+    """Query the list of logical volumes in a given volume group.
+
+    """
     vgname = params[0]
     return backend.GetVolumeList(vgname)
 
-  def perspective_vg_list(self, params):
+  @staticmethod
+  def perspective_vg_list(params):
+    """Query the list of volume groups.
+
+    """
     return backend.ListVolumeGroups()
 
   # bridge  --------------------------
 
-  def perspective_bridges_exist(self, params):
+  @staticmethod
+  def perspective_bridges_exist(params):
+    """Check if all bridges given exist on this node.
+
+    """
     bridges_list = params[0]
     return backend.BridgesExist(bridges_list)
 
   # instance  --------------------------
 
-  def perspective_instance_os_add(self, params):
+  @staticmethod
+  def perspective_instance_os_add(params):
+    """Install an OS on a given instance.
+
+    """
     inst_s, os_disk, swap_disk = params
     inst = objects.ConfigObject.Loads(inst_s)
     return backend.AddOSToInstance(inst, os_disk, swap_disk)
 
-  def perspective_instance_os_import(self, params):
+  @staticmethod
+  def perspective_instance_os_import(params):
+    """Run the import function of an OS onto a given instance.
+
+    """
     inst_s, os_disk, swap_disk, src_node, src_image = params
     inst = objects.ConfigObject.Loads(inst_s)
     return backend.ImportOSIntoInstance(inst, os_disk, swap_disk,
                                         src_node, src_image)
 
-  def perspective_instance_shutdown(self, params):
+  @staticmethod
+  def perspective_instance_shutdown(params):
+    """Shutdown an instance.
+
+    """
     instance = objects.ConfigObject.Loads(params[0])
     return backend.ShutdownInstance(instance)
 
-  def perspective_instance_start(self, params):
+  @staticmethod
+  def perspective_instance_start(params):
+    """Start an instance.
+
+    """
     instance = objects.ConfigObject.Loads(params[0])
     extra_args = params[1]
     return backend.StartInstance(instance, extra_args)
 
-  def perspective_instance_info(self, params):
+  @staticmethod
+  def perspective_instance_info(params):
+    """Query instance information.
+
+    """
     return backend.GetInstanceInfo(params[0])
 
-  def perspective_all_instances_info(self, params):
+  @staticmethod
+  def perspective_all_instances_info(params):
+    """Query information about all instances.
+
+    """
     return backend.GetAllInstancesInfo()
 
-  def perspective_instance_list(self, params):
+  @staticmethod
+  def perspective_instance_list(params):
+    """Query the list of running instances.
+
+    """
     return backend.GetInstanceList()
 
   # node --------------------------
 
-  def perspective_node_info(self, params):
+  @staticmethod
+  def perspective_node_info(params):
+    """Query node information.
+
+    """
     vgname = params[0]
     return backend.GetNodeInfo(vgname)
 
-  def perspective_node_add(self, params):
+  @staticmethod
+  def perspective_node_add(params):
+    """Complete the registration of this node in the cluster.
+
+    """
     return backend.AddNode(params[0], params[1], params[2],
                            params[3], params[4], params[5])
 
-  def perspective_node_verify(self, params):
+  @staticmethod
+  def perspective_node_verify(params):
+    """Run a verify sequence on this node.
+
+    """
     return backend.VerifyNode(params[0])
 
-  def perspective_node_start_master(self, params):
+  @staticmethod
+  def perspective_node_start_master(params):
+    """Promote this node to master status.
+
+    """
     return backend.StartMaster()
 
-  def perspective_node_stop_master(self, params):
+  @staticmethod
+  def perspective_node_stop_master(params):
+    """Demote this node from master status.
+
+    """
     return backend.StopMaster()
 
-  def perspective_node_leave_cluster(self, params):
+  @staticmethod
+  def perspective_node_leave_cluster(params):
+    """Cleanup after leaving a cluster.
+
+    """
     return backend.LeaveCluster()
 
-  def perspective_node_volumes(self, params):
+  @staticmethod
+  def perspective_node_volumes(params):
+    """Query the list of all logical volume groups.
+
+    """
     return backend.NodeVolumes()
 
   # cluster --------------------------
 
-  def perspective_version(self, params):
+  @staticmethod
+  def perspective_version(params):
+    """Query version information.
+
+    """
     return constants.PROTOCOL_VERSION
 
-  def perspective_upload_file(self, params):
+  @staticmethod
+  def perspective_upload_file(params):
+    """Upload a file.
+
+    Note that the backend implementation imposes strict rules on which
+    files are accepted.
+
+    """
     return backend.UploadFile(*params)
 
 
   # os -----------------------
 
-  def perspective_os_diagnose(self, params):
+  @staticmethod
+  def perspective_os_diagnose(params):
+    """Query detailed information about existing OSes.
+
+    """
     os_list = backend.DiagnoseOS()
     if not os_list:
       # this catches also return values of 'False',
@@ -257 +427 @@
       elif isinstance(data, errors.InvalidOS):
         result.append(data.args)
       else:
-        raise errors.ProgrammerError, ("Invalid result from backend.DiagnoseOS"
-                                       " (class %s, %s)" %
-                                       (str(data.__class__), data))
+        raise errors.ProgrammerError("Invalid result from backend.DiagnoseOS"
+                                     " (class %s, %s)" %
+                                     (str(data.__class__), data))
 
     return result
 
-  def perspective_os_get(self, params):
+  @staticmethod
+  def perspective_os_get(params):
+    """Query information about a given OS.
+
+    """
     name = params[0]
     try:
-      os = backend.OSFromDisk(name).Dumps()
+      os_obj = backend.OSFromDisk(name).Dumps()
     except errors.InvalidOS, err:
-      os = err.args
-    return os
+      os_obj = err.args
+    return os_obj
 
   # hooks -----------------------
 
-  def perspective_hooks_runner(self, params):
+  @staticmethod
+  def perspective_hooks_runner(params):
+    """Run hook scripts.
+
+    """
     hpath, phase, env = params
     hr = backend.HooksRunner()
     return hr.RunHooks(hpath, phase, env)
 
 
 class MyRealm:
+  """Simple realm that forwards all requests to a ServerObject.
+
+  """
   __implements__ = portal.IRealm
+
   def requestAvatar(self, avatarId, mind, *interfaces):
+    """Return an avatar based on our ServerObject class.
+
+    """
     if pb.IPerspective not in interfaces:
       raise NotImplementedError
     return pb.IPerspective, ServerObject(avatarId), lambda:None
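
Note: converting every perspective_* handler to a @staticmethod is safe because Twisted's pb.Avatar dispatches by name, looking the handler up on the instance; a staticmethod resolved through getattr() is a plain function and needs no self. A rough, Twisted-free sketch of that dispatch pattern (the class and message names are illustrative, not Ganeti's):

    class Server:
      """Dispatches incoming messages to perspective_* handlers by name."""

      @staticmethod
      def perspective_echo(params):
        # No self needed: the handler only uses its parameters.
        return list(params)

      def dispatch(self, message, params):
        # getattr() on the instance resolves a staticmethod to a plain
        # function, so it can be called without an instance argument.
        handler = getattr(self, "perspective_%s" % message, None)
        if handler is None:
          raise ValueError("unknown message %r" % message)
        return handler(params)

    server = Server()
    print(server.dispatch("echo", (1, 2, 3)))  # -> [1, 2, 3]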
@@ -310 +495 @@
 
 
 def main():
+  """Main function for the node daemon.
+
+  """
   options, args = ParseOptions()
   for fname in (constants.SSL_CERT_FILE,):
     if not os.path.isfile(fname):
@@ -361 +549 @@
   try:
     pid = os.fork()
   except OSError, e:
-    raise Exception, "%s [%d]" % (e.strerror, e.errno)
+    raise Exception("%s [%d]" % (e.strerror, e.errno))
   if (pid == 0):  # The first child.
     os.setsid()
     try:
       pid = os.fork() # Fork a second child.
     except OSError, e:
-      raise Exception, "%s [%d]" % (e.strerror, e.errno)
+      raise Exception("%s [%d]" % (e.strerror, e.errno))
     if (pid == 0):  # The second child.
       os.chdir(WORKDIR)
       os.umask(UMASK)
@@ -393 +581 @@
   return(0)
 
 
-if __name__=='__main__':
+if __name__ == '__main__':
   main()
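
Note: the main() hunk above ends with the classic double-fork daemonization: the first fork plus os.setsid() detaches the process from its controlling terminal, and the second fork ensures the daemon can never reacquire one. A compact sketch of the same sequence (WORKDIR and UMASK are placeholders for the daemon's own constants):

    import os
    import sys

    WORKDIR = "/"   # placeholder for the daemon's working directory
    UMASK = 0o077   # placeholder for the daemon's umask

    def daemonize():
      if os.fork() > 0:   # first fork: parent exits, child continues
        sys.exit(0)
      os.setsid()         # become session leader, drop the controlling tty
      if os.fork() > 0:   # second fork: session leader exits, so the
        sys.exit(0)       # grandchild can never reacquire a terminal
      os.chdir(WORKDIR)
      os.umask(UMASK)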
--- a/daemons/ganeti-watcher
+++ b/daemons/ganeti-watcher
@@ -117 +117 @@
       fcntl.flock(f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
     except IOError, x:
       if x.errno == errno.EAGAIN:
-        raise StandardError('State file already locked')
+        raise StandardError("State file already locked")
       raise
 
     self.statefile = f
@@ -202 +202 @@
     self.state = state
 
   def Restart(self):
+    """Encapsulates the start of an instance.
+
+    This is currently done using the command line interface and not
+    the Ganeti modules.
+
+    """
     DoCmd(['gnt-instance', 'startup', '--lock-retries=15', self.name])
 
 
@@ -258 +264 @@
     sstore = ssconf.SimpleStore()
     master = sstore.GetMasterNode()
     if master != socket.gethostname():
-      raise NotMasterError, ("This is not the master node")
+      raise NotMasterError("This is not the master node")
     self.instances = InstanceList()
     self.messages = []
 
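
Note: the recurring edit throughout this revision replaces the old Python statement form raise Class, argument with the call form raise Class(argument). Both spellings raise the same exception, but only the call form is valid in Python 3; and when the second object is a tuple, the old form silently uses it as the constructor's argument list, which is why e.g. raise errors.InvalidOS, (name, msg) becomes errors.InvalidOS(name, msg). A quick demonstration with an illustrative exception class:

    class InvalidOS(Exception):
      # Illustrative stand-in for Ganeti's errors.InvalidOS.
      pass

    # Python 2 accepted:  raise InvalidOS, ("dummy-os", "missing file")
    # where the tuple becomes the constructor's argument list.  The call
    # form below is equivalent and is the only form Python 3 accepts:
    try:
      raise InvalidOS("dummy-os", "missing file")
    except InvalidOS as err:
      print(err.args)  # -> ('dummy-os', 'missing file')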
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -346 +346 @@
 
   iinfo = hypervisor.GetHypervisor().GetAllInstancesInfo()
   if iinfo:
-    for name, id, memory, vcpus, state, times in iinfo:
+    for name, inst_id, memory, vcpus, state, times in iinfo:
       output[name] = {
         'memory': memory,
         'vcpus': vcpus,
@@ -729 +729 @@
   for dsk in disks:
     rbd = _RecursiveFindBD(dsk)
     if rbd is None:
-      raise errors.BlockDeviceError, "Can't find device %s" % str(dsk)
+      raise errors.BlockDeviceError("Can't find device %s" % str(dsk))
     stats.append(rbd.CombinedSyncStatus())
   return stats
 
@@ -847 +847 @@
   try:
     st = os.stat(api_file)
   except EnvironmentError, err:
-    raise errors.InvalidOS, (name, "'ganeti_api_version' file not"
-                             " found (%s)" % _ErrnoOrStr(err))
+    raise errors.InvalidOS(name, "'ganeti_api_version' file not"
+                           " found (%s)" % _ErrnoOrStr(err))
 
   if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
-    raise errors.InvalidOS, (name, "'ganeti_api_version' file is not"
-                             " a regular file")
+    raise errors.InvalidOS(name, "'ganeti_api_version' file is not"
+                           " a regular file")
 
   try:
     f = open(api_file)
@@ -861 +861 @@
     finally:
       f.close()
   except EnvironmentError, err:
-    raise errors.InvalidOS, (name, "error while reading the"
-                             " API version (%s)" % _ErrnoOrStr(err))
+    raise errors.InvalidOS(name, "error while reading the"
+                           " API version (%s)" % _ErrnoOrStr(err))
 
   api_version = api_version.strip()
   try:
     api_version = int(api_version)
   except (TypeError, ValueError), err:
-    raise errors.InvalidOS, (name, "API version is not integer (%s)" %
-                             str(err))
+    raise errors.InvalidOS(name, "API version is not integer (%s)" % str(err))
 
   return api_version
 
@@ -920 +919 @@
   api_version = _OSOndiskVersion(name, os_dir)
 
   if api_version != constants.OS_API_VERSION:
-    raise errors.InvalidOS, (name, "API version mismatch (found %s want %s)"
-                             % (api_version, constants.OS_API_VERSION))
+    raise errors.InvalidOS(name, "API version mismatch (found %s want %s)"
+                           % (api_version, constants.OS_API_VERSION))
 
   # OS Scripts dictionary, we will populate it with the actual script names
   os_scripts = {'create': '', 'export': '', 'import': ''}
@@ -932 +931 @@
     try:
       st = os.stat(os_scripts[script])
     except EnvironmentError, err:
-      raise errors.InvalidOS, (name, "'%s' script missing (%s)" %
-                               (script, _ErrnoOrStr(err)))
+      raise errors.InvalidOS(name, "'%s' script missing (%s)" %
+                             (script, _ErrnoOrStr(err)))
 
     if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
-      raise errors.InvalidOS, (name, "'%s' script not executable" % script)
+      raise errors.InvalidOS(name, "'%s' script not executable" % script)
 
     if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
-      raise errors.InvalidOS, (name, "'%s' is not a regular file" % script)
+      raise errors.InvalidOS(name, "'%s' is not a regular file" % script)
 
 
   return objects.OS(name=name, path=os_dir,
@@ -980 +979 @@
     else:
       return None
   else:
-    raise errors.ProgrammerError, ("Cannot snapshot non-lvm block device"
-                                   "'%s' of type '%s'" %
-                                   (disk.unique_id, disk.dev_type))
+    raise errors.ProgrammerError("Cannot snapshot non-lvm block device"
+                                 "'%s' of type '%s'" %
+                                 (disk.unique_id, disk.dev_type))
 
 
 def ExportSnapshot(disk, dest_node, instance):
@@ -1155 +1154 @@
 
   real_os_dev = _RecursiveFindBD(os_device)
   if real_os_dev is None:
-    raise errors.BlockDeviceError, ("Block device '%s' is not set up" %
-                                    str(os_device))
+    raise errors.BlockDeviceError("Block device '%s' is not set up" %
+                                  str(os_device))
   real_os_dev.Open()
 
   real_swap_dev = _RecursiveFindBD(swap_device)
   if real_swap_dev is None:
-    raise errors.BlockDeviceError, ("Block device '%s' is not set up" %
-                                    str(swap_device))
+    raise errors.BlockDeviceError("Block device '%s' is not set up" %
+                                  str(swap_device))
   real_swap_dev.Open()
 
   logfile = "%s/import-%s-%s-%s.log" % (constants.LOG_OS_DIR, instance.os,
@@ -1302 +1301 @@
     elif phase == constants.HOOKS_PHASE_POST:
       suffix = "post"
     else:
-      raise errors.ProgrammerError, ("Unknown hooks phase: '%s'" % phase)
+      raise errors.ProgrammerError("Unknown hooks phase: '%s'" % phase)
     rr = []
 
     subdir = "%s-%s.d" % (hpath, suffix)
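
Note: besides the raise conversions, this file also renames locals that shadowed Python builtins or modules: the unpacked id becomes inst_id, and (in ganeti-noded above) the export directory dir becomes path while the os local that hid the os module becomes os_obj. Shadowing is legal but breaks any later use of the hidden name in the same scope, e.g.:

    import os

    def broken(name):
      os = name.upper()        # shadows the os module for the whole function;
      return os                # any later os.path call here would fail

    def fixed(name):
      os_obj = name.upper()    # renamed local, the module stays usable
      return os.path.join("/srv", os_obj)

    print(fixed("debian"))  # -> /srv/DEBIAN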
--- a/lib/bdev.py
+++ b/lib/bdev.py
@@ -302 +302 @@
     vg_name, lv_name = unique_id
     pvs_info = cls.GetPVInfo(vg_name)
     if not pvs_info:
-      raise errors.BlockDeviceError, ("Can't compute PV info for vg %s" %
-                                      vg_name)
+      raise errors.BlockDeviceError("Can't compute PV info for vg %s" %
+                                    vg_name)
     pvs_info.sort()
     pvs_info.reverse()
 
@@ -313 +313 @@
     # The size constraint should have been checked from the master before
     # calling the create function.
     if free_size < size:
-      raise errors.BlockDeviceError, ("Not enough free space: required %s,"
-                                      " available %s" % (size, free_size))
+      raise errors.BlockDeviceError("Not enough free space: required %s,"
+                                    " available %s" % (size, free_size))
     result = utils.RunCmd(["lvcreate", "-L%dm" % size, "-n%s" % lv_name,
                            vg_name] + pvlist)
     if result.failed:
@@ -469 +469 @@
 
     pvs_info = self.GetPVInfo(self._vg_name)
     if not pvs_info:
-      raise errors.BlockDeviceError, ("Can't compute PV info for vg %s" %
-                                      self._vg_name)
+      raise errors.BlockDeviceError("Can't compute PV info for vg %s" %
+                                    self._vg_name)
     pvs_info.sort()
     pvs_info.reverse()
     free_size, pv_name = pvs_info[0]
     if free_size < size:
-      raise errors.BlockDeviceError, ("Not enough free space: required %s,"
-                                      " available %s" % (size, free_size))
+      raise errors.BlockDeviceError("Not enough free space: required %s,"
+                                    " available %s" % (size, free_size))
 
     result = utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                            "-n%s" % snap_name, self.dev_path])
     if result.failed:
-      raise errors.BlockDeviceError, ("command: %s error: %s" %
-                                      (result.cmd, result.fail_reason))
+      raise errors.BlockDeviceError("command: %s error: %s" %
+                                    (result.cmd, result.fail_reason))
 
     return snap_name
 
@@ -503 +503 @@
     result = utils.RunCmd(["lvchange", "--addtag", text,
                            self.dev_path])
     if result.failed:
-      raise errors.BlockDeviceError, ("Command: %s error: %s" %
-                                      (result.cmd, result.fail_reason))
+      raise errors.BlockDeviceError("Command: %s error: %s" %
+                                    (result.cmd, result.fail_reason))
 
 
 class MDRaid1(BlockDev):
@@ -659 +659 @@
 
     """
     if self.minor is None and not self.Attach():
-      raise errors.BlockDeviceError, "Can't attach to device"
+      raise errors.BlockDeviceError("Can't attach to device")
     if device.dev_path is None:
-      raise errors.BlockDeviceError, "New child is not initialised"
+      raise errors.BlockDeviceError("New child is not initialised")
     result = utils.RunCmd(["mdadm", "-a", self.dev_path, device.dev_path])
     if result.failed:
-      raise errors.BlockDeviceError, ("Failed to add new device to array: %s" %
-                                      result.output)
+      raise errors.BlockDeviceError("Failed to add new device to array: %s" %
+                                    result.output)
     new_len = len(self._children) + 1
     result = utils.RunCmd(["mdadm", "--grow", self.dev_path, "-n", new_len])
     if result.failed:
-      raise errors.BlockDeviceError, ("Can't grow md array: %s" %
-                                      result.output)
+      raise errors.BlockDeviceError("Can't grow md array: %s" %
+                                    result.output)
     self._children.append(device)
 
 
@@ -679 +679 @@
 
     """
     if self.minor is None and not self.Attach():
-      raise errors.BlockDeviceError, "Can't attach to device"
+      raise errors.BlockDeviceError("Can't attach to device")
     if len(self._children) == 1:
-      raise errors.BlockDeviceError, ("Can't reduce member when only one"
-                                      " child left")
+      raise errors.BlockDeviceError("Can't reduce member when only one"
+                                    " child left")
     for device in self._children:
       if device.dev_path == dev_path:
         break
     else:
-      raise errors.BlockDeviceError, "Can't find child with this path"
+      raise errors.BlockDeviceError("Can't find child with this path")
     new_len = len(self._children) - 1
     result = utils.RunCmd(["mdadm", "-f", self.dev_path, dev_path])
     if result.failed:
-      raise errors.BlockDeviceError, ("Failed to mark device as failed: %s" %
-                                      result.output)
+      raise errors.BlockDeviceError("Failed to mark device as failed: %s" %
+                                    result.output)
 
     # it seems here we need a short delay for MD to update its
     # superblocks
     time.sleep(0.5)
     result = utils.RunCmd(["mdadm", "-r", self.dev_path, dev_path])
     if result.failed:
-      raise errors.BlockDeviceError, ("Failed to remove device from array:"
-                                      " %s" % result.output)
+      raise errors.BlockDeviceError("Failed to remove device from array:"
+                                        " %s" % result.output)
     result = utils.RunCmd(["mdadm", "--grow", "--force", self.dev_path,
                            "-n", new_len])
     if result.failed:
-      raise errors.BlockDeviceError, ("Can't shrink md array: %s" %
-                                      result.output)
+      raise errors.BlockDeviceError("Can't shrink md array: %s" %
+                                    result.output)
     self._children.remove(device)
 
 
@@ -1218 +1218 @@
 
     minor = self._FindUnusedMinor()
     if minor is None:
-      raise errors.BlockDeviceError, "Not enough free minors for DRBD!"
+      raise errors.BlockDeviceError("Not enough free minors for DRBD!")
     need_localdev_teardown = False
     if self._children[0]:
       result = self._AssembleLocal(minor, self._children[0].dev_path,
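
Note: nearly every hunk in this file follows the same shape: run an external LVM or mdadm command through utils.RunCmd, test result.failed, and wrap a failure in a BlockDeviceError that carries the command output. A generic sketch of the pattern (RunResult and run_cmd are simplified stand-ins for Ganeti's utils, and mdadm is assumed to be installed):

    import collections
    import subprocess

    # Simplified stand-in for the object returned by utils.RunCmd.
    RunResult = collections.namedtuple("RunResult", "failed cmd output")

    class BlockDeviceError(Exception):
      pass

    def run_cmd(args):
      proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
      out, _ = proc.communicate()
      return RunResult(proc.returncode != 0, " ".join(args), out)

    def grow_array(dev_path, new_len):
      # Same shape as the mdadm calls above: run, test, wrap the failure.
      result = run_cmd(["mdadm", "--grow", dev_path, "-n", str(new_len)])
      if result.failed:
        raise BlockDeviceError("Can't grow md array: %s" % result.output)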
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -88 +88 @@
   try:
     return utils.ParseUnit(value)
   except errors.UnitParseError, err:
-    raise OptionValueError, ("option %s: %s" % (opt, err))
+    raise OptionValueError("option %s: %s" % (opt, err))
 
 
 class CliOption(Option):
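
Note: this hunk sits inside an optparse type-checker callback: a value that fails to parse is reported by raising OptionValueError, which optparse turns into a clean usage error instead of a traceback. A minimal sketch of such a custom option type (check_unit is a simplified stand-in for the utils.ParseUnit-based checker):

    from optparse import Option, OptionParser, OptionValueError

    def check_unit(option, opt, value):
      # Simplified stand-in for utils.ParseUnit: plain integers only.
      try:
        return int(value)
      except ValueError as err:
        raise OptionValueError("option %s: %s" % (opt, err))

    class CliOption(Option):
      TYPES = Option.TYPES + ("unit",)
      TYPE_CHECKER = dict(Option.TYPE_CHECKER, unit=check_unit)

    parser = OptionParser(option_class=CliOption)
    parser.add_option("--size", type="unit")
    print(parser.parse_args(["--size", "128"])[0].size)  # -> 128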
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -77 +77 @@
     for attr_name in self._OP_REQP:
       attr_val = getattr(op, attr_name, None)
       if attr_val is None:
-        raise errors.OpPrereqError, ("Required parameter '%s' missing" %
-                                     attr_name)
+        raise errors.OpPrereqError("Required parameter '%s' missing" %
+                                   attr_name)
     if self.REQ_CLUSTER:
       if not cfg.IsCluster():
-        raise errors.OpPrereqError, ("Cluster not initialized yet,"
-                                     " use 'gnt-cluster init' first.")
+        raise errors.OpPrereqError("Cluster not initialized yet,"
+                                   " use 'gnt-cluster init' first.")
       if self.REQ_MASTER:
         master = sstore.GetMasterNode()
         if master != socket.gethostname():
-          raise errors.OpPrereqError, ("Commands must be run on the master"
-                                       " node %s" % master)
+          raise errors.OpPrereqError("Commands must be run on the master"
+                                     " node %s" % master)
 
   def CheckPrereq(self):
     """Check prerequisites for this LU.
@@ -172 +172 @@
 
   """
   if nodes is not None and not isinstance(nodes, list):
-    raise errors.OpPrereqError, "Invalid argument type 'nodes'"
+    raise errors.OpPrereqError("Invalid argument type 'nodes'")
 
   if nodes:
     wanted_nodes = []
@@ -180 +180 @@
     for name in nodes:
       node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
       if node is None:
-        raise errors.OpPrereqError, ("No such node name '%s'" % name)
+        raise errors.OpPrereqError("No such node name '%s'" % name)
     wanted_nodes.append(node)
 
     return wanted_nodes
@@ -202 +202 @@
   all_fields = static_fields | dynamic_fields
 
   if not all_fields.issuperset(selected):
-    raise errors.OpPrereqError, ("Unknown output fields selected: %s"
-                                 % ",".join(frozenset(selected).
-                                            difference(all_fields)))
+    raise errors.OpPrereqError("Unknown output fields selected: %s"
+                               % ",".join(frozenset(selected).
+                                          difference(all_fields)))
 
 
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
@@ -462 +462 @@
                          "-f", "/root/.ssh/id_dsa",
                          "-q", "-N", ""])
   if result.failed:
-    raise errors.OpExecError, ("could not generate ssh keypair, error %s" %
-                               result.output)
+    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
+                             result.output)
 
   f = open('/root/.ssh/id_dsa.pub', 'r')
   try:
@@ -489 +489 @@
                          "-keyout", constants.SSL_CERT_FILE,
                          "-out", constants.SSL_CERT_FILE, "-batch"])
   if result.failed:
-    raise errors.OpExecError, ("could not generate server ssl cert, command"
-                               " %s had exitcode %s and error message %s" %
-                               (result.cmd, result.exit_code, result.output))
+    raise errors.OpExecError("could not generate server ssl cert, command"
+                             " %s had exitcode %s and error message %s" %
+                             (result.cmd, result.exit_code, result.output))
 
   os.chmod(constants.SSL_CERT_FILE, 0400)
 
   result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
 
   if result.failed:
-    raise errors.OpExecError, ("could not start the node daemon, command %s"
-                               " had exitcode %s and error %s" %
-                               (result.cmd, result.exit_code, result.output))
+    raise errors.OpExecError("Could not start the node daemon, command %s"
+                             " had exitcode %s and error %s" %
+                             (result.cmd, result.exit_code, result.output))
 
 
 class LUInitCluster(LogicalUnit):
@@ -531 +531 @@
 
     """
     if config.ConfigWriter.IsCluster():
-      raise errors.OpPrereqError, ("Cluster is already initialised")
+      raise errors.OpPrereqError("Cluster is already initialised")
 
     hostname_local = socket.gethostname()
     self.hostname = hostname = utils.LookupHostname(hostname_local)
     if not hostname:
-      raise errors.OpPrereqError, ("Cannot resolve my own hostname ('%s')" %
-                                   hostname_local)
+      raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
+                                 hostname_local)
 
     self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
     if not clustername:
-      raise errors.OpPrereqError, ("Cannot resolve given cluster name ('%s')"
-                                   % self.op.cluster_name)
+      raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
+                                 % self.op.cluster_name)
 
     result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
     if result.failed:
-      raise errors.OpPrereqError, ("Inconsistency: this host's name resolves"
-                                   " to %s,\nbut this ip address does not"
-                                   " belong to this host."
-                                   " Aborting." % hostname['ip'])
+      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
+                                 " to %s,\nbut this ip address does not"
+                                 " belong to this host."
+                                 " Aborting." % hostname['ip'])
 
     secondary_ip = getattr(self.op, "secondary_ip", None)
     if secondary_ip and not utils.IsValidIP(secondary_ip):
-      raise errors.OpPrereqError, ("Invalid secondary ip given")
+      raise errors.OpPrereqError("Invalid secondary ip given")
     if secondary_ip and secondary_ip != hostname['ip']:
       result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
       if result.failed:
-        raise errors.OpPrereqError, ("You gave %s as secondary IP,\n"
-                                     "but it does not belong to this host." %
-                                     secondary_ip)
+        raise errors.OpPrereqError("You gave %s as secondary IP,\n"
+                                   "but it does not belong to this host." %
+                                   secondary_ip)
     self.secondary_ip = secondary_ip
 
     # checks presence of the volume group given
     vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
 
     if vgstatus:
-      raise errors.OpPrereqError, ("Error: %s" % vgstatus)
+      raise errors.OpPrereqError("Error: %s" % vgstatus)
 
     if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                     self.op.mac_prefix):
-      raise errors.OpPrereqError, ("Invalid mac prefix given '%s'" %
-                                   self.op.mac_prefix)
+      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
+                                 self.op.mac_prefix)
 
     if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
-      raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" %
-                                   self.op.hypervisor_type)
+      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
+                                 self.op.hypervisor_type)
 
     result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
     if result.failed:
-      raise errors.OpPrereqError, ("Invalid master netdev given (%s): '%s'" %
-                                   (self.op.master_netdev, result.output))
+      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
+                                 (self.op.master_netdev, result.output))
 
   def Exec(self, feedback_fn):
     """Initialize the cluster.
@@ -647 +647 @@
 
     nodelist = self.cfg.GetNodeList()
     if len(nodelist) != 1 or nodelist[0] != master:
-      raise errors.OpPrereqError, ("There are still %d node(s) in "
-                                   "this cluster." % (len(nodelist) - 1))
+      raise errors.OpPrereqError("There are still %d node(s) in"
+                                 " this cluster." % (len(nodelist) - 1))
     instancelist = self.cfg.GetInstanceList()
     if instancelist:
-      raise errors.OpPrereqError, ("There are still %d instance(s) in "
-                                   "this cluster." % len(instancelist))
+      raise errors.OpPrereqError("There are still %d instance(s) in"
+                                 " this cluster." % len(instancelist))
 
   def Exec(self, feedback_fn):
     """Destroys the cluster.
@@ -932 +932 @@
       logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
       if retries >= 10:
-        raise errors.RemoteError, ("Can't contact node %s for mirror data,"
-                                   " aborting." % node)
+        raise errors.RemoteError("Can't contact node %s for mirror data,"
+                                 " aborting." % node)
       time.sleep(6)
       continue
     retries = 0
@@ -1012 +1012 @@
     node_list = self.cfg.GetNodeList()
     node_data = rpc.call_os_diagnose(node_list)
     if node_data == False:
-      raise errors.OpExecError, "Can't gather the list of OSes"
+      raise errors.OpExecError("Can't gather the list of OSes")
     return node_data
 
 
@@ -1058 +1058 @@
 
     masternode = self.sstore.GetMasterNode()
     if node.name == masternode:
-      raise errors.OpPrereqError, ("Node is the master node,"
-                                   " you need to failover first.")
+      raise errors.OpPrereqError("Node is the master node,"
+                                 " you need to failover first.")
 
     for instance_name in instance_list:
       instance = self.cfg.GetInstanceInfo(instance_name)
       if node.name == instance.primary_node:
-        raise errors.OpPrereqError, ("Instance %s still running on the node,"
-                                     " please remove first." % instance_name)
+        raise errors.OpPrereqError("Instance %s still running on the node,"
+                                   " please remove first." % instance_name)
       if node.name in instance.secondary_nodes:
-        raise errors.OpPrereqError, ("Instance %s has node as a secondary,"
-                                     " please remove first." % instance_name)
+        raise errors.OpPrereqError("Instance %s has node as a secondary,"
+                                   " please remove first." % instance_name)
     self.op.node_name = node.name
     self.node = node
 
@@ -1168 +1168 @@
         elif field in self.dynamic_fields:
           val = live_data[node.name].get(field, "?")
         else:
-          raise errors.ParameterError, field
+          raise errors.ParameterError(field)
         val = str(val)
         node_output.append(val)
       output.append(node_output)
@@ -1238 +1238 @@
             else:
               val = '-'
           else:
-            raise errors.ParameterError, field
+            raise errors.ParameterError(field)
           node_output.append(str(val))
 
         output.append(node_output)
@@ -1285 +1285 @@
 
     dns_data = utils.LookupHostname(node_name)
     if not dns_data:
-      raise errors.OpPrereqError, ("Node %s is not resolvable" % node_name)
+      raise errors.OpPrereqError("Node %s is not resolvable" % node_name)
 
     node = dns_data['hostname']
     primary_ip = self.op.primary_ip = dns_data['ip']
@@ -1293 +1293 @@
     if secondary_ip is None:
       secondary_ip = primary_ip
     if not utils.IsValidIP(secondary_ip):
-      raise errors.OpPrereqError, ("Invalid secondary IP given")
+      raise errors.OpPrereqError("Invalid secondary IP given")
     self.op.secondary_ip = secondary_ip
     node_list = cfg.GetNodeList()
     if node in node_list:
-      raise errors.OpPrereqError, ("Node %s is already in the configuration"
-                                   % node)
+      raise errors.OpPrereqError("Node %s is already in the configuration"
+                                 % node)
 
     for existing_node_name in node_list:
       existing_node = cfg.GetNodeInfo(existing_node_name)
@@ -1306 +1306 @@
           existing_node.secondary_ip == primary_ip or
           existing_node.primary_ip == secondary_ip or
           existing_node.secondary_ip == secondary_ip):
-        raise errors.OpPrereqError, ("New node ip address(es) conflict with"
-                                     " existing node %s" % existing_node.name)
+        raise errors.OpPrereqError("New node ip address(es) conflict with"
+                                   " existing node %s" % existing_node.name)
 
     # check that the type of the node (single versus dual homed) is the
     # same as for the master
@@ -1316 +1316 @@
     newbie_singlehomed = secondary_ip == primary_ip
     if master_singlehomed != newbie_singlehomed:
       if master_singlehomed:
-        raise errors.OpPrereqError, ("The master has no private ip but the"
-                                     " new node has one")
+        raise errors.OpPrereqError("The master has no private ip but the"
+                                   " new node has one")
       else:
-        raise errors.OpPrereqError ("The master has a private ip but the"
-                                    " new node doesn't have one")
+        raise errors.OpPrereqError("The master has a private ip but the"
+                                   " new node doesn't have one")
 
     # checks reachablity
     command = ["fping", "-q", primary_ip]
     result = utils.RunCmd(command)
     if result.failed:
-      raise errors.OpPrereqError, ("Node not reachable by ping")
+      raise errors.OpPrereqError("Node not reachable by ping")
 
     if not newbie_singlehomed:
       # check reachability from my secondary ip to newbie's secondary ip
       command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
       result = utils.RunCmd(command)
       if result.failed:
-        raise errors.OpPrereqError, ("Node secondary ip not reachable by ping")
+        raise errors.OpPrereqError("Node secondary ip not reachable by ping")
 
     self.new_node = objects.Node(name=node,
                                  primary_ip=primary_ip,
@@ -1349 +1349 @@
     # set up inter-node password and certificate and restarts the node daemon
     gntpass = self.sstore.GetNodeDaemonPassword()
     if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
-      raise errors.OpExecError, ("ganeti password corruption detected")
+      raise errors.OpExecError("ganeti password corruption detected")
     f = open(constants.SSL_CERT_FILE)
     try:
       gntpem = f.read(8192)
@@ -1360 +1360 @@
     # cert doesn't contain this, the here-document will be correctly
     # parsed by the shell sequence below
     if re.search('^!EOF\.', gntpem, re.MULTILINE):
-      raise errors.OpExecError, ("invalid PEM encoding in the SSL certificate")
+      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
     if not gntpem.endswith("\n"):
-      raise errors.OpExecError, ("PEM must end with newline")
+      raise errors.OpExecError("PEM must end with newline")
     logger.Info("copy cluster pass to %s and starting the node daemon" % node)
 
     # remove first the root's known_hosts file
@@ -1381 +1381 @@
 
     result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
     if result.failed:
-      raise errors.OpExecError, ("Remote command on node %s, error: %s,"
-                                 " output: %s" %
-                                 (node, result.fail_reason, result.output))
+      raise errors.OpExecError("Remote command on node %s, error: %s,"
+                               " output: %s" %
+                               (node, result.fail_reason, result.output))
 
     # check connectivity
     time.sleep(4)
@@ -1394 +1394 @@
         logger.Info("communication to node %s fine, sw version %s match" %
                     (node, result))
       else:
-        raise errors.OpExecError, ("Version mismatch master version %s,"
-                                   " node version %s" %
-                                   (constants.PROTOCOL_VERSION, result))
+        raise errors.OpExecError("Version mismatch master version %s,"
+                                 " node version %s" %
+                                 (constants.PROTOCOL_VERSION, result))
     else:
-      raise errors.OpExecError, ("Cannot get version from the new node")
+      raise errors.OpExecError("Cannot get version from the new node")
 
     # setup ssh on node
     logger.Info("copy ssh key to node %s" % node)
@@ -1418 +1418 @@
                                keyarray[3], keyarray[4], keyarray[5])
 
     if not result:
-      raise errors.OpExecError, ("Cannot transfer ssh keys to the new node")
+      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
 
     # Add node to our /etc/hosts, and add key to known_hosts
     _UpdateEtcHosts(new_node.name, new_node.primary_ip)
@@ -1429 +1429 @@
       result = ssh.SSHCall(node, "root",
                            "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
       if result.failed:
-        raise errors.OpExecError, ("Node claims it doesn't have the"
-                                   " secondary ip you gave (%s).\n"
-                                   "Please fix and re-run this command." %
-                                   new_node.secondary_ip)
+        raise errors.OpExecError("Node claims it doesn't have the"
+                                 " secondary ip you gave (%s).\n"
+                                 "Please fix and re-run this command." %
+                                 new_node.secondary_ip)
 
     # Distribute updated /etc/hosts and known_hosts to all nodes,
     # including the node just added
@@ -1493 +1493 @@
     self.old_master = self.sstore.GetMasterNode()
 
     if self.old_master == self.new_master:
-      raise errors.OpPrereqError, ("This commands must be run on the node"
-                                   " where you want the new master to be.\n"
-                                   "%s is already the master" %
-                                   self.old_master)
+      raise errors.OpPrereqError("This commands must be run on the node"
+                                 " where you want the new master to be.\n"
+                                 "%s is already the master" %
+                                 self.old_master)
 
   def Exec(self, feedback_fn):
     """Failover the master node.
@@ -1659 +1659 @@
     instance = self.cfg.GetInstanceInfo(
       self.cfg.ExpandInstanceName(self.op.instance_name))
     if instance is None:
-      raise errors.OpPrereqError, ("Instance '%s' not known" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
     self.instance = instance
 
 
@@ -1670 +1670 @@
     """
     disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
     if not disks_ok:
-      raise errors.OpExecError, ("Cannot activate block devices")
+      raise errors.OpExecError("Cannot activate block devices")
 
     return disks_info
 
@@ -1712 +1712 @@
 
 
 def _StartInstanceDisks(cfg, instance, force):
+  """Start the disks of an instance.
+
+  """
   disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
                                            ignore_secondaries=force)
   if not disks_ok:
@@ -1719 +1722 @@
     if force is not None and not force:
       logger.Error("If the message above refers to a secondary node,"
                    " you can retry the operation using '--force'.")
-    raise errors.OpExecError, ("Disk consistency error")
+    raise errors.OpExecError("Disk consistency error")
 
 
 class LUDeactivateInstanceDisks(NoHooksLU):
@@ -1737 +1740 @@
     instance = self.cfg.GetInstanceInfo(
       self.cfg.ExpandInstanceName(self.op.instance_name))
     if instance is None:
-      raise errors.OpPrereqError, ("Instance '%s' not known" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
     self.instance = instance
 
   def Exec(self, feedback_fn):
@@ -1749 +1752 @@
     ins_l = rpc.call_instance_list([instance.primary_node])
     ins_l = ins_l[instance.primary_node]
     if not type(ins_l) is list:
-      raise errors.OpExecError, ("Can't contact node '%s'" %
-                                 instance.primary_node)
+      raise errors.OpExecError("Can't contact node '%s'" %
+                               instance.primary_node)
 
     if self.instance.name in ins_l:
-      raise errors.OpExecError, ("Instance is running, can't shutdown"
-                                 " block devices.")
+      raise errors.OpExecError("Instance is running, can't shutdown"
+                               " block devices.")
 
     _ShutdownInstanceDisks(instance, self.cfg)
 
@@ -1811 +1814 @@
     instance = self.cfg.GetInstanceInfo(
       self.cfg.ExpandInstanceName(self.op.instance_name))
     if instance is None:
-      raise errors.OpPrereqError, ("Instance '%s' not known" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
 
     # check bridges existance
     brlist = [nic.bridge for nic in instance.nics]
     if not rpc.call_bridges_exist(instance.primary_node, brlist):
-      raise errors.OpPrereqError, ("one or more target bridges %s does not"
-                                   " exist on destination node '%s'" %
-                                   (brlist, instance.primary_node))
+      raise errors.OpPrereqError("one or more target bridges %s does not"
+                                 " exist on destination node '%s'" %
+                                 (brlist, instance.primary_node))
 
     self.instance = instance
     self.op.instance_name = instance.name
@@ -1836 +1839 @@
 
     nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
     if not nodeinfo:
-      raise errors.OpExecError, ("Could not contact node %s for infos" %
-                                 (node_current))
+      raise errors.OpExecError("Could not contact node %s for infos" %
+                               (node_current))
 
     freememory = nodeinfo[node_current]['memory_free']
     memory = instance.memory
     if memory > freememory:
-      raise errors.OpExecError, ("Not enough memory to start instance"
-                                 " %s on node %s"
-                                 " needed %s MiB, available %s MiB" %
-                                 (instance.name, node_current, memory,
-                                  freememory))
+      raise errors.OpExecError("Not enough memory to start instance"
+                               " %s on node %s"
+                               " needed %s MiB, available %s MiB" %
+                               (instance.name, node_current, memory,
+                                freememory))
 
     _StartInstanceDisks(self.cfg, instance, force)
 
     if not rpc.call_instance_start(node_current, instance, extra_args):
       _ShutdownInstanceDisks(instance, self.cfg)
-      raise errors.OpExecError, ("Could not start instance")
+      raise errors.OpExecError("Could not start instance")
 
     self.cfg.MarkInstanceUp(instance.name)
 
@@ -1885 +1888 @@
     instance = self.cfg.GetInstanceInfo(
       self.cfg.ExpandInstanceName(self.op.instance_name))
     if instance is None:
-      raise errors.OpPrereqError, ("Instance '%s' not known" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
     self.instance = instance
 
   def Exec(self, feedback_fn):
@@ -1930 +1933 @@
     instance = self.cfg.GetInstanceInfo(
       self.cfg.ExpandInstanceName(self.op.instance_name))
     if instance is None:
-      raise errors.OpPrereqError, ("Instance '%s' not known" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
     if instance.disk_template == constants.DT_DISKLESS:
-      raise errors.OpPrereqError, ("Instance '%s' has no disks" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' has no disks" %
+                                 self.op.instance_name)
     if instance.status != "down":
-      raise errors.OpPrereqError, ("Instance '%s' is marked to be up" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
+                                 self.op.instance_name)
     remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
     if remote_info:
-      raise errors.OpPrereqError, ("Instance '%s' is running on the node %s" %
-                                   (self.op.instance_name,
-                                    instance.primary_node))
+      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
+                                 (self.op.instance_name,
+                                  instance.primary_node))
 
     self.op.os_type = getattr(self.op, "os_type", None)
     if self.op.os_type is not None:
@@ -1950 +1953 @@
       pnode = self.cfg.GetNodeInfo(
         self.cfg.ExpandNodeName(instance.primary_node))
       if pnode is None:
-        raise errors.OpPrereqError, ("Primary node '%s' is unknown" %
-                                     self.op.pnode)
+        raise errors.OpPrereqError("Primary node '%s' is unknown" %
+                                   self.op.pnode)
       os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
       if not isinstance(os_obj, objects.OS):
-        raise errors.OpPrereqError, ("OS '%s' not in supported OS list for"
-                                     " primary node"  % self.op.os_type)
+        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
+                                   " primary node"  % self.op.os_type)
 
     self.instance = instance
 
@@ -1974 +1977 @@
     try:
       feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
-        raise errors.OpExecError, ("Could not install OS for instance %s "
-                                   "on node %s" %
-                                   (inst.name, inst.primary_node))
+        raise errors.OpExecError("Could not install OS for instance %s "
+                                 "on node %s" %
+                                 (inst.name, inst.primary_node))
     finally:
       _ShutdownInstanceDisks(inst, self.cfg)
 
@@ -2009 +2012 @@
     instance = self.cfg.GetInstanceInfo(
       self.cfg.ExpandInstanceName(self.op.instance_name))
     if instance is None:
-      raise errors.OpPrereqError, ("Instance '%s' not known" %
-                                   self.op.instance_name)
+      raise errors.OpPrereqError("Instance '%s' not known" %
+                                 self.op.instance_name)
     self.instance = instance
 
   def Exec(self, feedback_fn):
@@ -2022 +2025 @@
                 (instance.name, instance.primary_node))
... This diff was truncated because it exceeds the maximum size that can be displayed.
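
Note: cmdlib.py converts the largest number of raise sites, and the two exception classes mark a deliberate split in the logical-unit life cycle: OpPrereqError signals a failed precondition (raised from CheckPrereq, before anything was changed), while OpExecError signals a failure during execution (raised from Exec, possibly mid-way). A schematic of how a logical unit uses the two (the class skeleton follows the diff, the check bodies are illustrative stubs):

    class OpPrereqError(Exception):
      """A precondition failed; the operation was never started."""

    class OpExecError(Exception):
      """The operation started but could not be completed."""

    class LogicalUnit(object):
      def CheckPrereq(self):
        # Validate the environment *before* changing anything.
        if not self._cluster_initialized():
          raise OpPrereqError("Cluster not initialized yet,"
                              " use 'gnt-cluster init' first.")

      def Exec(self, feedback_fn):
        # Failures past this point may leave work partially done.
        if not self._start_instance():
          raise OpExecError("Could not start instance")

      # Illustrative stubs, not part of the real class:
      def _cluster_initialized(self):
        return True

      def _start_instance(self):
        return True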
