Revision f4bc1f2c
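Every hunk below reflows wrapped string literals so that the separating space sits at the beginning of the continuation line rather than at the end of the preceding one, where it is easy to miss; a few hunks also drop embedded "\n" characters or add a genuinely missing space (see lib/backend.py). Since Python concatenates adjacent string literals at compile time, the reflowed messages are otherwise unchanged. A minimal sketch of the equivalence, using the message from the first hunk:

  # Adjacent literals are joined at compile time, so both layouts
  # build the identical string; only the source formatting differs.
  old_style = ("Empty or invalid state file. "
               "Using defaults.")
  new_style = ("Empty or invalid state file."
               " Using defaults.")
  assert old_style == new_style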

b/daemons/ganeti-watcher
@@ -122,8 +122,8 @@
     except Exception, msg:
       # Ignore errors while loading the file and treat it as empty
       self.data = {}
-      sys.stderr.write("Empty or invalid state file. "
-          "Using defaults. Error message: %s\n" % msg)
+      sys.stderr.write("Empty or invalid state file."
+                       " Using defaults. Error message: %s\n" % msg)

     if "instance" not in self.data:
       self.data["instance"] = {}
@@ -367,9 +367,8 @@
       # secondary node.
       for instance in GetInstanceList(with_secondaries=check_nodes):
         try:
-          self.messages.append(Message(NOTICE,
-                                       "Activating disks for %s." %
-                                       instance.name))
+          self.messages.append(Message(NOTICE, ("Activating disks for %s." %
+                                                instance.name)))
           instance.ActivateDisks()
         except Error, x:
           self.messages.append(Message(ERROR, str(x)))
@@ -402,9 +401,8 @@
                                        (instance.name, MAXTRIES)))
           continue
         try:
-          self.messages.append(Message(NOTICE,
-                                       "Restarting %s%s." %
-                                       (instance.name, last)))
+          self.messages.append(Message(NOTICE, ("Restarting %s%s." %
+                                                (instance.name, last))))
           instance.Restart()
         except Error, x:
           self.messages.append(Message(ERROR, str(x)))
@@ -416,8 +414,7 @@
       else:
         if notepad.NumberOfRestartAttempts(instance):
           notepad.RemoveInstance(instance)
-          msg = Message(NOTICE,
-                        "Restart of %s succeeded." % instance.name)
+          msg = Message(NOTICE, "Restart of %s succeeded." % instance.name)
           self.messages.append(msg)

   def WriteReport(self, logfile):
b/lib/backend.py
@@ -1135,7 +1135,7 @@
       return None
   else:
     raise errors.ProgrammerError("Cannot snapshot non-lvm block device"
-                                 "'%s' of type '%s'" %
+                                 " '%s' of type '%s'" %
                                  (disk.unique_id, disk.dev_type))


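Worth noting: the leading-space convention makes a missing separator visible in review. The old backend.py message above actually lacked the space, so the device name ran into the word "device". A small sketch (the literal values are placeholders for disk.unique_id and disk.dev_type):

  # The % operator formats the already-concatenated literal, so the
  # missing space shows up in the rendered message.
  broken = ("Cannot snapshot non-lvm block device"
            "'%s' of type '%s'" % ("xenvg/disk0", "lvm"))
  fixed = ("Cannot snapshot non-lvm block device"
           " '%s' of type '%s'" % ("xenvg/disk0", "lvm"))
  print broken  # ...block device'xenvg/disk0' of type 'lvm'
  print fixed   # ...block device 'xenvg/disk0' of type 'lvm'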
b/lib/cmdlib.py
@@ -522,8 +522,8 @@
         secondary_ip != hostname.ip and
         (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                            constants.DEFAULT_NODED_PORT))):
-      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
-                                 "but it does not belong to this host." %
+      raise errors.OpPrereqError("You gave %s as secondary IP,"
+                                 " but it does not belong to this host." %
                                  secondary_ip)
     self.secondary_ip = secondary_ip

@@ -550,8 +550,8 @@

     if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
             os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
-      raise errors.OpPrereqError("Init.d script '%s' missing or not "
-                                 "executable." % constants.NODE_INITD_SCRIPT)
+      raise errors.OpPrereqError("Init.d script '%s' missing or not"
+                                 " executable." % constants.NODE_INITD_SCRIPT)

   def Exec(self, feedback_fn):
     """Initialize the cluster.
@@ -950,8 +950,8 @@
                          (fname, to_node))
     finally:
       if not rpc.call_node_start_master(master):
-        logger.Error("Could not re-enable the master role on the master,\n"
-                     "please restart manually.")
+        logger.Error("Could not re-enable the master role on the master,"
+                     " please restart manually.")


 def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
@@ -1401,8 +1401,8 @@
       if not utils.TcpPing(myself.secondary_ip,
                            secondary_ip,
                            constants.DEFAULT_NODED_PORT):
-        raise errors.OpPrereqError(
-          "Node secondary ip not reachable by TCP based ping to noded port")
+        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
+                                   " based ping to noded port")

     self.new_node = objects.Node(name=node,
                                  primary_ip=primary_ip,
@@ -1500,16 +1500,15 @@
                                     new_node.secondary_ip,
                                     constants.DEFAULT_NODED_PORT,
                                     10, False):
-        raise errors.OpExecError("Node claims it doesn't have the"
-                                 " secondary ip you gave (%s).\n"
-                                 "Please fix and re-run this command." %
-                                 new_node.secondary_ip)
+        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
+                                 " you gave (%s). Please fix and re-run this"
+                                 " command." % new_node.secondary_ip)

     success, msg = ssh.VerifyNodeHostname(node)
     if not success:
       raise errors.OpExecError("Node '%s' claims it has a different hostname"
-                               " than the one the resolver gives: %s.\n"
-                               "Please fix and re-run this command." %
+                               " than the one the resolver gives: %s."
+                               " Please fix and re-run this command." %
                                (node, msg))

     # Distribute updated /etc/hosts and known_hosts to all nodes,
@@ -1572,8 +1571,8 @@

     if self.old_master == self.new_master:
       raise errors.OpPrereqError("This commands must be run on the node"
-                                 " where you want the new master to be.\n"
-                                 "%s is already the master" %
+                                 " where you want the new master to be."
+                                 " %s is already the master" %
                                  self.old_master)

   def Exec(self, feedback_fn):
@@ -1602,8 +1601,8 @@
     if not rpc.call_node_start_master(self.new_master):
       logger.Error("could not start the master role on the new master"
                    " %s, please check" % self.new_master)
-      feedback_fn("Error in activating the master IP on the new master,\n"
-                  "please fix manually.")
+      feedback_fn("Error in activating the master IP on the new master,"
+                  " please fix manually.")



@@ -1778,8 +1777,9 @@
       result = rpc.call_blockdev_assemble(node, node_disk,
                                           instance.name, is_primary)
       if not result:
-        logger.Error("could not prepare block device %s on node %s (is_pri"
-                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
+        logger.Error("could not prepare block device %s on node %s"
+                     " (is_primary=%s)" %
+                     (inst_disk.iv_name, node, is_primary))
         if is_primary or not ignore_secondaries:
           disks_ok = False
       if is_primary:
@@ -2134,8 +2134,8 @@
     try:
       feedback_fn("Running the instance OS create scripts...")
       if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
-        raise errors.OpExecError("Could not install OS for instance %s "
-                                 "on node %s" %
+        raise errors.OpExecError("Could not install OS for instance %s"
+                                 " on node %s" %
                                  (inst.name, inst.primary_node))
     finally:
       _ShutdownInstanceDisks(inst, self.cfg)
@@ -2210,9 +2210,8 @@
     try:
       if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                           "sda", "sdb"):
-        msg = ("Could run OS rename script for instance %s\n"
-               "on node %s\n"
-               "(but the instance has been renamed in Ganeti)" %
+        msg = ("Could run OS rename script for instance %s on node %s (but the"
+               " instance has been renamed in Ganeti)" %
                (inst.name, inst.primary_node))
         logger.Error(msg)
     finally:
@@ -3144,10 +3143,9 @@
       raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                  " instance." % self.op.disk_name)
     if len(disk.children) > 1:
-      raise errors.OpPrereqError("The device already has two slave"
-                                 " devices.\n"
-                                 "This would create a 3-disk raid1"
-                                 " which we don't allow.")
+      raise errors.OpPrereqError("The device already has two slave devices."
+                                 " This would create a 3-disk raid1 which we"
+                                 " don't allow.")
     self.disk = disk

   def Exec(self, feedback_fn):
@@ -3425,9 +3423,8 @@
       if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                         new_drbd, False,
                                         _GetInstanceInfoText(instance)):
-        raise errors.OpExecError("Failed to create new component on"
-                                 " secondary node %s\n"
-                                 "Full abort, cleanup manually!" %
+        raise errors.OpExecError("Failed to create new component on secondary"
+                                 " node %s. Full abort, cleanup manually!" %
                                  remote_node)

       logger.Info("adding new mirror component on primary")
@@ -3438,8 +3435,8 @@
         # remove secondary dev
         cfg.SetDiskID(new_drbd, remote_node)
         rpc.call_blockdev_remove(remote_node, new_drbd)
-        raise errors.OpExecError("Failed to create volume on primary!\n"
-                                 "Full abort, cleanup manually!!")
+        raise errors.OpExecError("Failed to create volume on primary!"
+                                 " Full abort, cleanup manually!!")

       # the device exists now
       # call the primary node to add the mirror to md
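One behavioural note for this file: several of the old messages embedded literal "\n" separators, which the reflowed versions replace with plain spaces, so those errors now print on a single line. A sketch with a placeholder address standing in for the user-supplied secondary IP:

  # "192.0.2.1" is a placeholder value for illustration only.
  old_msg = ("You gave %s as secondary IP,\n"
             "but it does not belong to this host." % "192.0.2.1")
  new_msg = ("You gave %s as secondary IP,"
             " but it does not belong to this host." % "192.0.2.1")
  assert old_msg != new_msg  # the "\n" became a space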
b/qa/ganeti-qa.py
@@ -215,8 +215,8 @@
   """Main program.

   """
-  parser = OptionParser(usage="%prog [options] <config-file> "
-                              "<known-hosts-file>")
+  parser = OptionParser(usage="%prog [options] <config-file>"
+                              " <known-hosts-file>")
   parser.add_option('--dry-run', dest='dry_run',
       action="store_true",
       help="Show what would be done")
b/qa/qa_daemon.py
@@ -95,8 +95,8 @@
   """Shows a warning about the cron job.

   """
-  msg = ("For the following tests it's recommended to turn off the "
-         "ganeti-watcher cronjob.")
+  msg = ("For the following tests it's recommended to turn off the"
+         " ganeti-watcher cronjob.")
   print
   print qa_utils.FormatWarning(msg)

b/qa/qa_instance.py
@@ -232,8 +232,8 @@
         node2disk[node_name].append(name)

   if [node2_full, node_full][int(onmaster)] not in node2disk:
-    raise qa_error.Error("Couldn't find physical disks used on "
-                         "%s node" % ["secondary", "master"][int(onmaster)])
+    raise qa_error.Error("Couldn't find physical disks used on"
+                         " %s node" % ["secondary", "master"][int(onmaster)])

   # Check whether nodes have ability to stop disks
   for node_name, disks in node2disk.iteritems():
@@ -319,8 +319,8 @@

 def TestInstanceMasterDiskFailure(instance, node, node2):
   """Testing disk failure on master node."""
-  print qa_utils.FormatError("Disk failure on primary node cannot be "
-                             "tested due to potential crashes.")
+  print qa_utils.FormatError("Disk failure on primary node cannot be"
+                             " tested due to potential crashes.")
   # The following can cause crashes, thus it's disabled until fixed
   #return _TestInstanceDiskFailure(instance, node, node2, True)

b/qa/qa_node.py
@@ -94,9 +94,9 @@
   master = qa_config.GetMasterNode()

   if qa_utils.GetNodeInstances(node2, secondaries=False):
-    raise qa_error.UnusableNodeError("Secondary node has at least one "
-                                     "primary instance. This test requires "
-                                     "it to have no primary instances.")
+    raise qa_error.UnusableNodeError("Secondary node has at least one"
+                                     " primary instance. This test requires"
+                                     " it to have no primary instances.")

   # Fail over to secondary node
   cmd = ['gnt-node', 'failover', '-f', node['primary']]
@@ -117,9 +117,9 @@
   node3 = qa_config.AcquireNode(exclude=[node, node2])
   try:
     if qa_utils.GetNodeInstances(node3, secondaries=True):
-      raise qa_error.UnusableNodeError("Evacuation node has at least one "
-                                       "secondary instance. This test requires "
-                                       "it to have no secondary instances.")
+      raise qa_error.UnusableNodeError("Evacuation node has at least one"
+                                       " secondary instance. This test requires"
+                                       " it to have no secondary instances.")

     # Evacuate all secondary instances
     cmd = ['gnt-node', 'evacuate', '-f', node2['primary'], node3['primary']]
b/scripts/gnt-cluster
@@ -210,9 +210,9 @@
 # this is an option common to more than one command, so we declare
 # it here and reuse it
 node_option = make_option("-n", "--node", action="append", dest="nodes",
-                          help="Node to copy to (if not given, all nodes)"
-                          ", can be given multiple times", metavar="<node>",
-                          default=[])
+                          help="Node to copy to (if not given, all nodes),"
+                               " can be given multiple times",
+                          metavar="<node>", default=[])

 commands = {
   'init': (InitCluster, ARGS_ONE,
b/scripts/gnt-instance
@@ -242,8 +242,8 @@
   instance_name = args[0]

   if not opts.force:
-    usertext = ("This will reinstall the instance %s and remove "
-                "all data. Continue?") % instance_name
+    usertext = ("This will reinstall the instance %s and remove"
+                " all data. Continue?") % instance_name
     if not AskUser(usertext):
       return 1

b/tools/cfgupgrade
@@ -173,8 +173,8 @@
   parser = optparse.OptionParser()
   parser.add_option('--dry-run', dest='dry_run',
                     action="store_true",
-                    help="Try to do the conversion, but don't write "
-                      "output file")
+                    help="Try to do the conversion, but don't write"
+                         " output file")
   parser.add_option(FORCE_OPT)
   parser.add_option('--verbose', dest='verbose',
                     action="store_true",
@@ -188,8 +188,8 @@
     raise Error("Configuration file not specified")

   if not options.force:
-    usertext = ("%s MUST run on the master node. Is this the master "
-                "node?" % program)
+    usertext = ("%s MUST run on the master node. Is this the master"
+                " node?" % program)
     if not AskUser(usertext):
       sys.exit(1)

b/tools/lvmstrap
@@ -190,24 +190,24 @@

   osname, nodename, release, version, arch = os.uname()
   if osname != 'Linux':
-    raise PrereqError("This tool only runs on Linux "
-                      "(detected OS: %s)." % osname)
+    raise PrereqError("This tool only runs on Linux"
+                      " (detected OS: %s)." % osname)

   if not release.startswith("2.6."):
-    raise PrereqError("Wrong major kernel version (detected %s, needs "
-                      "2.6.*)" % release)
+    raise PrereqError("Wrong major kernel version (detected %s, needs"
+                      " 2.6.*)" % release)

   if not os.path.ismount("/sys"):
-    raise PrereqError("Can't find a filesystem mounted at /sys. "
-                      "Please mount /sys.")
+    raise PrereqError("Can't find a filesystem mounted at /sys."
+                      " Please mount /sys.")

   if not os.path.isdir("/sys/block"):
-    raise SysconfigError("Can't find /sys/block directory. Has the "
-                         "layout of /sys changed?")
+    raise SysconfigError("Can't find /sys/block directory. Has the"
+                         " layout of /sys changed?")

   if not os.path.ismount("/proc"):
-    raise PrereqError("Can't find a filesystem mounted at /proc. "
-                      "Please mount /proc.")
+    raise PrereqError("Can't find a filesystem mounted at /proc."
+                      " Please mount /proc.")

   if not os.path.exists("/proc/mounts"):
     raise SysconfigError("Can't find /proc/mounts")
@@ -228,9 +228,9 @@
       vg_free: The available space in the volume group
   """

-  result = ExecCommand("vgs --nohead -o lv_count,vg_size,"
-                       "vg_free --nosuffix --units g "
-                       "--ignorelockingfailure %s" % vgname)
+  result = ExecCommand("vgs --nohead -o lv_count,vg_size,vg_free"
+                       " --nosuffix --units g"
+                       " --ignorelockingfailure %s" % vgname)
   if not result.failed:
     try:
       lv_count, vg_size, vg_free = result.stdout.strip().split()
@@ -272,12 +272,12 @@
       break
     time.sleep(0.250)
   else:
-    raise SysconfigError("the device file %s does not exist, but the block "
-                         "device exists in the /sys/block tree" % path)
+    raise SysconfigError("the device file %s does not exist, but the block"
+                         " device exists in the /sys/block tree" % path)
   rdev = os.stat(path).st_rdev
   if devnum != rdev:
-    raise SysconfigError("For device %s, the major:minor in /dev is %04x "
-                         "while the major:minor in sysfs is %s" %
+    raise SysconfigError("For device %s, the major:minor in /dev is %04x"
+                         " while the major:minor in sysfs is %s" %
                          (path, rdev, devnum))


@@ -546,14 +546,14 @@
   """

   if not CheckReread(name):
-    raise OperationalError("CRITICAL: disk %s you selected seems to be in "
-                           "use. ABORTING!" % name)
+    raise OperationalError("CRITICAL: disk %s you selected seems to be in"
+                           " use. ABORTING!" % name)

   fd = os.open("/dev/%s" % name, os.O_RDWR | os.O_SYNC)
   olddata = os.read(fd, 512)
   if len(olddata) != 512:
-    raise OperationalError("CRITICAL: Can't read partition table information "
-                           "from /dev/%s (needed 512 bytes, got %d" %
+    raise OperationalError("CRITICAL: Can't read partition table information"
+                           " from /dev/%s (needed 512 bytes, got %d" %
                            (name, len(olddata)))
   newdata = "\0" * 512
   os.lseek(fd, 0, 0)
@@ -561,19 +561,19 @@
   os.close(fd)
   if bytes_written != 512:
     raise OperationalError("CRITICAL: Can't write partition table information"
-                           " to /dev/%s (tried to write 512 bytes, written "
-                           "%d. I don't know how to cleanup. Sorry." %
+                           " to /dev/%s (tried to write 512 bytes, written"
+                           " %d. I don't know how to cleanup. Sorry." %
                            (name, bytes_written))

   if not CheckReread(name):
     fd = os.open("/dev/%s" % name, os.O_RDWR | os.O_SYNC)
     os.write(fd, olddata)
     os.close(fd)
-    raise OperationalError("CRITICAL: disk %s which I have just wiped cannot "
-                           "reread partition table. Most likely, it is "
-                           "in use. You have to clean after this yourself. "
-                           "I tried to restore the old partition table, "
-                           "but I cannot guarantee nothing has broken." %
+    raise OperationalError("CRITICAL: disk %s which I have just wiped cannot"
+                           " reread partition table. Most likely, it is"
+                           " in use. You have to clean after this yourself."
+                           " I tried to restore the old partition table,"
+                           " but I cannot guarantee nothing has broken." %
                            name)


@@ -589,11 +589,11 @@
   result = ExecCommand(
     'echo ,,8e, | sfdisk /dev/%s' % name)
   if result.failed:
-    raise OperationalError("CRITICAL: disk %s which I have just partitioned "
-                           "cannot reread its partition table, or there "
-                           "is some other sfdisk error. Likely, it is in "
-                           "use. You have to clean this yourself. Error "
-                           "message from sfdisk: %s" %
+    raise OperationalError("CRITICAL: disk %s which I have just partitioned"
+                           " cannot reread its partition table, or there"
+                           " is some other sfdisk error. Likely, it is in"
+                           " use. You have to clean this yourself. Error"
+                           " message from sfdisk: %s" %
                            (name, result.output))


@@ -609,9 +609,9 @@
   """
   result = ExecCommand("pvcreate -yff /dev/%s1 " % name)
   if result.failed:
-    raise OperationalError("I cannot create a physical volume on "
-                           "partition /dev/%s1. Error message: %s. "
-                           "Please clean up yourself." %
+    raise OperationalError("I cannot create a physical volume on"
+                           " partition /dev/%s1. Error message: %s."
+                           " Please clean up yourself." %
                            (name, result.output))


@@ -628,9 +628,9 @@
   pnames = ["'/dev/%s1'" % disk for disk in disks]
   result = ExecCommand("vgcreate -s 64MB '%s' %s" % (vgname, " ".join(pnames)))
   if result.failed:
-    raise OperationalError("I cannot create the volume group %s from "
-                           "disks %s. Error message: %s. Please clean up "
-                           "yourself." %
+    raise OperationalError("I cannot create the volume group %s from"
+                           " disks %s. Error message: %s. Please clean up"
+                           " yourself." %
                            (vgname, " ".join(disks), result.output))


@@ -651,8 +651,8 @@

   sysdisks = GetDiskList()
   if not sysdisks:
-    raise PrereqError("no disks found (I looked for "
-                      "non-removable block devices).")
+    raise PrereqError("no disks found (I looked for"
+                      " non-removable block devices).")
   sysd_free = []
   sysd_used = []
   for name, size, dev, part, used in sysdisks:
@@ -678,6 +678,7 @@

   return disklist

+
 def BootStrap():
   """Actual main routine."""

@@ -711,10 +712,10 @@
   status, lv_count, size, free = CheckVGExists(vgname)
   if status:
     print "Done! %s: size %s GiB, disks: %s" % (vgname, size,
-                                                ",".join(disklist))
+                                              ",".join(disklist))
   else:
-    raise OperationalError("Although everything seemed ok, the volume "
-                           "group did not get created.")
+    raise OperationalError("Although everything seemed ok, the volume"
+                           " group did not get created.")


 def main():
@@ -727,34 +728,34 @@
     BootStrap()
   except PrereqError, err:
     print >> sys.stderr, "The prerequisites for running this tool are not met."
-    print >> sys.stderr, ("Please make sure you followed all the steps in "
-                          "the build document.")
+    print >> sys.stderr, ("Please make sure you followed all the steps in"
+                          " the build document.")
     print >> sys.stderr, "Description: %s" % str(err)
     sys.exit(1)
   except SysconfigError, err:
-    print >> sys.stderr, ("This system's configuration seems wrong, at "
-                          "least is not what I expect.")
-    print >> sys.stderr, ("Please check that the installation didn't fail "
-                          "at some step.")
+    print >> sys.stderr, ("This system's configuration seems wrong, at"
+                          " least is not what I expect.")
+    print >> sys.stderr, ("Please check that the installation didn't fail"
+                          " at some step.")
     print >> sys.stderr, "Description: %s" % str(err)
     sys.exit(1)
   except ParameterError, err:
-    print >> sys.stderr, ("Some parameters you gave to the program or the "
-                          "invocation is wrong. ")
+    print >> sys.stderr, ("Some parameters you gave to the program or the"
+                          " invocation is wrong. ")
     print >> sys.stderr, "Description: %s" % str(err)
     Usage()
   except OperationalError, err:
-    print >> sys.stderr, ("A serious error has happened while modifying "
-                          "the system's configuration.")
-    print >> sys.stderr, ("Please review the error message below and make "
-                          "sure you clean up yourself.")
-    print >> sys.stderr, ("It is most likely that the system configuration "
-                          "has been partially altered.")
+    print >> sys.stderr, ("A serious error has happened while modifying"
+                          " the system's configuration.")
+    print >> sys.stderr, ("Please review the error message below and make"
+                          " sure you clean up yourself.")
+    print >> sys.stderr, ("It is most likely that the system configuration"
+                          " has been partially altered.")
     print >> sys.stderr, str(err)
     sys.exit(1)
   except ProgrammingError, err:
-    print >> sys.stderr, ("Internal application error. Please signal this "
-                          "to xencluster-team.")
+    print >> sys.stderr, ("Internal application error. Please signal this"
+                          " to xencluster-team.")
     print >> sys.stderr, "Error description: %s" % str(err)
     sys.exit(1)
   except Error, err:
