Revision 4d4a651d

b/daemons/ganeti-confd
@@ -350,7 +350,8 @@
   try:
     processor.Enable()
   except errors.ConfigurationError:
-    # If enabling the processor has failed, we can still go on, but confd will be disabled
+    # If enabling the processor has failed, we can still go on, but confd will
+    # be disabled
     logging.warning("Confd is starting in disabled mode")
     pass
   server = ConfdAsyncUDPServer(options.bind_address, options.port, processor)
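
The hunk above, like every other hunk in this revision, is a pure line-length cleanup: over-long lines are re-wrapped to fit the 80-column limit, with no change in behaviour. A small, self-contained illustration of the wrapping style being applied (the function and values below are invented for the example, they are not Ganeti APIs):

# Invented example, only to show the re-wrapping style used in this revision.
def call_some_operation(target_node, device, old_volumes, new_volumes):
  # Stand-in for a long RPC-style call; just echoes its arguments.
  return (target_node, device, old_volumes, new_volumes)

# Before: a single call that runs past the 80-column limit.
result = call_some_operation("node1.example.com", "disk/0", ["old_lv_data", "old_lv_meta"], ["new_lv_data"])

# After: continuation lines aligned under the opening parenthesis, as in the
# hunks of this revision, so every line stays within 80 columns.
result = call_some_operation("node1.example.com", "disk/0",
                             ["old_lv_data", "old_lv_meta"],
                             ["new_lv_data"])
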
b/lib/cmdlib.py
@@ -2004,7 +2004,8 @@
         else:
           rem_time = "no time estimate"
         lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
-                        (instance.disks[i].iv_name, mstat.sync_percent, rem_time))
+                        (instance.disks[i].iv_name, mstat.sync_percent,
+                         rem_time))

     # if we're done but degraded, let's do a few small retries, to
     # make sure we see a stable and not transient situation; therefore
@@ -6463,7 +6464,8 @@
     for dev, old_lvs, new_lvs in iv_names.itervalues():
       self.lu.LogInfo("Detaching %s drbd from local storage" % dev.iv_name)

-      result = self.rpc.call_blockdev_removechildren(self.target_node, dev, old_lvs)
+      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
+                                                     old_lvs)
       result.Raise("Can't detach drbd from local storage on node"
                    " %s for device %s" % (self.target_node, dev.iv_name))
       #dev.children = []
@@ -6489,14 +6491,16 @@
           rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

       self.lu.LogInfo("Renaming the old LVs on the target node")
-      result = self.rpc.call_blockdev_rename(self.target_node, rename_old_to_new)
+      result = self.rpc.call_blockdev_rename(self.target_node,
+                                             rename_old_to_new)
       result.Raise("Can't rename old LVs on node %s" % self.target_node)

       # Now we rename the new LVs to the old LVs
       self.lu.LogInfo("Renaming the new LVs on the target node")
       rename_new_to_old = [(new, old.physical_id)
                            for old, new in zip(old_lvs, new_lvs)]
-      result = self.rpc.call_blockdev_rename(self.target_node, rename_new_to_old)
+      result = self.rpc.call_blockdev_rename(self.target_node,
+                                             rename_new_to_old)
       result.Raise("Can't rename new LVs on node %s" % self.target_node)

       for old, new in zip(old_lvs, new_lvs):
@@ -6509,11 +6513,13 @@

       # Now that the new lvs have the old name, we can add them to the device
       self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
-      result = self.rpc.call_blockdev_addchildren(self.target_node, dev, new_lvs)
+      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
+                                                  new_lvs)
       msg = result.fail_msg
       if msg:
         for new_lv in new_lvs:
-          msg2 = self.rpc.call_blockdev_remove(self.target_node, new_lv).fail_msg
+          msg2 = self.rpc.call_blockdev_remove(self.target_node,
+                                               new_lv).fail_msg
           if msg2:
             self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                                hint=("cleanup manually the unused logical"
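
The three cmdlib.py hunks above all touch the same routine: it detaches the old LVs from the DRBD device, swaps the names of the old and new LVs, and re-attaches the new LVs as mirror children, removing them again if the attach fails. A condensed, hedged sketch of that per-disk flow, using the RPC call names exactly as they appear in the diff (the rpc and lu objects and the helper arguments are assumed; the real code has more error handling and runs inside a loop over iv_names):

# Hedged sketch of the flow shown in the hunks above; not the real cmdlib.py.
def replace_disk_local_storage(rpc, lu, target_node, dev, old_lvs, new_lvs,
                               ren_fn, temp_suffix):
  # 1. Detach the old LVs from the DRBD device.
  result = rpc.call_blockdev_removechildren(target_node, dev, old_lvs)
  result.Raise("Can't detach drbd from local storage on node"
               " %s for device %s" % (target_node, dev.iv_name))

  # 2. Rename the old LVs out of the way, then give the new LVs the old names.
  rename_old_to_new = [(lv, ren_fn(lv, temp_suffix)) for lv in old_lvs]
  result = rpc.call_blockdev_rename(target_node, rename_old_to_new)
  result.Raise("Can't rename old LVs on node %s" % target_node)
  rename_new_to_old = [(new, old.physical_id)
                       for old, new in zip(old_lvs, new_lvs)]
  result = rpc.call_blockdev_rename(target_node, rename_new_to_old)
  result.Raise("Can't rename new LVs on node %s" % target_node)

  # 3. Attach the renamed new LVs as mirror children of the DRBD device,
  #    rolling back (best effort) if that fails.
  result = rpc.call_blockdev_addchildren(target_node, dev, new_lvs)
  if result.fail_msg:
    for new_lv in new_lvs:
      msg2 = rpc.call_blockdev_remove(target_node, new_lv).fail_msg
      if msg2:
        lu.LogWarning("Can't rollback device %s: %s", dev, msg2)
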
@@ -6581,13 +6587,15 @@
     # after this, we must manually remove the drbd minors on both the
     # error and the success paths
     self.lu.LogStep(4, steps_total, "Changing drbd configuration")
-    minors = self.cfg.AllocateDRBDMinor([self.new_node for dev in self.instance.disks],
+    minors = self.cfg.AllocateDRBDMinor([self.new_node
+                                         for dev in self.instance.disks],
                                         self.instance.name)
     logging.debug("Allocated minors %r" % (minors,))

     iv_names = {}
     for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
-      self.lu.LogInfo("activating a new drbd on %s for disk/%d" % (self.new_node, idx))
+      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
+                      (self.new_node, idx))
       # create new devices on new_node; note that we create two IDs:
       # one without port, so the drbd will be activated without
       # networking information on the new node at this stage, and one
@@ -6598,8 +6606,10 @@
       else:
         p_minor = o_minor2

-      new_alone_id = (self.instance.primary_node, self.new_node, None, p_minor, new_minor, o_secret)
-      new_net_id = (self.instance.primary_node, self.new_node, o_port, p_minor, new_minor, o_secret)
+      new_alone_id = (self.instance.primary_node, self.new_node, None,
+                      p_minor, new_minor, o_secret)
+      new_net_id = (self.instance.primary_node, self.new_node, o_port,
+                    p_minor, new_minor, o_secret)

       iv_names[idx] = (dev, dev.children, new_net_id)
       logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
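
The two tuples re-wrapped above carry the same DRBD identity twice, as the surrounding comments explain: new_alone_id omits the port so the device can first be brought up on the new node without networking, while new_net_id includes the port for the later reconnect. A tiny illustration with made-up values (not taken from a real cluster):

# Made-up values, only to show the shape of the two logical IDs built above.
primary_node, new_node = "node1.example.com", "node3.example.com"
o_port, o_secret = 11000, "dummy-secret"
p_minor, new_minor = 0, 2

new_alone_id = (primary_node, new_node, None,    # no port: standalone first
                p_minor, new_minor, o_secret)
new_net_id = (primary_node, new_node, o_port,    # with port: for reconnecting
              p_minor, new_minor, o_secret)
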
@@ -6627,8 +6637,10 @@
                                  " soon as possible"))

     self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
-    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node], self.node_secondary_ip,
-                                               self.instance.disks)[self.instance.primary_node]
+    result = self.rpc.call_drbd_disconnect_net([self.instance.primary_node],
+                                               self.node_secondary_ip,
+                                               self.instance.disks)\
+                                              [self.instance.primary_node]

     msg = result.fail_msg
     if msg:
@@ -6649,13 +6661,17 @@
     # and now perform the drbd attach
     self.lu.LogInfo("Attaching primary drbds to new secondary"
                     " (standalone => connected)")
-    result = self.rpc.call_drbd_attach_net([self.instance.primary_node, self.new_node], self.node_secondary_ip,
-                                           self.instance.disks, self.instance.name,
+    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
+                                            self.new_node],
+                                           self.node_secondary_ip,
+                                           self.instance.disks,
+                                           self.instance.name,
                                            False)
     for to_node, to_result in result.items():
       msg = to_result.fail_msg
       if msg:
-        self.lu.LogWarning("Can't attach drbd disks on node %s: %s", to_node, msg,
+        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
+                           to_node, msg,
                            hint=("please do a gnt-instance info to see the"
                                  " status of disks"))
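
The last two cmdlib.py hunks re-wrap the calls that complete the secondary replacement: the primary's DRBD devices are first detached from the network (standalone) and then re-attached pointing at the new secondary. A hedged sketch of that sequence, with the RPC names and arguments taken from the diff as-is and the surrounding objects assumed:

# Hedged sketch of the disconnect/reconnect step above; not the real cmdlib.py.
def repoint_drbd_to_new_secondary(rpc, lu, instance, new_node,
                                  node_secondary_ip):
  # 1. Detach the primary's drbds from the network (=> standalone).
  result = rpc.call_drbd_disconnect_net([instance.primary_node],
                                        node_secondary_ip,
                                        instance.disks)[instance.primary_node]
  if result.fail_msg:
    lu.LogWarning("Can't detach drbds on %s: %s",
                  instance.primary_node, result.fail_msg)

  # 2. Re-attach them to the new secondary (standalone => connected).
  result = rpc.call_drbd_attach_net([instance.primary_node, new_node],
                                    node_secondary_ip, instance.disks,
                                    instance.name, False)
  for to_node, to_result in result.items():
    if to_result.fail_msg:
      lu.LogWarning("Can't attach drbd disks on node %s: %s",
                    to_node, to_result.fail_msg)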

  
b/lib/confd/querylib.py
@@ -68,7 +68,8 @@
 class PingQuery(ConfdQuery):
   """An empty confd query.

-  It will return success on an empty argument, and an error on any other argument.
+  It will return success on an empty argument, and an error on any other
+  argument.

   """
   def Exec(self, query):
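
Only PingQuery's docstring is touched here; the Exec body and confd's reply encoding are not part of this hunk. Purely as an illustration of the documented contract, a placeholder sketch (the status values, and treating "empty" as a false-ish argument, are assumptions, not confd's real constants):

# Placeholder sketch of the documented contract only; not the real PingQuery.
class PingQuerySketch(object):
  def Exec(self, query):
    if not query:                       # empty argument => success
      return ("ok", None)
    return ("error", "ping takes no arguments")
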
b/lib/jstore.py
@@ -133,8 +133,8 @@
         raise errors.JobQueueError("Can't read/parse the job queue serial file")

       if not must_lock:
-        # There's no need for more error handling. Closing the lock file below in
-        # case of an error will unlock it anyway.
+        # There's no need for more error handling. Closing the lock file below
+        # in case of an error will unlock it anyway.
         queue_lock.Unlock()

   except:
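
The re-wrapped comment leans on a property of POSIX file locks that is easy to demonstrate: closing the locked file releases the lock, so an error path that only closes the lock file can never leave it held. A small standalone demonstration with flock (Ganeti's own queue-lock wrapper is not shown in this hunk, so this is a generic illustration):

# Demonstrates that closing a locked file releases its lock (POSIX flock).
import fcntl
import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)

first = open(path, "w")
fcntl.flock(first, fcntl.LOCK_EX)

second = open(path, "w")
try:
  fcntl.flock(second, fcntl.LOCK_EX | fcntl.LOCK_NB)
  print("unexpected: acquired the lock while it was still held")
except IOError:
  print("lock is held by the first file object, as expected")

first.close()      # closing the first file releases its lock...
fcntl.flock(second, fcntl.LOCK_EX | fcntl.LOCK_NB)
print("...so the second lock attempt now succeeds")

second.close()
os.unlink(path)
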
b/lib/ssconf.py
@@ -65,8 +65,8 @@
     @type force: boolean
     @param force: whether to force the reload without checking the mtime
     @rtype: boolean
-    @return: boolean values that says whether we reloaded the configuration or not
-             (because we decided it was already up-to-date)
+    @return: boolean value that says whether we reloaded the configuration or
+             not (because we decided it was already up-to-date)

     """
     try:
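
The corrected @return line describes the usual mtime-gated reload pattern: skip re-reading the file when its modification time has not changed (unless force is set) and report whether a reload actually happened. A generic, hedged sketch of that contract, not the actual ssconf code:

# Generic illustration of the reload contract described above; not lib/ssconf.
import os

class CachedFile(object):
  def __init__(self, path):
    self._path = path
    self._mtime = None
    self.data = None

  def Reload(self, force=False):
    """Returns True if the file was (re)read, False if it was up to date."""
    mtime = os.stat(self._path).st_mtime
    if not force and mtime == self._mtime:
      # Already up-to-date, so we did not reload the configuration.
      return False
    fh = open(self._path)
    try:
      self.data = fh.read()
    finally:
      fh.close()
    self._mtime = mtime
    return True
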
b/lib/storage.py
@@ -189,7 +189,8 @@

     """
     field_to_idx = dict([(field_name, idx)
-                         for (idx, (field_name, _, _)) in enumerate(fields_def)])
+                         for (idx, (field_name, _, _)) in
+                         enumerate(fields_def)])

     lvm_fields = []

@@ -222,7 +223,8 @@
     lvm_name_to_idx = dict([(lvm_name, idx)
                            for (idx, lvm_name) in enumerate(lvm_fields)])
     field_to_idx = dict([(field_name, idx)
-                         for (idx, (field_name, _, _)) in enumerate(fields_def)])
+                         for (idx, (field_name, _, _)) in
+                         enumerate(fields_def)])

     data = []
     for raw_data in cmd_result:
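
Both storage.py hunks wrap the same dict-building comprehension: given fields_def, a sequence of three-element field definitions whose first element is the field name, it maps each name to its position. A tiny example with an invented fields_def shows what it computes:

# Invented fields_def, only to show what the wrapped comprehension computes.
fields_def = [("name", None, None),
              ("size", None, None),
              ("free", None, None)]

field_to_idx = dict([(field_name, idx)
                     for (idx, (field_name, _, _)) in
                     enumerate(fields_def)])

assert field_to_idx == {"name": 0, "size": 1, "free": 2}
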
b/test/ganeti.utils_unittest.py
@@ -879,7 +879,8 @@
     self.assertEqual(utils.MergeTime((1, 500000)), 1.5)
     self.assertEqual(utils.MergeTime((1218448917, 500000)), 1218448917.5)

-    self.assertEqual(round(utils.MergeTime((1218448917, 481000)), 3), 1218448917.481)
+    self.assertEqual(round(utils.MergeTime((1218448917, 481000)), 3),
+                     1218448917.481)
     self.assertEqual(round(utils.MergeTime((1, 801000)), 3), 1.801)

     self.assertRaises(AssertionError, utils.MergeTime, (0, -1))
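
For reference, the assertions in this hunk pin down utils.MergeTime's observable behaviour: it turns a (seconds, microseconds) tuple into a float number of seconds, and (0, -1) must trip an assertion. A sketch that is consistent with these tests, though not necessarily identical to the implementation in lib/utils.py:

# Sketch consistent with the test assertions above; the real utils.MergeTime
# may differ in details.
def MergeTime(timetuple):
  seconds, microseconds = timetuple
  assert 0 <= microseconds < 1000000    # so (0, -1) raises AssertionError
  return seconds + microseconds / 1000000.0

assert MergeTime((1, 500000)) == 1.5
assert round(MergeTime((1218448917, 481000)), 3) == 1218448917.481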
