Revision a2fd9afc

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1246,6 +1246,8 @@
     logger.Info("Removing node %s from config" % node.name)
 
     self.cfg.RemoveNode(node.name)
+    # Remove the node from the Ganeti Lock Manager
+    self.context.glm.remove(locking.LEVEL_NODE, node.name)
 
     utils.RemoveHostFromEtcHosts(node.name)
 
@@ -1624,6 +1626,8 @@
     if not self.op.readd:
       logger.Info("adding node %s to cluster.conf" % node)
       self.cfg.AddNode(new_node)
+      # Add the new node to the Ganeti Lock Manager
+      self.context.glm.add(locking.LEVEL_NODE, node)
 
 
 class LUMasterFailover(LogicalUnit):
@@ -2366,6 +2370,8 @@
     logger.Info("removing instance %s out of cluster config" % instance.name)
 
     self.cfg.RemoveInstance(instance.name)
+    # Remove the instance from the Ganeti Lock Manager
+    self.context.glm.remove(locking.LEVEL_INSTANCE, instance.name)
 
 
 class LUQueryInstances(NoHooksLU):
@@ -3224,6 +3230,8 @@
     feedback_fn("adding instance %s to cluster config" % instance)
 
     self.cfg.AddInstance(iobj)
+    # Add the new instance to the Ganeti Lock Manager
+    self.context.glm.add(locking.LEVEL_INSTANCE, instance)
 
     if self.op.wait_for_sync:
       disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
@@ -3238,6 +3246,8 @@
     if disk_abort:
       _RemoveDisks(iobj, self.cfg)
       self.cfg.RemoveInstance(iobj.name)
+      # Remove the new instance from the Ganeti Lock Manager
+      self.context.glm.remove(locking.LEVEL_INSTANCE, iobj.name)
       raise errors.OpExecError("There are some degraded disks for"
                                " this instance")
 

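The pattern in this revision is symmetry: every configuration change is paired with a matching lock-manager update. Each cfg.AddNode/cfg.AddInstance gains a glm.add, and each cfg.RemoveNode/cfg.RemoveInstance a glm.remove, including the disk_abort rollback path, so the lock manager never tracks names the configuration has already forgotten. A minimal sketch of what such bookkeeping could look like follows. It is illustrative only: only the add(level, name)/remove(level, name) call shape and the locking.LEVEL_NODE/locking.LEVEL_INSTANCE constants appear in the diff above; the ToyLockManager class, its internals, and the LEVEL_* string values are assumptions, not the real GanetiLockManager.

# Illustrative sketch only -- not the real GanetiLockManager.
LEVEL_NODE = "node"          # hypothetical stand-in for locking.LEVEL_NODE
LEVEL_INSTANCE = "instance"  # hypothetical stand-in for locking.LEVEL_INSTANCE

class ToyLockManager(object):
  """Tracks which names are lockable at each locking level."""

  def __init__(self):
    self._names = {LEVEL_NODE: set(), LEVEL_INSTANCE: set()}

  def add(self, level, name):
    # Mirror of cfg.AddNode()/cfg.AddInstance(): register an object
    # as lockable as soon as it enters the configuration
    self._names[level].add(name)

  def remove(self, level, name):
    # Mirror of cfg.RemoveNode()/cfg.RemoveInstance(): forget objects
    # that no longer exist, including rollbacks such as the
    # disk_abort path above
    self._names[level].discard(name)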