Don't pass sstore to LUs anymore
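
The patch drops the module-level dependency on ssconf: the configuration version is checked against the deserialized data itself (_ValidateConfig), and ConfigWriter gains getters for the cluster-wide values that logical units used to read through ssconf.SimpleStore. A minimal sketch of the resulting call pattern, assuming 'cfg' is the ConfigWriter instance handed to an LU (the helper name is illustrative, not part of the patch):

    # Illustrative only: cluster data is now read through ConfigWriter
    # getters instead of ssconf.SimpleStore.
    def _DescribeCluster(cfg):
      return {
        "name": cfg.GetClusterName(),
        "master": cfg.GetMasterNode(),
        "master_ip": cfg.GetMasterIP(),
        "master_netdev": cfg.GetMasterNetdev(),
        "file_storage_dir": cfg.GetFileStorageDir(),
        "hypervisor": cfg.GetHypervisorType(),
        }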
diff --git a/lib/config.py b/lib/config.py
index f823a00..57c56d8 100644
@@ -34,29 +34,25 @@ much memory.
 import os
 import tempfile
 import random
-import re
+import logging
 
 from ganeti import errors
 from ganeti import locking
-from ganeti import logger
 from ganeti import utils
 from ganeti import constants
 from ganeti import rpc
 from ganeti import objects
 from ganeti import serializer
-from ganeti import ssconf
 
 
 _config_lock = locking.SharedLock()
 
 
-def ValidateConfig():
-  sstore = ssconf.SimpleStore()
-
-  if sstore.GetConfigVersion() != constants.CONFIG_VERSION:
+def _ValidateConfig(data):
+  if data.version != constants.CONFIG_VERSION:
     raise errors.ConfigurationError("Cluster configuration version"
                                     " mismatch, got %s instead of %s" %
-                                    (sstore.GetConfigVersion(),
+                                    (data.version,
                                      constants.CONFIG_VERSION))
 
 
@@ -77,6 +73,7 @@ class ConfigWriter:
     else:
       self._cfg_file = cfg_file
     self._temporary_ids = set()
+    self._temporary_drbds = {}
     # Note: in order to prevent errors when resolving our name in
     # _DistributeConfig, we compute it here once and reuse it; it's
     # better to raise an error before starting to modify the config
@@ -126,6 +123,25 @@ class ConfigWriter:
     all_macs = self._AllMACs()
     return mac in all_macs
 
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GenerateDRBDSecret(self):
+    """Generate a DRBD secret.
+
+    This checks the current disks for duplicates.
+
+    """
+    self._OpenConfig()
+    all_secrets = self._AllDRBDSecrets()
+    retries = 64
+    while retries > 0:
+      secret = utils.GenerateSecret()
+      if secret not in all_secrets:
+        break
+      retries -= 1
+    else:
+      raise errors.ConfigurationError("Can't generate unique DRBD secret")
+    return secret
+
   def _ComputeAllLVs(self):
     """Compute the list of all LVs.
 
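
GenerateDRBDSecret, added above, keeps drawing random secrets until it finds one not already present in the configuration, using a while/else loop: the else branch runs only if the retry budget is exhausted without a break. A self-contained sketch of the same pattern, with illustrative names:

    import random

    def pick_unique(taken, retries=64):
      # Keep drawing until the candidate is unused; the 'else' clause fires
      # only if the loop ends without hitting 'break'.
      while retries > 0:
        candidate = random.randrange(1 << 20)
        if candidate not in taken:
          break
        retries -= 1
      else:
        raise ValueError("no unique value found")
      return candidate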
@@ -185,6 +201,25 @@ class ConfigWriter:
 
     return result
 
+  def _AllDRBDSecrets(self):
+    """Return all DRBD secrets present in the config.
+
+    """
+    def helper(disk, result):
+      """Recursively gather secrets from this disk."""
+      if disk.dev_type == constants.DT_DRBD8:
+        result.append(disk.logical_id[5])
+      if disk.children:
+        for child in disk.children:
+          helper(child, result)
+
+    result = []
+    for instance in self._config_data.instances.values():
+      for disk in instance.disks:
+        helper(disk, result)
+
+    return result
+
   @locking.ssynchronized(_config_lock, shared=1)
   def VerifyConfig(self):
     """Stub verify function.
@@ -193,6 +228,7 @@ class ConfigWriter:
 
     result = []
     seen_macs = []
+    ports = {}
     data = self._config_data
     for instance_name in data.instances:
       instance = data.instances[instance_name]
@@ -209,6 +245,43 @@ class ConfigWriter:
                         (instance_name, idx, nic.mac))
         else:
           seen_macs.append(nic.mac)
+
+      # gather the drbd ports for duplicate checks
+      for dsk in instance.disks:
+        if dsk.dev_type in constants.LDS_DRBD:
+          tcp_port = dsk.logical_id[2]
+          if tcp_port not in ports:
+            ports[tcp_port] = []
+          ports[tcp_port].append((instance.name, "drbd disk %s" % dsk.iv_name))
+      # gather network port reservation
+      net_port = getattr(instance, "network_port", None)
+      if net_port is not None:
+        if net_port not in ports:
+          ports[net_port] = []
+        ports[net_port].append((instance.name, "network port"))
+
+    # cluster-wide pool of free ports
+    for free_port in self._config_data.cluster.tcpudp_port_pool:
+      if free_port not in ports:
+        ports[free_port] = []
+      ports[free_port].append(("cluster", "port marked as free"))
+
+    # compute tcp/udp duplicate ports
+    keys = ports.keys()
+    keys.sort()
+    for pnum in keys:
+      pdata = ports[pnum]
+      if len(pdata) > 1:
+        txt = ", ".join(["%s/%s" % val for val in pdata])
+        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
+
+    # highest used tcp port check
+    if keys:
+      if keys[-1] > self._config_data.cluster.highest_used_port:
+        result.append("Highest used port mismatch, saved %s, computed %s" %
+                      (self._config_data.cluster.highest_used_port,
+                       keys[-1]))
+
     return result
 
   def _UnlockedSetDiskID(self, disk, node_name):
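
The port checks added to VerifyConfig above build an inverted index from TCP/UDP port number to everything that claims it (DRBD disks, instance network ports, the cluster's free-port pool) and then report any port with more than one user. The core of that check as a self-contained sketch, with illustrative names:

    def find_duplicate_ports(claims):
      # claims: iterable of (port, owner, purpose) tuples
      ports = {}
      for port, owner, purpose in claims:
        ports.setdefault(port, []).append((owner, purpose))
      problems = []
      for pnum in sorted(ports):
        pdata = ports[pnum]
        if len(pdata) > 1:
          txt = ", ".join(["%s/%s" % val for val in pdata])
          problems.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
      return problems

    # e.g. a port used by a DRBD disk but also marked as free:
    find_duplicate_ports([(11000, "instance1", "drbd disk sda"),
                          (11000, "cluster", "port marked as free")])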
@@ -229,8 +302,8 @@ class ConfigWriter:
 
     if disk.logical_id is None and disk.physical_id is not None:
       return
-    if disk.dev_type in constants.LDS_DRBD:
-      pnode, snode, port = disk.logical_id
+    if disk.dev_type == constants.LD_DRBD8:
+      pnode, snode, port, pminor, sminor, secret = disk.logical_id
       if node_name not in (pnode, snode):
         raise errors.ConfigurationError("DRBD device not knowing node %s" %
                                         node_name)
@@ -239,12 +312,12 @@ class ConfigWriter:
       if pnode_info is None or snode_info is None:
         raise errors.ConfigurationError("Can't find primary or secondary node"
                                         " for %s" % str(disk))
+      p_data = (pnode_info.secondary_ip, port)
+      s_data = (snode_info.secondary_ip, port)
       if pnode == node_name:
-        disk.physical_id = (pnode_info.secondary_ip, port,
-                            snode_info.secondary_ip, port)
+        disk.physical_id = p_data + s_data + (pminor, secret)
       else: # it must be secondary, we tested above
-        disk.physical_id = (snode_info.secondary_ip, port,
-                            pnode_info.secondary_ip, port)
+        disk.physical_id = s_data + p_data + (sminor, secret)
     else:
       disk.physical_id = disk.logical_id
     return
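
With the change above, a drbd8 logical_id carries six elements and the computed physical_id mirrors it from the point of view of the node being configured. A worked example of the mapping, with placeholder addresses:

    # logical_id = (pnode, snode, port, pminor, sminor, secret)
    logical_id = ("node1", "node2", 11000, 0, 1, "s3cr3t")
    pnode, snode, port, pminor, sminor, secret = logical_id

    p_data = ("192.0.2.1", port)   # primary node's secondary_ip, port
    s_data = ("192.0.2.2", port)   # secondary node's secondary_ip, port

    # On the primary the local address pair comes first and pminor is used;
    # on the secondary the pairs are swapped and sminor is used.
    physical_id_on_primary = p_data + s_data + (pminor, secret)
    physical_id_on_secondary = s_data + p_data + (sminor, secret)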
@@ -307,6 +380,157 @@ class ConfigWriter:
     self._WriteConfig()
     return port
 
+  def _ComputeDRBDMap(self, instance):
+    """Compute the used DRBD minor/nodes.
+
+    Return: dictionary of node_name: dict of minor: instance_name. The
+    returned dict will have all the nodes in it (even if with an empty
+    list).
+
+    """
+    def _AppendUsedPorts(instance_name, disk, used):
+      if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
+        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
+        for node, port in ((nodeA, minorA), (nodeB, minorB)):
+          assert node in used, "Instance node not found in node list"
+          if port in used[node]:
+            raise errors.ProgrammerError("DRBD minor already used:"
+                                         " %s/%s, %s/%s" %
+                                         (node, port, instance_name,
+                                          used[node][port]))
+
+          used[node][port] = instance_name
+      if disk.children:
+        for child in disk.children:
+          _AppendUsedPorts(instance_name, child, used)
+
+    my_dict = dict((node, {}) for node in self._config_data.nodes)
+    for (node, minor), instance in self._temporary_drbds.iteritems():
+      my_dict[node][minor] = instance
+    for instance in self._config_data.instances.itervalues():
+      for disk in instance.disks:
+        _AppendUsedPorts(instance.name, disk, my_dict)
+    return my_dict
+
+  @locking.ssynchronized(_config_lock)
+  def AllocateDRBDMinor(self, nodes, instance):
+    """Allocate a drbd minor.
+
+    The free minor will be automatically computed from the existing
+    devices. A node can be given multiple times in order to allocate
+    multiple minors. The result is the list of minors, in the same
+    order as the passed nodes.
+
+    """
+    self._OpenConfig()
+
+    d_map = self._ComputeDRBDMap(instance)
+    result = []
+    for nname in nodes:
+      ndata = d_map[nname]
+      if not ndata:
+        # no minors used, we can start at 0
+        result.append(0)
+        ndata[0] = instance
+        self._temporary_drbds[(nname, 0)] = instance
+        continue
+      keys = ndata.keys()
+      keys.sort()
+      ffree = utils.FirstFree(keys)
+      if ffree is None:
+        # return the next minor
+        # TODO: implement high-limit check
+        minor = keys[-1] + 1
+      else:
+        minor = ffree
+      result.append(minor)
+      ndata[minor] = instance
+      assert (nname, minor) not in self._temporary_drbds, \
+             "Attempt to reuse reserved DRBD minor"
+      self._temporary_drbds[(nname, minor)] = instance
+    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
+                  nodes, result)
+    return result
+
+  @locking.ssynchronized(_config_lock)
+  def ReleaseDRBDMinors(self, instance):
+    """Release temporary drbd minors allocated for a given instance.
+
+    This should be called on both the error paths and on the success
+    paths (after the instance has been added or updated).
+
+    @type instance: string
+    @param instance: the instance for which temporary minors should be
+                     released
+
+    """
+    for key, name in self._temporary_drbds.items():
+      if name == instance:
+        del self._temporary_drbds[key]
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetConfigVersion(self):
+    """Get the configuration version.
+
+    @return: Config version
+
+    """
+    return self._config_data.version
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetClusterName(self):
+    """Get cluster name.
+
+    @return: Cluster name
+
+    """
+    self._OpenConfig()
+    return self._config_data.cluster.cluster_name
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterNode(self):
+    """Get the hostname of the master node for this cluster.
+
+    @return: Master hostname
+
+    """
+    self._OpenConfig()
+    return self._config_data.cluster.master_node
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterIP(self):
+    """Get the IP of the master node for this cluster.
+
+    @return: Master IP
+
+    """
+    self._OpenConfig()
+    return self._config_data.cluster.master_ip
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetMasterNetdev(self):
+    """Get the master network device for this cluster.
+
+    """
+    self._OpenConfig()
+    return self._config_data.cluster.master_netdev
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetFileStorageDir(self):
+    """Get the file storage dir for this cluster.
+
+    """
+    self._OpenConfig()
+    return self._config_data.cluster.file_storage_dir
+
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetHypervisorType(self):
+    """Get the hypervisor type for this cluster.
+
+    """
+    self._OpenConfig()
+    return self._config_data.cluster.hypervisor
+
   @locking.ssynchronized(_config_lock, shared=1)
   def GetHostKey(self):
     """Return the rsa hostkey from the config.
@@ -332,10 +556,12 @@ class ConfigWriter:
 
     if instance.disk_template != constants.DT_DISKLESS:
       all_lvs = instance.MapLVsByNode()
-      logger.Info("Instance '%s' DISK_LAYOUT: %s" % (instance.name, all_lvs))
+      logging.info("Instance '%s' DISK_LAYOUT: %s", instance.name, all_lvs)
 
     self._OpenConfig()
+    instance.serial_no = 1
     self._config_data.instances[instance.name] = instance
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   def _SetInstanceStatus(self, instance_name, status):
@@ -354,6 +580,7 @@ class ConfigWriter:
     instance = self._config_data.instances[instance_name]
     if instance.status != status:
       instance.status = status
+      instance.serial_no += 1
       self._WriteConfig()
 
   @locking.ssynchronized(_config_lock)
@@ -373,6 +600,7 @@ class ConfigWriter:
     if instance_name not in self._config_data.instances:
       raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
     del self._config_data.instances[instance_name]
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   @locking.ssynchronized(_config_lock)
@@ -401,6 +629,7 @@ class ConfigWriter:
                                                            disk.iv_name))
 
     self._config_data.instances[inst.name] = inst
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   @locking.ssynchronized(_config_lock)
@@ -410,6 +639,15 @@ class ConfigWriter:
     """
     self._SetInstanceStatus(instance_name, "down")
 
     """
     self._SetInstanceStatus(instance_name, "down")
 
+  def _UnlockedGetInstanceList(self):
+    """Get the list of instances.
+
+    This function is for internal use, when the config lock is already held.
+
+    """
+    self._OpenConfig()
+    return self._config_data.instances.keys()
+
   @locking.ssynchronized(_config_lock, shared=1)
   def GetInstanceList(self):
     """Get the list of instances.
@@ -419,9 +657,7 @@ class ConfigWriter:
       these contains all the instances, also the ones in Admin_down state
 
     """
-    self._OpenConfig()
-
-    return self._config_data.instances.keys()
+    return self._UnlockedGetInstanceList()
 
   @locking.ssynchronized(_config_lock, shared=1)
   def ExpandInstanceName(self, short_name):
@@ -433,6 +669,19 @@ class ConfigWriter:
     return utils.MatchNameComponent(short_name,
                                     self._config_data.instances.keys())
 
+  def _UnlockedGetInstanceInfo(self, instance_name):
+    """Returns informations about an instance.
+
+    This function is for internal use, when the config lock is already held.
+
+    """
+    self._OpenConfig()
+
+    if instance_name not in self._config_data.instances:
+      return None
+
+    return self._config_data.instances[instance_name]
+
   @locking.ssynchronized(_config_lock, shared=1)
   def GetInstanceInfo(self, instance_name):
     """Returns informations about an instance.
@@ -447,12 +696,20 @@ class ConfigWriter:
       the instance object
 
     """
-    self._OpenConfig()
+    return self._UnlockedGetInstanceInfo(instance_name)
 
-    if instance_name not in self._config_data.instances:
-      return None
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetAllInstancesInfo(self):
+    """Get the configuration of all instances.
 
-    return self._config_data.instances[instance_name]
+    @rtype: dict
+    @returns: dict of (instance, instance_info), where instance_info is what
+              would GetInstanceInfo return for the node
+
+    """
+    my_dict = dict([(instance, self._UnlockedGetInstanceInfo(instance))
+                    for instance in self._UnlockedGetInstanceList()])
+    return my_dict
 
   @locking.ssynchronized(_config_lock)
   def AddNode(self, node):
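
GetInstanceInfo and GetAllInstancesInfo above follow the same pattern as GetInstanceList: the public method acquires the shared config lock through the decorator and delegates to an _Unlocked* helper, so methods that already hold the lock can reuse the helper without trying to acquire it a second time. Minimal sketch of the shape (the class and attribute names are illustrative):

    class _Example(object):
      @locking.ssynchronized(_config_lock, shared=1)
      def GetThing(self, name):
        return self._UnlockedGetThing(name)

      def _UnlockedGetThing(self, name):
        # Safe to call from code paths that already hold _config_lock.
        return self._things.get(name)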
@@ -462,8 +719,12 @@ class ConfigWriter:
       node: an object.Node instance
 
     """
+    logging.info("Adding node %s to configuration" % node.name)
+
     self._OpenConfig()
+    node.serial_no = 1
     self._config_data.nodes[node.name] = node
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   @locking.ssynchronized(_config_lock)
@@ -471,11 +732,14 @@ class ConfigWriter:
     """Remove a node from the configuration.
 
     """
     """Remove a node from the configuration.
 
     """
+    logging.info("Removing node %s from configuration" % node_name)
+
     self._OpenConfig()
     if node_name not in self._config_data.nodes:
       raise errors.ConfigurationError("Unknown node '%s'" % node_name)
 
     del self._config_data.nodes[node_name]
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   @locking.ssynchronized(_config_lock, shared=1)
@@ -535,17 +799,23 @@ class ConfigWriter:
     return self._UnlockedGetNodeList()
 
   @locking.ssynchronized(_config_lock, shared=1)
-  def DumpConfig(self):
-    """Return the entire configuration of the cluster.
+  def GetAllNodesInfo(self):
+    """Get the configuration of all nodes.
+
+    @rtype: dict
+    @returns: dict of (node, node_info), where node_info is what
+              would GetNodeInfo return for the node
+
     """
     """
-    self._OpenConfig()
-    return self._config_data
+    my_dict = dict([(node, self._UnlockedGetNodeInfo(node))
+                    for node in self._UnlockedGetNodeList()])
+    return my_dict
 
   def _BumpSerialNo(self):
     """Bump up the serial number of the config.
 
     """
-    self._config_data.cluster.serial_no += 1
+    self._config_data.serial_no += 1
 
   def _OpenConfig(self):
     """Read the config data from disk.
@@ -567,9 +837,6 @@ class ConfigWriter:
       # data is current, so skip loading of config file
       return
 
-    # Make sure the configuration has the right version
-    ValidateConfig()
-
     f = open(self._cfg_file, 'r')
     try:
       try:
@@ -578,6 +845,10 @@ class ConfigWriter:
         raise errors.ConfigurationError(err)
     finally:
       f.close()
+
+    # Make sure the configuration has the right version
+    _ValidateConfig(data)
+
     if (not hasattr(data, 'cluster') or
         not hasattr(data.cluster, 'rsahostkeypub')):
       raise errors.ConfigurationError("Incomplete configuration"
@@ -608,8 +879,8 @@ class ConfigWriter:
     result = rpc.call_upload_file(nodelist, self._cfg_file)
     for node in nodelist:
       if not result[node]:
-        logger.Error("copy of file %s to node %s failed" %
-                     (self._cfg_file, node))
+        logging.error("copy of file %s to node %s failed",
+                      self._cfg_file, node)
         bad = True
     return not bad
 
@@ -644,36 +915,29 @@ class ConfigWriter:
     self._DistributeConfig()
 
   @locking.ssynchronized(_config_lock)
-  def InitConfig(self, node, primary_ip, secondary_ip,
-                 hostkeypub, mac_prefix, vg_name, def_bridge):
+  def InitConfig(self, version, cluster_config, master_node_config):
     """Create the initial cluster configuration.
 
     It will contain the current node, which will also be the master
     """Create the initial cluster configuration.
 
     It will contain the current node, which will also be the master
-    node, and no instances or operating systmes.
+    node, and no instances.
 
-    Args:
-      node: the nodename of the initial node
-      primary_ip: the IP address of the current host
-      secondary_ip: the secondary IP of the current host or None
-      hostkeypub: the public hostkey of this host
-
-    """
-    hu_port = constants.FIRST_DRBD_PORT - 1
-    globalconfig = objects.Cluster(serial_no=1,
-                                   rsahostkeypub=hostkeypub,
-                                   highest_used_port=hu_port,
-                                   mac_prefix=mac_prefix,
-                                   volume_group_name=vg_name,
-                                   default_bridge=def_bridge,
-                                   tcpudp_port_pool=set())
-    if secondary_ip is None:
-      secondary_ip = primary_ip
-    nodeconfig = objects.Node(name=node, primary_ip=primary_ip,
-                              secondary_ip=secondary_ip)
-
-    self._config_data = objects.ConfigData(nodes={node: nodeconfig},
+    @type version: int
+    @param version: Configuration version
+    @type cluster_config: objects.Cluster
+    @param cluster_config: Cluster configuration
+    @type master_node_config: objects.Node
+    @param master_node_config: Master node configuration
+
+    """
+    nodes = {
+      master_node_config.name: master_node_config,
+      }
+
+    self._config_data = objects.ConfigData(version=version,
+                                           cluster=cluster_config,
+                                           nodes=nodes,
                                            instances={},
-                                           cluster=globalconfig)
+                                           serial_no=1)
     self._WriteConfig()
 
   @locking.ssynchronized(_config_lock, shared=1)
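
InitConfig now takes the configuration version plus ready-made cluster and master-node objects instead of building them from loose parameters. A sketch of the new call shape; the field values (including pub_key) are placeholders and the real objects are constructed by the cluster-init code with more fields:

    master_node_config = objects.Node(name="node1.example.com",
                                      primary_ip="192.0.2.1",
                                      secondary_ip="192.0.2.1")
    cluster_config = objects.Cluster(serial_no=1,
                                     rsahostkeypub=pub_key,
                                     highest_used_port=constants.FIRST_DRBD_PORT - 1,
                                     tcpudp_port_pool=set())
    cfg.InitConfig(constants.CONFIG_VERSION, cluster_config,
                   master_node_config)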
@@ -691,6 +955,7 @@ class ConfigWriter:
     """
     self._OpenConfig()
     self._config_data.cluster.volume_group_name = vg_name
     """
     self._OpenConfig()
     self._config_data.cluster.volume_group_name = vg_name
+    self._config_data.cluster.serial_no += 1
     self._WriteConfig()
 
   @locking.ssynchronized(_config_lock, shared=1)
@@ -747,4 +1012,6 @@ class ConfigWriter:
     if not test:
       raise errors.ConfigurationError("Configuration updated since object"
                                       " has been read or unknown object")
+    target.serial_no += 1
+
     self._WriteConfig()