env["INSTANCE_DISK_COUNT"] = disk_count
+ for source, kind in [(bep, "BE"), (hvp, "HV")]:
+ for key, value in source.items():
+ env["INSTANCE_%s_%s" % (kind, key)] = value
+
return env
+def _PreBuildNICHooksList(lu, nics):
+  """Build a list of nic information tuples.
+
+  This list is suitable to be passed to _BuildInstanceHookEnv.
+
+  @type lu: L{LogicalUnit}
+  @param lu: the logical unit on whose behalf we execute
+  @type nics: list of L{objects.NIC}
+  @param nics: list of nics to convert to hooks tuples
+  @rtype: list of tuples
+  @return: one (ip, mac, mode, link) tuple per nic, in input order
+
+  """
+  hooks_nics = []
+  # cluster-wide default nicparams; per-nic values below override them
+  c_nicparams = lu.cfg.GetClusterInfo().nicparams[constants.PP_DEFAULT]
+  for nic in nics:
+    ip = nic.ip
+    mac = nic.mac
+    # merge cluster defaults with this nic's own parameter overrides
+    filled_params = objects.FillDict(c_nicparams, nic.nicparams)
+    mode = filled_params[constants.NIC_MODE]
+    link = filled_params[constants.NIC_LINK]
+    hooks_nics.append((ip, mac, mode, link))
+  return hooks_nics
def _BuildInstanceHookEnvByObject(lu, instance, override=None):
"""Builds instance related env variables for hooks from an object.
'status': instance.admin_up,
'memory': bep[constants.BE_MEMORY],
'vcpus': bep[constants.BE_VCPUS],
- 'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
+ 'nics': _PreBuildNICHooksList(lu, instance.nics),
'disk_template': instance.disk_template,
'disks': [(disk.size, disk.mode) for disk in instance.disks],
+ 'bep': bep,
+ 'hvp': hvp,
+ 'hypervisor': instance.hypervisor,
}
if override:
args.update(override)
target_node = secondary_nodes[0]
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
- # check memory requirements on the secondary node
- _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
- instance.name, bep[constants.BE_MEMORY],
- instance.hypervisor)
-
+ if instance.admin_up:
+ # check memory requirements on the secondary node
+ _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
+ instance.name, bep[constants.BE_MEMORY],
+ instance.hypervisor)
+ else:
+ self.LogInfo("Not checking memory on the secondary node as"
+ " instance will not be started")
+
# check bridge existance
- brlist = [nic.bridge for nic in instance.nics]
- result = self.rpc.call_bridges_exist(target_node, brlist)
- result.Raise()
- if not result.data:
- raise errors.OpPrereqError("One or more target bridges %s does not"
- " exist on destination node '%s'" %
- (brlist, target_node))
+ _CheckInstanceBridgesExist(self, instance, node=target_node)
def Exec(self, feedback_fn):
"""Failover an instance.
os_type=self.op.os_type,
memory=self.be_full[constants.BE_MEMORY],
vcpus=self.be_full[constants.BE_VCPUS],
- nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
+ nics=_PreBuildNICHooksList(self, self.nics),
disk_template=self.op.disk_template,
disks=[(d["size"], d["mode"]) for d in self.disks],
+ bep=self.be_full,
+ hvp=self.hv_full,
+ hypervisor=self.op.hypervisor,
))
nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
self.cfg.SetDiskID(disk, src_node)
try:
- for disk in instance.disks:
+ for idx, disk in enumerate(instance.disks):
- # new_dev_name will be a snapshot of an lvm leaf of the one we passed
- new_dev_name = self.rpc.call_blockdev_snapshot(src_node, disk)
- if new_dev_name.failed or not new_dev_name.data:
- self.LogWarning("Could not snapshot disk/%d on node %s",
- idx, src_node)
+ # result.payload will be a snapshot of an lvm leaf of the one we passed
+ result = self.rpc.call_blockdev_snapshot(src_node, disk)
+ msg = result.fail_msg
+ if msg:
- self.LogWarning("Could not snapshot block device %s on node %s: %s",
- disk.logical_id[1], src_node, msg)
++ self.LogWarning("Could not snapshot disk/%s on node %s: %s",
++ idx, src_node, msg)
snap_disks.append(False)
else:
+ disk_id = (vgname, result.payload)
new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
- logical_id=(vgname, new_dev_name.data),
- physical_id=(vgname, new_dev_name.data),
+ logical_id=disk_id, physical_id=disk_id,
iv_name=disk.iv_name)
snap_disks.append(new_dev)
if dev:
result = self.rpc.call_snapshot_export(src_node, dev, dst_node.name,
instance, cluster_name, idx)
- if result.failed or not result.data:
- self.LogWarning("Could not export disk/%d from node %s to"
- " node %s", idx, src_node, dst_node.name)
- msg = self.rpc.call_blockdev_remove(src_node, dev).RemoteFailMsg()
+ msg = result.fail_msg
+ if msg:
- self.LogWarning("Could not export block device %s from node %s to"
- " node %s: %s", dev.logical_id[1], src_node,
- dst_node.name, msg)
++ self.LogWarning("Could not export disk/%s from node %s to"
++ " node %s: %s", idx, src_node, dst_node.name, msg)
+ msg = self.rpc.call_blockdev_remove(src_node, dev).fail_msg
if msg:
- self.LogWarning("Could not remove snapshot block device %s from node"
- " %s: %s", dev.logical_id[1], src_node, msg)
+ self.LogWarning("Could not remove snapshot for disk/%d from node"
+ " %s: %s", idx, src_node, msg)
result = self.rpc.call_finalize_export(dst_node.name, instance, snap_disks)
- if result.failed or not result.data:
- self.LogWarning("Could not finalize export for instance %s on node %s",
- instance.name, dst_node.name)
+ msg = result.fail_msg
+ if msg:
+ self.LogWarning("Could not finalize export for instance %s"
+ " on node %s: %s", instance.name, dst_node.name, msg)
nodelist = self.cfg.GetNodeList()
nodelist.remove(dst_node.name)
def GET(self):
"""Returns cluster information.
- Example::
-
- {
- "config_version": 2000000,
- "name": "cluster",
- "software_version": "2.0.0~beta2",
- "os_api_version": 10,
- "export_version": 0,
- "candidate_pool_size": 10,
- "enabled_hypervisors": [
- "fake"
- ],
- "hvparams": {
- "fake": {}
- },
- "default_hypervisor": "fake",
- "master": "node1.example.com",
- "architecture": [
- "64bit",
- "x86_64"
- ],
- "protocol_version": 20,
- "beparams": {
- "default": {
- "auto_balance": true,
- "vcpus": 1,
- "memory": 128
- }
- }
- }
-
"""
- client = luxi.Client()
+ client = baserlib.GetClient()
return client.QueryClusterInfo()
def GET(self):
"""Returns a list of all nodes.
- Example::
-
- [
- {
- "id": "node1.example.com",
- "uri": "\/instances\/node1.example.com"
- },
- {
- "id": "node2.example.com",
- "uri": "\/instances\/node2.example.com"
- }
- ]
-
- If the optional 'bulk' argument is provided and set to 'true'
- value (i.e '?bulk=1'), the output contains detailed
- information about nodes as a list.
-
- Example::
-
- [
- {
- "pinst_cnt": 1,
- "mfree": 31280,
- "mtotal": 32763,
- "name": "www.example.com",
- "tags": [],
- "mnode": 512,
- "dtotal": 5246208,
- "sinst_cnt": 2,
- "dfree": 5171712,
- "offline": false
- },
- ...
- ]
-
- @return: a dictionary with 'name' and 'uri' keys for each of them
-
"""
- client = luxi.Client()
+ client = baserlib.GetClient()
if self.useBulk():
bulkdata = client.QueryNodes([], N_FIELDS, False)
def GET(self):
"""Returns a list of all available instances.
-
- Example::
-
- [
- {
- "name": "web.example.com",
- "uri": "\/instances\/web.example.com"
- },
- {
- "name": "mail.example.com",
- "uri": "\/instances\/mail.example.com"
- }
- ]
-
- If the optional 'bulk' argument is provided and set to 'true'
- value (i.e '?bulk=1'), the output contains detailed
- information about instances as a list.
-
- Example::
-
- [
- {
- "status": "running",
- "disk_usage": 20480,
- "nic.bridges": [
- "xen-br0"
- ],
- "name": "web.example.com",
- "tags": ["tag1", "tag2"],
- "beparams": {
- "vcpus": 2,
- "memory": 512
- },
- "disk.sizes": [
- 20480
- ],
- "pnode": "node1.example.com",
- "nic.macs": ["01:23:45:67:89:01"],
- "snodes": ["node2.example.com"],
- "disk_template": "drbd",
- "admin_state": true,
- "os": "debian-etch",
- "oper_state": true
- },
- ...
- ]
-
- @return: a dictionary with 'name' and 'uri' keys for each of them.
-
"""
- client = luxi.Client()
+ client = baserlib.GetClient()
use_locking = self.useLocking()
if self.useBulk():
ParseUnit, AddAuthorizedKey, RemoveAuthorizedKey, \
ShellQuote, ShellQuoteArgs, TcpPing, ListVisibleFiles, \
SetEtcHostsEntry, RemoveEtcHostsEntry, FirstFree, OwnIpAddress, \
- TailFile, ForceDictType, IsNormAbsPath
- TailFile, ForceDictType, SafeEncode
++ TailFile, ForceDictType, SafeEncode, IsNormAbsPath
from ganeti.errors import LockError, UnitParseError, GenericError, \
ProgrammerError
self.assertRaises(errors.TypeEnforcementError, self._fdt, {'d': '4 L'})
+class TestIsAbsNormPath(unittest.TestCase):
+  """Testing case for IsNormAbsPath"""
+
+  def _pathTestHelper(self, path, result):
+    # result=True: path must be accepted as absolute and normalized;
+    # result=False: path must be rejected
+    if result:
+      self.assert_(IsNormAbsPath(path),
+          "Path %s should result absolute and normalized" % path)
+    else:
+      self.assert_(not IsNormAbsPath(path),
+          "Path %s should not result absolute and normalized" % path)
+
+  def testBase(self):
+    self._pathTestHelper('/etc', True)
+    self._pathTestHelper('/srv', True)
+    self._pathTestHelper('etc', False)          # relative path
+    self._pathTestHelper('/etc/../root', False) # not normalized (parent ref)
+    self._pathTestHelper('/etc/', False)        # trailing slash not normalized
+
++
+ class TestSafeEncode(unittest.TestCase):
+   """Test case for SafeEncode"""
+
+   def testAscii(self):
+     # printable ASCII must pass through unchanged
+     for txt in [string.digits, string.letters, string.punctuation]:
+       self.failUnlessEqual(txt, SafeEncode(txt))
+
+   def testDoubleEncode(self):
+     # SafeEncode must be idempotent: encoding an already-encoded
+     # string yields the same string
+     for i in range(255):
+       txt = SafeEncode(chr(i))
+       self.failUnlessEqual(txt, SafeEncode(txt))
+
+   def testUnicode(self):
+     # 1024 is high enough to catch non-direct ASCII mappings
+     for i in range(1024):
+       txt = SafeEncode(unichr(i))
+       self.failUnlessEqual(txt, SafeEncode(txt))
+
+
# Entry point: run every TestCase defined in this module.
if __name__ == '__main__':
  unittest.main()