Revision e69d05fd
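This revision turns the hypervisor from a single cluster-wide setting into a per-instance attribute: the node daemon, backend, RPC layer, cmdlib, objects, opcodes, and gnt-instance all gain an explicit hypervisor argument, and the cluster object gains an enabled_hypervisors list.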
b/daemons/ganeti-noded

     """Query information about all instances.

     """
-    return backend.GetAllInstancesInfo()
+    return backend.GetAllInstancesInfo(params[0])

   @staticmethod
   def perspective_instance_list(params):
     """Query the list of running instances.

     """
-    return backend.GetInstanceList()
+    return backend.GetInstanceList(params[0])

   # node --------------------------
...
     """Query node information.

     """
-    vgname = params[0]
-    return backend.GetNodeInfo(vgname)
+    vgname, hypervisor_type = params
+    return backend.GetNodeInfo(vgname, hypervisor_type)

   @staticmethod
   def perspective_node_add(params):
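Note on the noded side of these changes: every perspective_* handler receives its arguments as a single params list, so the new hypervisor information must be packed by the rpc.py caller and unpacked here in the same positional order. A standalone sketch of that convention (function names are illustrative stand-ins, not the daemon's code):

  # Sketch of the params convention: the rpc client packs positional
  # arguments into one list and the perspective_* handler unpacks them
  # in the same order, so both sides of the wire change together.
  def pack_node_info_args(vg_name, hypervisor_type):
    return [vg_name, hypervisor_type]        # what rpc.Client would send

  def perspective_node_info(params):
    vgname, hypervisor_type = params         # what ganeti-noded unpacks
    return {"vg": vgname, "hv": hypervisor_type}

  print(perspective_node_info(pack_node_info_args("xenvg", "xen-3.0")))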
b/lib/backend.py

     raise errors.QuitGanetiException(False, 'Shutdown scheduled')


-def GetNodeInfo(vgname):
+def GetNodeInfo(vgname, hypervisor_type):
   """Gives back a hash with different informations about the node.

-  Returns:
-    { 'vg_size' : xxx, 'vg_free' : xxx, 'memory_domain0': xxx,
-      'memory_free' : xxx, 'memory_total' : xxx }
-    where
-    vg_size is the size of the configured volume group in MiB
-    vg_free is the free size of the volume group in MiB
-    memory_dom0 is the memory allocated for domain0 in MiB
-    memory_free is the currently available (free) ram in MiB
-    memory_total is the total number of ram in MiB
+  @type vgname: C{string}
+  @param vgname: the name of the volume group to ask for disk space information
+  @type hypervisor_type: C{str}
+  @param hypervisor_type: the name of the hypervisor to ask for
+      memory information
+  @rtype: C{dict}
+  @return: dictionary with the following keys:
+      - vg_size is the size of the configured volume group in MiB
+      - vg_free is the free size of the volume group in MiB
+      - memory_dom0 is the memory allocated for domain0 in MiB
+      - memory_free is the currently available (free) ram in MiB
+      - memory_total is the total number of ram in MiB

   """
   outputarray = {}
...
   outputarray['vg_size'] = vginfo['vg_size']
   outputarray['vg_free'] = vginfo['vg_free']

-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(hypervisor_type)
   hyp_info = hyper.GetNodeInfo()
   if hyp_info is not None:
     outputarray.update(hyp_info)
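As a quick illustration of the merge this function performs, here is a standalone sketch (all names and numbers invented): the volume-group keys come from LVM, and the hypervisor named by the caller supplies the memory_* keys.

  # Standalone sketch of how GetNodeInfo merges its two data sources:
  def get_node_info(vg_info, hyp_info):
    output = {"vg_size": vg_info["vg_size"], "vg_free": vg_info["vg_free"]}
    if hyp_info is not None:
      output.update(hyp_info)    # the hypervisor supplies the memory_* keys
    return output

  print(get_node_info({"vg_size": 102400, "vg_free": 51200},
                      {"memory_total": 4096, "memory_free": 1024,
                       "memory_dom0": 512}))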
...
 def VerifyNode(what, cluster_name):
   """Verify the status of the local node.

-  Args:
-    what - a dictionary of things to check:
-      'filelist' : list of files for which to compute checksums
-      'nodelist' : list of nodes we should check communication with
-      'hypervisor': run the hypervisor-specific verify
+  Based on the input L{what} parameter, various checks are done on the
+  local node.
+
+  If the I{filelist} key is present, this list of
+  files is checksummed and the file/checksum pairs are returned.
+
+  If the I{nodelist} key is present, we check that we have
+  connectivity via ssh with the target nodes (and check the hostname
+  report).

-  Requested files on local node are checksummed and the result returned.
+  If the I{node-net-test} key is present, we check that we have
+  connectivity to the given nodes via both primary IP and, if
+  applicable, secondary IPs.
+
+  @type what: C{dict}
+  @param what: a dictionary of things to check:
+      - filelist: list of files for which to compute checksums
+      - nodelist: list of nodes we should check ssh communication with
+      - node-net-test: list of nodes we should check node daemon port
+        connectivity with
+      - hypervisor: list with hypervisors to run the verify for

-  The nodelist is traversed, with the following checks being made
-  for each node:
-  - known_hosts key correct
-  - correct resolving of node name (target node returns its own hostname
-    by ssh-execution of 'hostname', result compared against name in list.

   """
   result = {}

   if 'hypervisor' in what:
-    result['hypervisor'] = hypervisor.GetHypervisor(_GetConfig()).Verify()
+    result['hypervisor'] = my_dict = {}
+    for hv_name in what['hypervisor']:
+      my_dict[hv_name] = hypervisor.GetHypervisor(hv_name).Verify()

   if 'filelist' in what:
     result['filelist'] = utils.FingerprintFiles(what['filelist'])
...
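The verify result therefore changes shape from a single value to a per-hypervisor mapping. A standalone sketch of the structure the master-side code now has to expect (hypervisor names and messages invented for the example):

  # Sketch of the new verify result layout: one entry per hypervisor,
  # with None meaning "no problem found".
  result = {"hypervisor": {"xen-3.0": None,
                           "xen-hvm3.1": "hypervisor not responding"}}
  for hv_name, hv_result in sorted(result["hypervisor"].items()):
    if hv_result is not None:
      print("  - ERROR: hypervisor %s verify failure: '%s'"
            % (hv_name, hv_result))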
   return True


-def GetInstanceList():
+def GetInstanceList(hypervisor_list):
   """Provides a list of instances.

-  Returns:
-    A list of all running instances on the current node
-    - instance1.example.com
-    - instance2.example.com
+  @type hypervisor_list: list
+  @param hypervisor_list: the list of hypervisors to query information
+
+  @rtype: list
+  @return: a list of all running instances on the current node
+      - instance1.example.com
+      - instance2.example.com

   """
-  try:
-    names = hypervisor.GetHypervisor(_GetConfig()).ListInstances()
-  except errors.HypervisorError, err:
-    logging.exception("Error enumerating instances")
-    raise
+  results = []
+  for hname in hypervisor_list:
+    try:
+      names = hypervisor.GetHypervisor(hname).ListInstances()
+      results.extend(names)
+    except errors.HypervisorError, err:
+      logging.exception("Error enumerating instances for hypervisor %s",
+                        hname)
+      # FIXME: should we somehow not propagate this to the master?
+      raise

-  return names
+  return results


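A standalone sketch of the aggregation behaviour this introduces (stub classes stand in for the real hypervisor abstractions): the union of every hypervisor's instance list is returned, and an error from any one hypervisor aborts the whole call.

  # Minimal stand-ins for illustration only; the real code dispatches
  # through hypervisor.GetHypervisor(hname).ListInstances().
  class StubHypervisor(object):
    def __init__(self, names):
      self.names = names
    def ListInstances(self):
      return self.names

  hv_map = {"xen-3.0": StubHypervisor(["inst1.example.com"]),
            "xen-hvm3.1": StubHypervisor(["inst2.example.com"])}

  def get_instance_list(hypervisor_list):
    results = []
    for hname in hypervisor_list:
      results.extend(hv_map[hname].ListInstances())  # union over hypervisors
    return results

  print(get_instance_list(["xen-3.0", "xen-hvm3.1"]))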
-def GetInstanceInfo(instance):
+def GetInstanceInfo(instance, hname):
   """Gives back the informations about an instance as a dictionary.

-  Args:
-    instance: name of the instance (ex. instance1.example.com)
+  @type instance: string
+  @param instance: the instance name
+  @type hname: string
+  @param hname: the hypervisor type of the instance

-  Returns:
-    { 'memory' : 511, 'state' : '-b---', 'time' : 3188.8, }
-    where
-    memory: memory size of instance (int)
-    state: xen state of instance (string)
-    time: cpu time of instance (float)
+  @rtype: dict
+  @return: dictionary with the following keys:
+      - memory: memory size of instance (int)
+      - state: xen state of instance (string)
+      - time: cpu time of instance (float)

   """
   output = {}

-  iinfo = hypervisor.GetHypervisor(_GetConfig()).GetInstanceInfo(instance)
+  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
   if iinfo is not None:
     output['memory'] = iinfo[2]
     output['state'] = iinfo[4]
...
   return output


-def GetAllInstancesInfo():
+def GetAllInstancesInfo(hypervisor_list):
   """Gather data about all instances.

   This is the equivalent of `GetInstanceInfo()`, except that it
   computes data for all instances at once, thus being faster if one
   needs data about more than one instance.

-  Returns: a dictionary of dictionaries, keys being the instance name,
-    and with values:
-    { 'memory' : 511, 'state' : '-b---', 'time' : 3188.8, }
-    where
-    memory: memory size of instance (int)
-    state: xen state of instance (string)
-    time: cpu time of instance (float)
-    vcpus: the number of cpus
+  @type hypervisor_list: list
+  @param hypervisor_list: list of hypervisors to query for instance data
+
+  @rtype: dict of dicts
+  @return: dictionary of instance: data, with data having the following keys:
+      - memory: memory size of instance (int)
+      - state: xen state of instance (string)
+      - time: cpu time of instance (float)
+      - vcpus: the number of vcpus

   """
   output = {}

-  iinfo = hypervisor.GetHypervisor(_GetConfig()).GetAllInstancesInfo()
-  if iinfo:
-    for name, inst_id, memory, vcpus, state, times in iinfo:
-      output[name] = {
-        'memory': memory,
-        'vcpus': vcpus,
-        'state': state,
-        'time': times,
-        }
+  for hname in hypervisor_list:
+    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo()
+    if iinfo:
+      for name, inst_id, memory, vcpus, state, times in iinfo:
+        if name in output:
+          raise errors.HypervisorError("Instance %s running duplicate" % name)
+        output[name] = {
+          'memory': memory,
+          'vcpus': vcpus,
+          'state': state,
+          'time': times,
+          }

   return output

...
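One behavioural detail worth illustrating: because several hypervisors are now queried on the same node, an instance name reported twice is treated as an error rather than silently overwritten. A standalone sketch with stub data (names invented; the real code raises errors.HypervisorError):

  # Stub per-hypervisor reports; the tuples mimic the
  # (name, id, memory, vcpus, state, times) rows unpacked above.
  reports = {"xen-3.0": [("inst1", 0, 512, 1, "-b---", 12.0)],
             "xen-hvm3.1": [("inst1", 0, 512, 1, "r----", 3.0)]}

  output = {}
  for hname, iinfo in sorted(reports.items()):
    for name, inst_id, memory, vcpus, state, times in iinfo:
      if name in output:
        print("Instance %s running duplicate" % name)  # rejected
      else:
        output[name] = {"memory": memory, "vcpus": vcpus,
                        "state": state, "time": times}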
     swap_disk: the instance-visible name of the swap device

   """
-  cfg = _GetConfig()
   inst_os = OSFromDisk(instance.os)

   create_script = inst_os.create_script
...
                                inst_os.path, create_script, instance.name,
                                real_os_dev.dev_path, real_swap_dev.dev_path,
                                logfile)
-  env = {'HYPERVISOR': cfg.GetHypervisorType()}
+  env = {'HYPERVISOR': instance.hypervisor}

   result = utils.RunCmd(command, env=env)
   if result.failed:
...
 def StartInstance(instance, extra_args):
   """Start an instance.

-  Args:
-    instance - name of instance to start.
+  @type instance: instance object
+  @param instance: the instance object
+  @rtype: boolean
+  @return: whether the startup was successful or not

   """
-  running_instances = GetInstanceList()
+  running_instances = GetInstanceList([instance.hypervisor])

   if instance.name in running_instances:
     return True

   block_devices = _GatherBlockDevs(instance)
-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)

   try:
     hyper.StartInstance(instance, block_devices, extra_args)
...
 def ShutdownInstance(instance):
   """Shut an instance down.

-  Args:
-    instance - name of instance to shutdown.
+  @type instance: instance object
+  @param instance: the instance object
+  @rtype: boolean
+  @return: whether the shutdown was successful or not

   """
-  running_instances = GetInstanceList()
+  hv_name = instance.hypervisor
+  running_instances = GetInstanceList([hv_name])

   if instance.name not in running_instances:
     return True

-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(hv_name)
   try:
     hyper.StopInstance(instance)
   except errors.HypervisorError, err:
...

   time.sleep(1)
   for dummy in range(11):
-    if instance.name not in GetInstanceList():
+    if instance.name not in GetInstanceList([hv_name]):
       break
     time.sleep(10)
   else:
...
     return False

   time.sleep(1)
-  if instance.name in GetInstanceList():
+  if instance.name in GetInstanceList([hv_name]):
     logging.error("could not shutdown instance '%s' even by destroy",
                   instance.name)
     return False
...
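The shutdown path is a good example of why the hypervisor now travels with the instance object: the same GetInstanceList([hv_name]) query is issued three times (pre-check, graceful-shutdown poll, post-destroy check), so it must consistently target the one hypervisor that runs the instance. A standalone sketch of the poll-loop shape (timings shortened, stub query function):

  import time

  def wait_for_disappearance(name, list_instances, retries=3, delay=0.1):
    # Poll until the instance vanishes from the single-hypervisor
    # instance list; this mirrors the for loop in ShutdownInstance.
    for dummy in range(retries):
      if name not in list_instances():
        return True      # graceful shutdown observed
      time.sleep(delay)
    return False         # caller falls back to destroying the instance

  state = {"polls_left": 2}
  def fake_list():
    state["polls_left"] -= 1   # the instance disappears after two polls
    if state["polls_left"] > 0:
      return ["inst1"]
    return []

  print(wait_for_disappearance("inst1", fake_list))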
     reboot_type - how to reboot [soft,hard,full]

   """
-  running_instances = GetInstanceList()
+  running_instances = GetInstanceList([instance.hypervisor])

   if instance.name not in running_instances:
     logging.error("Cannot reboot instance that is not running")
     return False

-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
   if reboot_type == constants.INSTANCE_REBOOT_SOFT:
     try:
       hyper.RebootInstance(instance)
...
   else:
     raise errors.ParameterError("reboot_type invalid")

-
   return True

...
     - msg is a string with details in case of failure

   """
-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(instance.hypervisor_name)

   try:
     hyper.MigrateInstance(instance.name, target, live)
...
     False in case of error, True otherwise.

   """
-  cfg = _GetConfig()
   inst_os = OSFromDisk(instance.os)
   import_script = inst_os.import_script

...
                                logfile)

   command = '|'.join([utils.ShellQuoteArgs(remotecmd), comprcmd, impcmd])
-  env = {'HYPERVISOR': cfg.GetHypervisorType()}
+  env = {'HYPERVISOR': instance.hypervisor}

   result = utils.RunCmd(command, env=env)

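Both the OS create and import paths export the hypervisor to the OS scripts through the environment; the only change is where the value comes from (the instance, not the cluster config). A standalone sketch of that pattern (names invented; subprocess stands in for utils.RunCmd):

  import subprocess

  # The OS script sees the instance's own hypervisor in $HYPERVISOR
  # rather than a cluster-wide value ("xen-3.0" is an example value).
  env = {"HYPERVISOR": "xen-3.0"}    # instance.hypervisor in the real code
  proc = subprocess.Popen(["/bin/sh", "-c",
                           'echo "creating for $HYPERVISOR"'],
                          env=env, stdout=subprocess.PIPE)
  print(proc.communicate()[0])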
b/lib/cmdlib.py

                     (node, node_result['node-net-test'][node]))

     hyp_result = node_result.get('hypervisor', None)
-    if hyp_result is not None:
-      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
+    if isinstance(hyp_result, dict):
+      for hv_name, hv_result in hyp_result.iteritems():
+        if hv_result is not None:
+          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
+                      (hv_name, hv_result))
     return bad

   def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
...
       feedback_fn("  - ERROR: %s" % msg)

     vg_name = self.cfg.GetVGName()
+    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
     nodelist = utils.NiceSort(self.cfg.GetNodeList())
     nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
     instancelist = utils.NiceSort(self.cfg.GetInstanceList())
...

     feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
     all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
-    all_instanceinfo = rpc.call_instance_list(nodelist)
+    all_instanceinfo = rpc.call_instance_list(nodelist, hypervisors)
     all_vglist = rpc.call_vg_list(nodelist)
     node_verify_param = {
       'filelist': file_names,
       'nodelist': nodelist,
-      'hypervisor': None,
+      'hypervisor': hypervisors,
       'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                         for node in nodeinfo]
       }
     all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param,
                                       self.cfg.GetClusterName())
     all_rversion = rpc.call_version(nodelist)
-    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                   self.cfg.GetHypervisorType())

     for node in nodelist:
       feedback_fn("* Verifying node %s" % node)
...

     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                     self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data.get(name, None)
         if nodeinfo:
...
                         (fname, to_node))

     to_copy = []
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if constants.HT_XEN_HVM31 in self.cfg.GetClusterInfo().enabled_hypervisors:
       to_copy.append(constants.VNC_PASSWORD_FILE)
     for fname in to_copy:
       result = rpc.call_upload_file([node], fname)
...
       "master": self.cfg.GetMasterNode(),
       "architecture": (platform.architecture()[0], platform.machine()),
       "hypervisor_type": self.cfg.GetHypervisorType(),
+      "enabled_hypervisors": self.cfg.GetClusterInfo().enabled_hypervisors,
       }

     return result
...
   _ShutdownInstanceDisks.

   """
-  ins_l = rpc.call_instance_list([instance.primary_node])
+  ins_l = rpc.call_instance_list([instance.primary_node],
+                                 [instance.hypervisor])
   ins_l = ins_l[instance.primary_node]
   if not type(ins_l) is list:
     raise errors.OpExecError("Can't contact node '%s'" %
...
   return result


-def _CheckNodeFreeMemory(cfg, node, reason, requested):
+def _CheckNodeFreeMemory(cfg, node, reason, requested, hypervisor):
   """Checks if a node has enough free memory.

   This function check if a given node has the needed amount of free
...
   information from the node, this function raise an OpPrereqError
   exception.

-  Args:
-    - cfg: a ConfigWriter instance
-    - node: the node name
-    - reason: string to use in the error message
-    - requested: the amount of memory in MiB
+  @type cfg: C{config.ConfigWriter}
+  @param cfg: the ConfigWriter instance from which we get configuration data
+  @type node: C{str}
+  @param node: the node to check
+  @type reason: C{str}
+  @param reason: string to use in the error message
+  @type requested: C{int}
+  @param requested: the amount of memory in MiB to check for
+  @type hypervisor: C{str}
+  @param hypervisor: the hypervisor to ask for memory stats
+  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
+      we cannot check the node

   """
-  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
+  nodeinfo = rpc.call_node_info([node], cfg.GetVGName(), hypervisor)
   if not nodeinfo or not isinstance(nodeinfo, dict):
     raise errors.OpPrereqError("Could not contact node %s for resource"
                                " information" % (node,))
...
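The free-memory check now asks one specific hypervisor for its memory statistics. A standalone sketch of the decision logic (stub value in place of the rpc call; the real code reads nodeinfo[node]['memory_free'] from rpc.call_node_info(..., hypervisor)):

  def check_node_free_memory(free_mem, reason, requested):
    # Reject both unreadable node data and insufficient memory.
    if not isinstance(free_mem, int):
      raise ValueError("Can't compute free memory")
    if requested > free_mem:
      raise ValueError("Not enough memory: needed %d MiB, available %d MiB"
                       " (%s)" % (requested, free_mem, reason))

  check_node_free_memory(2048, "starting instance inst1", 512)   # passes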
     _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                          "starting instance %s" % instance.name,
-                         instance.memory)
+                         instance.memory, instance.hypervisor)

   def Exec(self, feedback_fn):
     """Start the instance.
...
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
...
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
...
                                "hvm_boot_order", "hvm_acpi", "hvm_pae",
                                "hvm_cdrom_image_path", "hvm_nic_type",
                                "hvm_disk_type", "vnc_bind_address",
-                               "serial_no",
+                               "serial_no", "hypervisor",
                                ])
     _CheckOutputFields(static=self.static_fields,
                        dynamic=self.dynamic_fields,
...
     # begin data gathering

     nodes = frozenset([inst.primary_node for inst in instance_list])
+    hv_list = list(set([inst.hypervisor for inst in instance_list]))

     bad_nodes = []
     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_all_instances_info(nodes)
+      node_data = rpc.call_all_instances_info(nodes, hv_list)
       for name in nodes:
         result = node_data[name]
         if result:
...
           val = "default"
         else:
           val = "-"
+      elif field == "hypervisor":
+        val = instance.hypervisor
       else:
         raise errors.ParameterError(field)
       iout.append(val)
...
     target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
     _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
-                         instance.name, instance.memory)
+                         instance.name, instance.memory,
+                         instance.hypervisor)

     # check bridge existance
     brlist = [nic.bridge for nic in instance.nics]
...
     for attr in ["kernel_path", "initrd_path", "pnode", "snode",
                  "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
                  "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
-                 "vnc_bind_address"]:
+                 "vnc_bind_address", "hypervisor"]:
       if not hasattr(self.op, attr):
         setattr(self.op, attr, None)

...
       raise errors.OpPrereqError("Cluster does not support lvm-based"
                                  " instances")

+    # cheap checks (from the config only)
+
+    if self.op.hypervisor is None:
+      self.op.hypervisor = self.cfg.GetHypervisorType()
+
+    enabled_hvs = self.cfg.GetClusterInfo().enabled_hypervisors
+    if self.op.hypervisor not in enabled_hvs:
+      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
+                                 " cluster (%s)" % (self.op.hypervisor,
+                                 ",".join(enabled_hvs)))
+
+    # costly checks (from nodes)
+
     if self.op.mode == constants.INSTANCE_IMPORT:
       src_node = self.op.src_node
       src_path = self.op.src_path
...
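The "cheap checks" added to instance creation are worth a standalone illustration: an unspecified hypervisor falls back to the cluster default, and anything outside enabled_hypervisors is rejected before any node is contacted (names and values invented):

  def resolve_hypervisor(requested, default_hv, enabled_hvs):
    # None means "use the cluster default"; anything else must be enabled.
    if requested is None:
      requested = default_hv
    if requested not in enabled_hvs:
      raise ValueError("Selected hypervisor (%s) not enabled in the"
                       " cluster (%s)" % (requested, ",".join(enabled_hvs)))
    return requested

  print(resolve_hypervisor(None, "xen-3.0", ["xen-3.0"]))
  print(resolve_hypervisor("xen-hvm3.1", "xen-3.0",
                           ["xen-3.0", "xen-hvm3.1"]))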
     # Check lv size requirements
     if req_size is not None:
       nodenames = [pnode.name] + self.secondaries
-      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                    self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo.get(node, None)
         if not info:
...
     if self.op.start:
       _CheckNodeFreeMemory(self.cfg, self.pnode.name,
                            "creating instance %s" % self.op.instance_name,
-                           self.op.mem_size)
+                           self.op.mem_size, self.op.hypervisor)

     # hvm_cdrom_image_path verification
     if self.op.hvm_cdrom_image_path is not None:
...
                                    self.op.vnc_bind_address)

     # Xen HVM device type checks
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if self.op.hypervisor == constants.HT_XEN_HVM31:
       if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
         raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
                                    " hypervisor" % self.op.hvm_nic_type)
...
     if self.inst_ip is not None:
       nic.ip = self.inst_ip

-    ht_kind = self.cfg.GetHypervisorType()
+    ht_kind = self.op.hypervisor
     if ht_kind in constants.HTS_REQ_PORT:
       network_port = self.cfg.AllocatePort()
     else:
...
                             vnc_bind_address=self.op.vnc_bind_address,
                             hvm_nic_type=self.op.hvm_nic_type,
                             hvm_disk_type=self.op.hvm_disk_type,
+                            hypervisor=self.op.hypervisor,
                             )

     feedback_fn("* creating instance disks...")
...
     instance = self.instance
     node = instance.primary_node

-    node_insts = rpc.call_instance_list([node])[node]
+    node_insts = rpc.call_instance_list([node],
+                                        [instance.hypervisor])[node]
     if node_insts is False:
       raise errors.OpExecError("Can't connect to node %s." % node)

...

     logger.Debug("connecting to console of %s on %s" % (instance.name, node))

-    hyper = hypervisor.GetHypervisor(self.cfg)
+    hyper = hypervisor.GetHypervisor(instance.hypervisor)
     console_cmd = hyper.GetShellCommandForConsole(instance)

     # build ssh cmdline
...
                                  (self.op.disk, instance.name))

     nodenames = [instance.primary_node] + list(instance.secondary_nodes)
-    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                  instance.hypervisor)
     for node in nodenames:
       info = nodeinfo.get(node, None)
       if not info:
...
     result = {}
     for instance in self.wanted_instances:
       remote_info = rpc.call_instance_info(instance.primary_node,
-                                           instance.name)
+                                           instance.name,
+                                           instance.hypervisor)
       if remote_info and "state" in remote_info:
         remote_state = "up"
       else:
...
         "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
         "disks": disks,
         "vcpus": instance.vcpus,
+        "hypervisor": instance.hypervisor,
         }

-      htkind = self.cfg.GetHypervisorType()
+      htkind = instance.hypervisor
       if htkind == constants.HT_XEN_PVM30:
         idict["kernel_path"] = instance.kernel_path
         idict["initrd_path"] = instance.initrd_path
...
     pnode = self.instance.primary_node
     nodelist = [pnode]
     nodelist.extend(instance.secondary_nodes)
-    instance_info = rpc.call_instance_info(pnode, instance.name)
-    nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+    instance_info = rpc.call_instance_info(pnode, instance.name,
+                                           instance.hypervisor)
+    nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                  instance.hypervisor)

     if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
       # Assume the primary node is unreachable and go ahead
...
                                      " node %s" % node)

     # Xen HVM device type checks
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if instance.hypervisor == constants.HT_XEN_HVM31:
       if self.op.hvm_nic_type is not None:
         if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
           raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
...

     """
     cfg = self.cfg
+    cluster_info = cfg.GetClusterInfo()
     # cluster data
     data = {
       "version": 1,
       "cluster_name": self.cfg.GetClusterName(),
-      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
-      "hypervisor_type": self.cfg.GetHypervisorType(),
+      "cluster_tags": list(cluster_info.GetTags()),
+      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
       # we don't have job IDs
       }

...
     # node data
     node_results = {}
     node_list = cfg.GetNodeList()
-    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
+    # FIXME: here we have only one hypervisor information, but
+    # instance can belong to different hypervisors
+    node_data = rpc.call_node_info(node_list, cfg.GetVGName(),
+                                   cfg.GetHypervisorType())
     for nname in node_list:
       ninfo = cfg.GetNodeInfo(nname)
       if nname not in node_data or not isinstance(node_data[nname], dict):
...
         "nics": nic_data,
         "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
         "disk_template": iinfo.disk_template,
+        "hypervisor": iinfo.hypervisor,
         }
       instance_data[iinfo.name] = pir

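For the IAllocator payload, each instance entry now carries its own hypervisor while the cluster section lists all enabled ones; note the FIXME above, where node memory data is still gathered for only the cluster default hypervisor. A trimmed, hypothetical example of the resulting structure (all values invented):

  # Invented payload fragment, shaped after the code above.
  data = {
    "version": 1,
    "cluster_name": "cluster.example.com",
    "enable_hypervisors": ["xen-3.0", "xen-hvm3.1"],
    "instances": {
      "inst1.example.com": {"disk_template": "drbd",
                            "hypervisor": "xen-3.0"},
    },
  }
  print(data["instances"]["inst1.example.com"]["hypervisor"])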
b/lib/hypervisor/__init__.py

   }


-def GetHypervisor(cfg):
+def GetHypervisor(ht_kind):
   """Return a Hypervisor instance.

   This function parses the cluster hypervisor configuration file and
   instantiates a class based on the value of this file.

-  @param cfg: Configuration object
+  @type ht_kind: string
+  @param ht_kind: The requested hypervisor type

   """
-  ht_kind = cfg.GetHypervisorType()
-
   if ht_kind not in _HYPERVISOR_MAP:
     raise errors.HypervisorError("Unknown hypervisor type '%s'" % ht_kind)

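GetHypervisor is now a pure lookup keyed by the hypervisor name rather than a config reader. A standalone sketch of the dispatch-map pattern it uses (stub classes; the real map lives in lib/hypervisor/__init__.py):

  class XenPvmStub(object):
    pass

  class XenHvmStub(object):
    pass

  # Hypothetical equivalent of _HYPERVISOR_MAP: name -> class,
  # instantiated on demand, with unknown names rejected explicitly.
  _HYPERVISOR_MAP = {"xen-3.0": XenPvmStub, "xen-hvm3.1": XenHvmStub}

  def get_hypervisor(ht_kind):
    if ht_kind not in _HYPERVISOR_MAP:
      raise KeyError("Unknown hypervisor type '%s'" % ht_kind)
    return _HYPERVISOR_MAP[ht_kind]()

  print(get_hypervisor("xen-3.0"))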
b/lib/objects.py

     "name",
     "primary_node",
     "os",
+    "hypervisor",
     "status",
     "memory",
     "vcpus",
...
     "master_netdev",
     "cluster_name",
     "file_storage_dir",
+    "enabled_hypervisors",
     ]

   def ToDict(self):
b/lib/opcodes.py

     "hvm_pae", "hvm_cdrom_image_path", "vnc_bind_address",
     "file_storage_dir", "file_driver",
     "iallocator", "hvm_nic_type", "hvm_disk_type",
+    "hypervisor",
     ]


b/lib/rpc.py


   @type node: string
   @param node: the node on which the instance is currently running
-  @type instance: instance object
+  @type instance: C{objects.Instance}
   @param instance: the instance definition
   @type target: string
   @param target: the target node name
...
   return c.getresult().get(node, False)


-def call_instance_info(node, instance):
+def call_instance_info(node, instance, hname):
   """Returns information about a single instance.

   This is a single-node call.

+  @type node: string
+  @param node: the node to query
+  @type instance: string
+  @param instance: the instance name
+  @type hname: string
+  @param hname: the hypervisor type of the instance
+
   """
   c = Client("instance_info", [instance])
   c.connect(node)
...
   return c.getresult().get(node, False)


-def call_all_instances_info(node_list):
-  """Returns information about all instances on a given node.
+def call_all_instances_info(node_list, hypervisor_list):
+  """Returns information about all instances on the given nodes.

-  This is a single-node call.
+  This is a multi-node call.
+
+  @type node_list: list
+  @param node_list: the list of nodes to query
+  @type hypervisor_list: list
+  @param hypervisor_list: the hypervisors to query for instances

   """
-  c = Client("all_instances_info", [])
+  c = Client("all_instances_info", [hypervisor_list])
   c.connect_list(node_list)
   c.run()
   return c.getresult()


-def call_instance_list(node_list):
+def call_instance_list(node_list, hypervisor_list):
   """Returns the list of running instances on a given node.

-  This is a single-node call.
+  This is a multi-node call.
+
+  @type node_list: list
+  @param node_list: the list of nodes to query
+  @type hypervisor_list: list
+  @param hypervisor_list: the hypervisors to query for instances

   """
-  c = Client("instance_list", [])
+  c = Client("instance_list", [hypervisor_list])
   c.connect_list(node_list)
   c.run()
   return c.getresult()
...
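These wrappers all follow one pattern: build a Client with the procedure name and its argument list, connect to one node or a list of nodes, run, and read back per-node results. A standalone sketch of that calling pattern (the Client class here is a hypothetical stand-in for lib/rpc.py's):

  class Client(object):
    # Stand-in that echoes its procedure and arguments per node.
    def __init__(self, procedure, args):
      self.procedure, self.args, self.nodes = procedure, args, []
    def connect_list(self, node_list):
      self.nodes = node_list
    def run(self):
      pass
    def getresult(self):
      return dict((n, (self.procedure, self.args)) for n in self.nodes)

  def call_instance_list(node_list, hypervisor_list):
    c = Client("instance_list", [hypervisor_list])  # args travel as a list
    c.connect_list(node_list)
    c.run()
    return c.getresult()

  print(call_instance_list(["node1"], ["xen-3.0"]))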
   return c.getresult().get(node, False)


-def call_node_info(node_list, vg_name):
+def call_node_info(node_list, vg_name, hypervisor_type):
   """Return node information.

   This will return memory information and volume group size and free
...

   This is a multi-node call.

+  @type node_list: list
+  @param node_list: the list of nodes to query
+  @type vg_name: C{string}
+  @param vg_name: the name of the volume group to ask for disk space
+      information
+  @type hypervisor_type: C{str}
+  @param hypervisor_type: the name of the hypervisor to ask for
+      memory information
+
   """
-  c = Client("node_info", [vg_name])
+  c = Client("node_info", [vg_name, hypervisor_type])
   c.connect_list(node_list)
   c.run()
   retux = c.getresult()
b/scripts/gnt-instance

 _VALUE_TRUE = "true"

 _LIST_DEF_FIELDS = [
-  "name", "os", "pnode", "status", "oper_ram",
+  "name", "hypervisor", "os", "pnode", "status", "oper_ram",
   ]

...
       "hvm_nic_type": "HVM_NIC_type",
       "hvm_disk_type": "HVM_disk_type",
       "vnc_bind_address": "VNC_bind_address",
-      "serial_no": "SerialNo",
+      "serial_no": "SerialNo", "hypervisor": "Hypervisor",
       }
   else:
     headers = None
...
   buf.write("  Nodes:\n")
   buf.write("    - primary: %s\n" % instance["pnode"])
   buf.write("    - secondaries: %s\n" % ", ".join(instance["snodes"]))
+  buf.write("  Hypervisor: %s\n" % instance["hypervisor"])
   buf.write("  Operating system: %s\n" % instance["os"])
   if instance.has_key("network_port"):
     buf.write("  Allocated network port: %s\n" % instance["network_port"])
...
           "Lists the instances and their status. The available fields are"
           " (see the man page for details): status, oper_state, oper_ram,"
           " name, os, pnode, snodes, admin_state, admin_ram, disk_template,"
-          " ip, mac, bridge, sda_size, sdb_size, vcpus, serial_no."
+          " ip, mac, bridge, sda_size, sdb_size, vcpus, serial_no,"
+          " hypervisor."
           " The default field"
           " list is (in order): %s." % ", ".join(_LIST_DEF_FIELDS),
           ),