4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API version 2 baserlib.library.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
31 To be in context of this module for instance creation POST on
32 /2/instances is legitimate while PUT would not be, since it creates a
33 new entity instead of just replacing /2/instances with it.
35 So when adding new methods, if they are operating on the URI entity itself,
36 PUT should be preferred over POST.
40 # pylint: disable-msg=C0103
42 # C0103: Invalid name, since the R_* names are not conforming
44 from ganeti import opcodes
45 from ganeti import http
46 from ganeti import constants
47 from ganeti import cli
48 from ganeti import utils
49 from ganeti import rapi
50 from ganeti.rapi import baserlib
53 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
54 I_FIELDS = ["name", "admin_state", "os",
57 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
59 "disk.sizes", "disk_usage",
60 "beparams", "hvparams",
61 "oper_state", "oper_ram", "oper_vcpus", "status",
62 "custom_hvparams", "custom_beparams", "custom_nicparams",
65 N_FIELDS = ["name", "offline", "master_candidate", "drained",
67 "mtotal", "mnode", "mfree",
68 "pinst_cnt", "sinst_cnt",
69 "ctotal", "cnodes", "csockets",
71 "pinst_list", "sinst_list",
72 "master_capable", "vm_capable",
76 G_FIELDS = ["name", "uuid",
78 "node_cnt", "node_list",
79 "ctime", "mtime", "serial_no",
80 ] # "tags" is missing to be able to use _COMMON_FIELDS here.
82 _NR_DRAINED = "drained"
83 _NR_MASTER_CANDIATE = "master-candidate"
85 _NR_OFFLINE = "offline"
86 _NR_REGULAR = "regular"
90 "C": _NR_MASTER_CANDIATE,
96 # Request data version field
97 _REQ_DATA_VERSION = "__version__"
99 # Feature string for instance creation request data version 1
100 _INST_CREATE_REQV1 = "instance-create-reqv1"
102 # Feature string for instance reinstall request version 1
103 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
105 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
109 class R_version(baserlib.R_Generic):
110 """/version resource.
112 This resource should be used to determine the remote API version and
113 to adapt clients accordingly.
118 """Returns the remote API version.
121 return constants.RAPI_VERSION
124 class R_2_info(baserlib.R_Generic):
130 """Returns cluster information.
133 client = baserlib.GetClient()
134 return client.QueryClusterInfo()
137 class R_2_features(baserlib.R_Generic):
138 """/2/features resource.
143 """Returns list of optional RAPI features implemented.
146 return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
149 class R_2_os(baserlib.R_Generic):
155 """Return a list of all OSes.
157 Can return error 500 in case of a problem.
159 Example: ["debian-etch"]
162 cl = baserlib.GetClient()
163 op = opcodes.OpDiagnoseOS(output_fields=["name", "variants"], names=[])
164 job_id = baserlib.SubmitJob([op], cl)
165 # we use custom feedback function, instead of print we log the status
166 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
167 diagnose_data = result[0]
169 if not isinstance(diagnose_data, list):
170 raise http.HttpBadGateway(message="Can't get OS list")
173 for (name, variants) in diagnose_data:
174 os_names.extend(cli.CalculateOSNames(name, variants))
179 class R_2_redist_config(baserlib.R_Generic):
180 """/2/redistribute-config resource.
185 """Redistribute configuration to all nodes.
188 return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
191 class R_2_cluster_modify(baserlib.R_Generic):
192 """/2/modify resource.
196 """Modifies cluster parameters.
201 op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
204 return baserlib.SubmitJob([op])
207 class R_2_jobs(baserlib.R_Generic):
213 """Returns a dictionary of jobs.
215 @return: a dictionary with jobs id and uri.
219 cl = baserlib.GetClient()
220 # Convert the list of lists to the list of ids
221 result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
222 return baserlib.BuildUriList(result, "/2/jobs/%s",
223 uri_fields=("id", "uri"))
226 class R_2_jobs_id(baserlib.R_Generic):
227 """/2/jobs/[job_id] resource.
231 """Returns a job status.
233 @return: a dictionary with job parameters.
235 - id: job ID as a number
236 - status: current job status as a string
237 - ops: involved OpCodes as a list of dictionaries for each
239 - opstatus: OpCodes status as a list
240 - opresult: OpCodes results as a list of lists
243 fields = ["id", "ops", "status", "summary",
244 "opstatus", "opresult", "oplog",
245 "received_ts", "start_ts", "end_ts",
247 job_id = self.items[0]
248 result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
250 raise http.HttpNotFound()
251 return baserlib.MapFields(fields, result)
254 """Cancel not-yet-started job.
257 job_id = self.items[0]
258 result = baserlib.GetClient().CancelJob(job_id)
262 class R_2_jobs_id_wait(baserlib.R_Generic):
263 """/2/jobs/[job_id]/wait resource.
266 # WaitForJobChange provides access to sensitive information and blocks
267 # machine resources (it's a blocking RAPI call), hence restricting access.
268 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
271 """Waits for job changes.
274 job_id = self.items[0]
276 fields = self.getBodyParameter("fields")
277 prev_job_info = self.getBodyParameter("previous_job_info", None)
278 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
280 if not isinstance(fields, list):
281 raise http.HttpBadRequest("The 'fields' parameter should be a list")
283 if not (prev_job_info is None or isinstance(prev_job_info, list)):
284 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
287 if not (prev_log_serial is None or
288 isinstance(prev_log_serial, (int, long))):
289 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
292 client = baserlib.GetClient()
293 result = client.WaitForJobChangeOnce(job_id, fields,
294 prev_job_info, prev_log_serial,
295 timeout=_WFJC_TIMEOUT)
297 raise http.HttpNotFound()
299 if result == constants.JOB_NOTCHANGED:
303 (job_info, log_entries) = result
306 "job_info": job_info,
307 "log_entries": log_entries,
311 class R_2_nodes(baserlib.R_Generic):
312 """/2/nodes resource.
316 """Returns a list of all nodes.
319 client = baserlib.GetClient()
322 bulkdata = client.QueryNodes([], N_FIELDS, False)
323 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
325 nodesdata = client.QueryNodes([], ["name"], False)
326 nodeslist = [row[0] for row in nodesdata]
327 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
328 uri_fields=("id", "uri"))
331 class R_2_nodes_name(baserlib.R_Generic):
332 """/2/nodes/[node_name] resource.
336 """Send information about a node.
339 node_name = self.items[0]
340 client = baserlib.GetClient()
342 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
343 names=[node_name], fields=N_FIELDS,
344 use_locking=self.useLocking())
346 return baserlib.MapFields(N_FIELDS, result[0])
349 class R_2_nodes_name_role(baserlib.R_Generic):
350 """ /2/nodes/[node_name]/role resource.
354 """Returns the current node role.
359 node_name = self.items[0]
360 client = baserlib.GetClient()
361 result = client.QueryNodes(names=[node_name], fields=["role"],
362 use_locking=self.useLocking())
364 return _NR_MAP[result[0][0]]
367 """Sets the node role.
372 if not isinstance(self.request_body, basestring):
373 raise http.HttpBadRequest("Invalid body contents, not a string")
375 node_name = self.items[0]
376 role = self.request_body
378 if role == _NR_REGULAR:
383 elif role == _NR_MASTER_CANDIATE:
385 offline = drained = None
387 elif role == _NR_DRAINED:
389 candidate = offline = None
391 elif role == _NR_OFFLINE:
393 candidate = drained = None
396 raise http.HttpBadRequest("Can't set '%s' role" % role)
398 op = opcodes.OpSetNodeParams(node_name=node_name,
399 master_candidate=candidate,
402 force=bool(self.useForce()))
404 return baserlib.SubmitJob([op])
407 class R_2_nodes_name_evacuate(baserlib.R_Generic):
408 """/2/nodes/[node_name]/evacuate resource.
412 """Evacuate all secondary instances off a node.
415 node_name = self.items[0]
416 remote_node = self._checkStringVariable("remote_node", default=None)
417 iallocator = self._checkStringVariable("iallocator", default=None)
418 early_r = bool(self._checkIntVariable("early_release", default=0))
419 dry_run = bool(self.dryRun())
421 cl = baserlib.GetClient()
423 op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
424 iallocator=iallocator,
425 remote_node=remote_node)
427 job_id = baserlib.SubmitJob([op], cl)
428 # we use custom feedback function, instead of print we log the status
429 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
432 for iname, node in result:
436 op = opcodes.OpReplaceDisks(instance_name=iname,
437 remote_node=node, disks=[],
438 mode=constants.REPLACE_DISK_CHG,
439 early_release=early_r)
440 jid = baserlib.SubmitJob([op])
441 jobs.append((jid, iname, node))
446 class R_2_nodes_name_migrate(baserlib.R_Generic):
447 """/2/nodes/[node_name]/migrate resource.
451 """Migrate all primary instances from a node.
454 node_name = self.items[0]
456 if "live" in self.queryargs and "mode" in self.queryargs:
457 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
459 elif "live" in self.queryargs:
460 if self._checkIntVariable("live", default=1):
461 mode = constants.HT_MIGRATION_LIVE
463 mode = constants.HT_MIGRATION_NONLIVE
465 mode = self._checkStringVariable("mode", default=None)
467 op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)
469 return baserlib.SubmitJob([op])
472 class R_2_nodes_name_storage(baserlib.R_Generic):
473 """/2/nodes/[node_name]/storage resource.
476 # LUQueryNodeStorage acquires locks, hence restricting access to GET
477 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
480 node_name = self.items[0]
482 storage_type = self._checkStringVariable("storage_type", None)
484 raise http.HttpBadRequest("Missing the required 'storage_type'"
487 output_fields = self._checkStringVariable("output_fields", None)
488 if not output_fields:
489 raise http.HttpBadRequest("Missing the required 'output_fields'"
492 op = opcodes.OpQueryNodeStorage(nodes=[node_name],
493 storage_type=storage_type,
494 output_fields=output_fields.split(","))
495 return baserlib.SubmitJob([op])
498 class R_2_nodes_name_storage_modify(baserlib.R_Generic):
499 """/2/nodes/[node_name]/storage/modify resource.
503 node_name = self.items[0]
505 storage_type = self._checkStringVariable("storage_type", None)
507 raise http.HttpBadRequest("Missing the required 'storage_type'"
510 name = self._checkStringVariable("name", None)
512 raise http.HttpBadRequest("Missing the required 'name'"
517 if "allocatable" in self.queryargs:
518 changes[constants.SF_ALLOCATABLE] = \
519 bool(self._checkIntVariable("allocatable", default=1))
521 op = opcodes.OpModifyNodeStorage(node_name=node_name,
522 storage_type=storage_type,
525 return baserlib.SubmitJob([op])
528 class R_2_nodes_name_storage_repair(baserlib.R_Generic):
529 """/2/nodes/[node_name]/storage/repair resource.
533 node_name = self.items[0]
535 storage_type = self._checkStringVariable("storage_type", None)
537 raise http.HttpBadRequest("Missing the required 'storage_type'"
540 name = self._checkStringVariable("name", None)
542 raise http.HttpBadRequest("Missing the required 'name'"
545 op = opcodes.OpRepairNodeStorage(node_name=node_name,
546 storage_type=storage_type,
548 return baserlib.SubmitJob([op])
551 def _ParseCreateGroupRequest(data, dry_run):
552 """Parses a request for creating a node group.
554 @rtype: L{opcodes.OpGroupAdd}
555 @return: Group creation opcode
558 group_name = baserlib.CheckParameter(data, "name")
559 alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
561 return opcodes.OpGroupAdd(group_name=group_name,
562 alloc_policy=alloc_policy,
566 class R_2_groups(baserlib.R_Generic):
567 """/2/groups resource.
571 """Returns a list of all node groups.
574 client = baserlib.GetClient()
577 bulkdata = client.QueryGroups([], G_FIELDS, False)
578 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
580 data = client.QueryGroups([], ["name"], False)
581 groupnames = [row[0] for row in data]
582 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
583 uri_fields=("name", "uri"))
586 """Create a node group.
591 baserlib.CheckType(self.request_body, dict, "Body contents")
592 op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
593 return baserlib.SubmitJob([op])
596 class R_2_groups_name(baserlib.R_Generic):
597 """/2/groups/[group_name] resource.
601 """Send information about a node group.
604 group_name = self.items[0]
605 client = baserlib.GetClient()
607 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
608 names=[group_name], fields=G_FIELDS,
609 use_locking=self.useLocking())
611 return baserlib.MapFields(G_FIELDS, result[0])
614 """Delete a node group.
617 op = opcodes.OpGroupRemove(group_name=self.items[0],
618 dry_run=bool(self.dryRun()))
620 return baserlib.SubmitJob([op])
def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @type name: string
  @param name: name of the node group to modify
  @type data: dict
  @param data: the body received by the modify request
  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  # "alloc_policy" is the only modifiable attribute; absent means "no change"
  policy = baserlib.CheckParameter(data, "alloc_policy", default=None)

  return opcodes.OpGroupSetParams(group_name=name, alloc_policy=policy)
634 class R_2_groups_name_modify(baserlib.R_Generic):
635 """/2/groups/[group_name]/modify resource.
639 """Changes some parameters of node group.
644 baserlib.CheckType(self.request_body, dict, "Body contents")
646 op = _ParseModifyGroupRequest(self.items[0], self.request_body)
648 return baserlib.SubmitJob([op])
651 def _ParseRenameGroupRequest(name, data, dry_run):
652 """Parses a request for renaming a node group.
655 @param name: name of the node group to rename
657 @param data: the body received by the rename request
659 @param dry_run: whether to perform a dry run
661 @rtype: L{opcodes.OpGroupRename}
662 @return: Node group rename opcode
666 new_name = baserlib.CheckParameter(data, "new_name")
668 return opcodes.OpGroupRename(old_name=old_name, new_name=new_name,
672 class R_2_groups_name_rename(baserlib.R_Generic):
673 """/2/groups/[group_name]/rename resource.
677 """Changes the name of a node group.
682 baserlib.CheckType(self.request_body, dict, "Body contents")
683 op = _ParseRenameGroupRequest(self.items[0], self.request_body,
685 return baserlib.SubmitJob([op])
688 class R_2_groups_name_assign_nodes(baserlib.R_Generic):
689 """/2/groups/[group_name]/assign-nodes resource.
693 """Assigns nodes to a group.
698 op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
699 "group_name": self.items[0],
700 "dry_run": self.dryRun(),
701 "force": self.useForce(),
704 return baserlib.SubmitJob([op])
707 def _ParseInstanceCreateRequestVersion1(data, dry_run):
708 """Parses an instance creation request version 1.
710 @rtype: L{opcodes.OpInstanceCreate}
711 @return: Instance creation opcode
715 disks_input = baserlib.CheckParameter(data, "disks", exptype=list)
718 for idx, i in enumerate(disks_input):
719 baserlib.CheckType(i, dict, "Disk %d specification" % idx)
723 size = i[constants.IDISK_SIZE]
725 raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
729 constants.IDISK_SIZE: size,
732 # Optional disk access mode
734 disk_access = i[constants.IDISK_MODE]
738 disk[constants.IDISK_MODE] = disk_access
742 assert len(disks_input) == len(disks)
745 nics_input = baserlib.CheckParameter(data, "nics", exptype=list)
748 for idx, i in enumerate(nics_input):
749 baserlib.CheckType(i, dict, "NIC %d specification" % idx)
753 for field in constants.INIC_PARAMS:
763 assert len(nics_input) == len(nics)
766 hvparams = baserlib.CheckParameter(data, "hvparams", default={})
767 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
769 beparams = baserlib.CheckParameter(data, "beparams", default={})
770 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
772 return opcodes.OpInstanceCreate(
773 mode=baserlib.CheckParameter(data, "mode"),
774 instance_name=baserlib.CheckParameter(data, "name"),
775 os_type=baserlib.CheckParameter(data, "os"),
776 osparams=baserlib.CheckParameter(data, "osparams", default={}),
777 force_variant=baserlib.CheckParameter(data, "force_variant",
779 no_install=baserlib.CheckParameter(data, "no_install", default=False),
780 pnode=baserlib.CheckParameter(data, "pnode", default=None),
781 snode=baserlib.CheckParameter(data, "snode", default=None),
782 disk_template=baserlib.CheckParameter(data, "disk_template"),
785 src_node=baserlib.CheckParameter(data, "src_node", default=None),
786 src_path=baserlib.CheckParameter(data, "src_path", default=None),
787 start=baserlib.CheckParameter(data, "start", default=True),
789 ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
790 name_check=baserlib.CheckParameter(data, "name_check", default=True),
791 file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
793 file_driver=baserlib.CheckParameter(data, "file_driver",
794 default=constants.FD_LOOP),
795 source_handshake=baserlib.CheckParameter(data, "source_handshake",
797 source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
799 source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
801 iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
802 hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
809 class R_2_instances(baserlib.R_Generic):
810 """/2/instances resource.
814 """Returns a list of all available instances.
817 client = baserlib.GetClient()
819 use_locking = self.useLocking()
821 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
822 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
824 instancesdata = client.QueryInstances([], ["name"], use_locking)
825 instanceslist = [row[0] for row in instancesdata]
826 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
827 uri_fields=("id", "uri"))
829 def _ParseVersion0CreateRequest(self):
830 """Parses an instance creation request version 0.
832 Request data version 0 is deprecated and should not be used anymore.
834 @rtype: L{opcodes.OpInstanceCreate}
835 @return: Instance creation opcode
838 # Do not modify anymore, request data version 0 is deprecated
839 beparams = baserlib.MakeParamsDict(self.request_body,
840 constants.BES_PARAMETERS)
841 hvparams = baserlib.MakeParamsDict(self.request_body,
842 constants.HVS_PARAMETERS)
843 fn = self.getBodyParameter
846 disk_data = fn('disks')
847 if not isinstance(disk_data, list):
848 raise http.HttpBadRequest("The 'disks' parameter should be a list")
850 for idx, d in enumerate(disk_data):
851 if not isinstance(d, int):
852 raise http.HttpBadRequest("Disk %d specification wrong: should"
853 " be an integer" % idx)
854 disks.append({"size": d})
856 # nic processing (one nic only)
857 nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
858 if fn("ip", None) is not None:
859 nics[0]["ip"] = fn("ip")
860 if fn("mode", None) is not None:
861 nics[0]["mode"] = fn("mode")
862 if fn("link", None) is not None:
863 nics[0]["link"] = fn("link")
864 if fn("bridge", None) is not None:
865 nics[0]["bridge"] = fn("bridge")
867 # Do not modify anymore, request data version 0 is deprecated
868 return opcodes.OpInstanceCreate(
869 mode=constants.INSTANCE_CREATE,
870 instance_name=fn('name'),
872 disk_template=fn('disk_template'),
874 pnode=fn('pnode', None),
875 snode=fn('snode', None),
876 iallocator=fn('iallocator', None),
878 start=fn('start', True),
879 ip_check=fn('ip_check', True),
880 name_check=fn('name_check', True),
882 hypervisor=fn('hypervisor', None),
885 file_storage_dir=fn('file_storage_dir', None),
886 file_driver=fn('file_driver', constants.FD_LOOP),
887 dry_run=bool(self.dryRun()),
891 """Create an instance.
896 if not isinstance(self.request_body, dict):
897 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
899 # Default to request data version 0
900 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
902 if data_version == 0:
903 op = self._ParseVersion0CreateRequest()
904 elif data_version == 1:
905 op = _ParseInstanceCreateRequestVersion1(self.request_body,
908 raise http.HttpBadRequest("Unsupported request data version %s" %
911 return baserlib.SubmitJob([op])
914 class R_2_instances_name(baserlib.R_Generic):
915 """/2/instances/[instance_name] resource.
919 """Send information about an instance.
922 client = baserlib.GetClient()
923 instance_name = self.items[0]
925 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
926 names=[instance_name],
928 use_locking=self.useLocking())
930 return baserlib.MapFields(I_FIELDS, result[0])
933 """Delete an instance.
936 op = opcodes.OpRemoveInstance(instance_name=self.items[0],
937 ignore_failures=False,
938 dry_run=bool(self.dryRun()))
939 return baserlib.SubmitJob([op])
942 class R_2_instances_name_info(baserlib.R_Generic):
943 """/2/instances/[instance_name]/info resource.
947 """Request detailed instance information.
950 instance_name = self.items[0]
951 static = bool(self._checkIntVariable("static", default=0))
953 op = opcodes.OpQueryInstanceData(instances=[instance_name],
955 return baserlib.SubmitJob([op])
958 class R_2_instances_name_reboot(baserlib.R_Generic):
959 """/2/instances/[instance_name]/reboot resource.
961 Implements an instance reboot.
965 """Reboot an instance.
967 The URI takes type=[hard|soft|full] and
968 ignore_secondaries=[False|True] parameters.
971 instance_name = self.items[0]
972 reboot_type = self.queryargs.get('type',
973 [constants.INSTANCE_REBOOT_HARD])[0]
974 ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
975 op = opcodes.OpRebootInstance(instance_name=instance_name,
976 reboot_type=reboot_type,
977 ignore_secondaries=ignore_secondaries,
978 dry_run=bool(self.dryRun()))
980 return baserlib.SubmitJob([op])
983 class R_2_instances_name_startup(baserlib.R_Generic):
984 """/2/instances/[instance_name]/startup resource.
986 Implements an instance startup.
990 """Startup an instance.
992 The URI takes force=[False|True] parameter to start the instance
993 if even if secondary disks are failing.
996 instance_name = self.items[0]
997 force_startup = bool(self._checkIntVariable('force'))
998 op = opcodes.OpStartupInstance(instance_name=instance_name,
1000 dry_run=bool(self.dryRun()))
1002 return baserlib.SubmitJob([op])
1005 class R_2_instances_name_shutdown(baserlib.R_Generic):
1006 """/2/instances/[instance_name]/shutdown resource.
1008 Implements an instance shutdown.
1012 """Shutdown an instance.
1015 instance_name = self.items[0]
1016 op = opcodes.OpShutdownInstance(instance_name=instance_name,
1017 dry_run=bool(self.dryRun()))
1019 return baserlib.SubmitJob([op])
1022 def _ParseInstanceReinstallRequest(name, data):
1023 """Parses a request for reinstalling an instance.
1026 if not isinstance(data, dict):
1027 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1029 ostype = baserlib.CheckParameter(data, "os")
1030 start = baserlib.CheckParameter(data, "start", exptype=bool,
1032 osparams = baserlib.CheckParameter(data, "osparams", default=None)
1035 opcodes.OpShutdownInstance(instance_name=name),
1036 opcodes.OpReinstallInstance(instance_name=name, os_type=ostype,
1041 ops.append(opcodes.OpStartupInstance(instance_name=name, force=False))
1046 class R_2_instances_name_reinstall(baserlib.R_Generic):
1047 """/2/instances/[instance_name]/reinstall resource.
1049 Implements an instance reinstall.
1053 """Reinstall an instance.
1055 The URI takes os=name and nostartup=[0|1] optional
1056 parameters. By default, the instance will be started
1060 if self.request_body:
1062 raise http.HttpBadRequest("Can't combine query and body parameters")
1064 body = self.request_body
1066 if not self.queryargs:
1067 raise http.HttpBadRequest("Missing query parameters")
1068 # Legacy interface, do not modify/extend
1070 "os": self._checkStringVariable("os"),
1071 "start": not self._checkIntVariable("nostartup"),
1074 ops = _ParseInstanceReinstallRequest(self.items[0], body)
1076 return baserlib.SubmitJob(ops)
1079 class R_2_instances_name_replace_disks(baserlib.R_Generic):
1080 """/2/instances/[instance_name]/replace-disks resource.
1084 """Replaces disks on an instance.
1087 instance_name = self.items[0]
1088 remote_node = self._checkStringVariable("remote_node", default=None)
1089 mode = self._checkStringVariable("mode", default=None)
1090 raw_disks = self._checkStringVariable("disks", default=None)
1091 iallocator = self._checkStringVariable("iallocator", default=None)
1095 disks = [int(part) for part in raw_disks.split(",")]
1096 except ValueError, err:
1097 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1101 op = opcodes.OpReplaceDisks(instance_name=instance_name,
1102 remote_node=remote_node,
1105 iallocator=iallocator)
1107 return baserlib.SubmitJob([op])
1110 class R_2_instances_name_activate_disks(baserlib.R_Generic):
1111 """/2/instances/[instance_name]/activate-disks resource.
1115 """Activate disks for an instance.
1117 The URI might contain ignore_size to ignore current recorded size.
1120 instance_name = self.items[0]
1121 ignore_size = bool(self._checkIntVariable('ignore_size'))
1123 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1124 ignore_size=ignore_size)
1126 return baserlib.SubmitJob([op])
1129 class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
1130 """/2/instances/[instance_name]/deactivate-disks resource.
1134 """Deactivate disks for an instance.
1137 instance_name = self.items[0]
1139 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1141 return baserlib.SubmitJob([op])
1144 class R_2_instances_name_prepare_export(baserlib.R_Generic):
1145 """/2/instances/[instance_name]/prepare-export resource.
1149 """Prepares an export for an instance.
1154 instance_name = self.items[0]
1155 mode = self._checkStringVariable("mode")
1157 op = opcodes.OpBackupPrepare(instance_name=instance_name,
1160 return baserlib.SubmitJob([op])
1163 def _ParseExportInstanceRequest(name, data):
1164 """Parses a request for an instance export.
1166 @rtype: L{opcodes.OpBackupExport}
1167 @return: Instance export opcode
1170 mode = baserlib.CheckParameter(data, "mode",
1171 default=constants.EXPORT_MODE_LOCAL)
1172 target_node = baserlib.CheckParameter(data, "destination")
1173 shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
1174 remove_instance = baserlib.CheckParameter(data, "remove_instance",
1175 exptype=bool, default=False)
1176 x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
1177 destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
1180 return opcodes.OpBackupExport(instance_name=name,
1182 target_node=target_node,
1184 remove_instance=remove_instance,
1185 x509_key_name=x509_key_name,
1186 destination_x509_ca=destination_x509_ca)
1189 class R_2_instances_name_export(baserlib.R_Generic):
1190 """/2/instances/[instance_name]/export resource.
1194 """Exports an instance.
1199 if not isinstance(self.request_body, dict):
1200 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1202 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1204 return baserlib.SubmitJob([op])
1207 def _ParseMigrateInstanceRequest(name, data):
1208 """Parses a request for an instance migration.
1210 @rtype: L{opcodes.OpInstanceMigrate}
1211 @return: Instance migration opcode
1214 mode = baserlib.CheckParameter(data, "mode", default=None)
1215 cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
1218 return opcodes.OpInstanceMigrate(instance_name=name, mode=mode,
1222 class R_2_instances_name_migrate(baserlib.R_Generic):
1223 """/2/instances/[instance_name]/migrate resource.
1227 """Migrates an instance.
1232 baserlib.CheckType(self.request_body, dict, "Body contents")
1234 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1236 return baserlib.SubmitJob([op])
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @type name: string
  @param name: name of the instance to rename
  @type data: dict
  @param data: the body received by the rename request
  @rtype: L{opcodes.OpRenameInstance}
  @return: Instance rename opcode

  """
  # "new_name" is mandatory; both sanity checks default to enabled
  new_name = baserlib.CheckParameter(data, "new_name")
  name_check = baserlib.CheckParameter(data, "name_check", default=True)
  ip_check = baserlib.CheckParameter(data, "ip_check", default=True)

  return opcodes.OpRenameInstance(instance_name=name, new_name=new_name,
                                  ip_check=ip_check, name_check=name_check)
1254 class R_2_instances_name_rename(baserlib.R_Generic):
1255 """/2/instances/[instance_name]/rename resource.
1259 """Changes the name of an instance.
1264 baserlib.CheckType(self.request_body, dict, "Body contents")
1266 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1268 return baserlib.SubmitJob([op])
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @type name: string
  @param name: name of the instance to modify
  @type data: dict
  @param data: the body received by the modify request
  @rtype: L{opcodes.OpSetInstanceParams}
  @return: Instance modify opcode

  """
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  beparams = baserlib.CheckParameter(data, "beparams", default={})

  # Besides their declared types, hypervisor and backend parameter values
  # may also be the special "default" placeholder
  for (params, types) in [(hvparams, constants.HVS_PARAMETER_TYPES),
                          (beparams, constants.BES_PARAMETER_TYPES)]:
    utils.ForceDictType(params, types,
                        allowed_values=[constants.VALUE_DEFAULT])

  # All remaining parameters are optional and fall back to "no change"
  return opcodes.OpSetInstanceParams(
    instance_name=name,
    hvparams=hvparams,
    beparams=beparams,
    osparams=baserlib.CheckParameter(data, "osparams", default={}),
    force=baserlib.CheckParameter(data, "force", default=False),
    nics=baserlib.CheckParameter(data, "nics", default=[]),
    disks=baserlib.CheckParameter(data, "disks", default=[]),
    disk_template=baserlib.CheckParameter(data, "disk_template", default=None),
    remote_node=baserlib.CheckParameter(data, "remote_node", default=None),
    os_name=baserlib.CheckParameter(data, "os_name", default=None),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False))
1304 class R_2_instances_name_modify(baserlib.R_Generic):
1305 """/2/instances/[instance_name]/modify resource.
1309 """Changes some parameters of an instance.
1314 baserlib.CheckType(self.request_body, dict, "Body contents")
1316 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1318 return baserlib.SubmitJob([op])
1321 class R_2_instances_name_disk_grow(baserlib.R_Generic):
1322 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1326 """Increases the size of an instance disk.
1331 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1332 "instance_name": self.items[0],
1333 "disk": int(self.items[1]),
1336 return baserlib.SubmitJob([op])
1339 class _R_Tags(baserlib.R_Generic):
1340 """ Quasiclass for tagging resources
1342 Manages tags. When inheriting this class you must define the
1348 def __init__(self, items, queryargs, req):
1349 """A tag resource constructor.
1351 We have to override the default to sort out cluster naming case.
1354 baserlib.R_Generic.__init__(self, items, queryargs, req)
1356 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1359 self.name = items[0]
1362 """Returns a list of tags.
1364 Example: ["tag1", "tag2", "tag3"]
1367 # pylint: disable-msg=W0212
1368 return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
1371 """Add a set of tags.
1373 The request as a list of strings should be PUT to this URI. And
1374 you'll have back a job id.
1377 # pylint: disable-msg=W0212
1378 if 'tag' not in self.queryargs:
1379 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1380 " the 'tag' parameter")
1381 return baserlib._Tags_PUT(self.TAG_LEVEL,
1382 self.queryargs['tag'], name=self.name,
1383 dry_run=bool(self.dryRun()))
1388 In order to delete a set of tags, the DELETE
1389 request should be addressed to URI like:
1390 /tags?tag=[tag]&tag=[tag]
1393 # pylint: disable-msg=W0212
1394 if 'tag' not in self.queryargs:
1395 # no, we are not going to delete all tags
1396 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1397 " tag(s) using the 'tag' parameter")
1398 return baserlib._Tags_DELETE(self.TAG_LEVEL,
1399 self.queryargs['tag'],
1401 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  # Tags reached through this URI are attached to individual instances
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  # Tags reached through this URI are attached to individual nodes
  TAG_LEVEL = constants.TAG_NODE
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  # Tags reached through this URI apply to the whole cluster
  TAG_LEVEL = constants.TAG_CLUSTER