4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API resource implementations.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
35 Quoting from RFC2616, section 9.6::
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
49 So when adding new methods, if they are operating on the URI entity itself,
50 PUT should be preferred over POST.
54 # pylint: disable-msg=C0103
56 # C0103: Invalid name, since the R_* names are not conforming
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
64 from ganeti import compat
65 from ganeti.rapi import baserlib
68 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
69 I_FIELDS = ["name", "admin_state", "os",
72 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
74 "disk.sizes", "disk_usage",
75 "beparams", "hvparams",
76 "oper_state", "oper_ram", "oper_vcpus", "status",
77 "custom_hvparams", "custom_beparams", "custom_nicparams",
80 N_FIELDS = ["name", "offline", "master_candidate", "drained",
82 "mtotal", "mnode", "mfree",
83 "pinst_cnt", "sinst_cnt",
84 "ctotal", "cnodes", "csockets",
86 "pinst_list", "sinst_list",
87 "master_capable", "vm_capable",
99 "id", "ops", "status", "summary",
101 "received_ts", "start_ts", "end_ts",
104 J_FIELDS = J_FIELDS_BULK + [
# Node role names as exposed over the RAPI (the internal constants.NR_*
# values are translated to these strings via _NR_MAP, defined below).
109 _NR_DRAINED = "drained"
# NOTE(review): "CANDIATE" is misspelled, but the identifier is referenced
# elsewhere in this module (_NR_MAP and the node-role PUT handler), so a
# rename must be done everywhere at once — left as-is here.
110 _NR_MASTER_CANDIATE = "master-candidate"
111 _NR_MASTER = "master"
112 _NR_OFFLINE = "offline"
113 _NR_REGULAR = "regular"
# Mapping of internal node role constants to their RAPI string form.
# NOTE(review): the "_NR_MAP = {" opening line and the closing brace are
# missing from this sampled view of the file.
116 constants.NR_MASTER: _NR_MASTER,
117 constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
118 constants.NR_DRAINED: _NR_DRAINED,
119 constants.NR_OFFLINE: _NR_OFFLINE,
120 constants.NR_REGULAR: _NR_REGULAR,
# Sanity check: every node role defined in constants must have a RAPI name.
123 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
# Name of the request-body key that carries the request data format version
# (see the instance-creation POST handler, which reads it and strips it from
# the body before building the opcode).
125 # Request data version field
126 _REQ_DATA_VERSION = "__version__"
# The feature strings below are advertised to clients (via ALL_FEATURES and
# the /2/features resource) so they can detect which request formats this
# server understands.
128 # Feature string for instance creation request data version 1
129 _INST_CREATE_REQV1 = "instance-create-reqv1"
131 # Feature string for instance reinstall request version 1
132 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
134 # Feature string for node migration version 1
135 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
137 # Feature string for node evacuation with LU-generated jobs
138 _NODE_EVAC_RES1 = "node-evac-res1"
140 ALL_FEATURES = frozenset([
142 _INST_REINSTALL_REQV1,
147 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
151 class R_root(baserlib.R_Generic):
157 """Supported for legacy reasons.
163 class R_version(baserlib.R_Generic):
164 """/version resource.
166 This resource should be used to determine the remote API version and
167 to adapt clients accordingly.
172 """Returns the remote API version.
175 return constants.RAPI_VERSION
178 class R_2_info(baserlib.R_Generic):
184 """Returns cluster information.
187 client = baserlib.GetClient()
188 return client.QueryClusterInfo()
191 class R_2_features(baserlib.R_Generic):
192 """/2/features resource.
197 """Returns list of optional RAPI features implemented.
200 return list(ALL_FEATURES)
203 class R_2_os(baserlib.R_Generic):
209 """Return a list of all OSes.
211 Can return error 500 in case of a problem.
213 Example: ["debian-etch"]
216 cl = baserlib.GetClient()
217 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
218 job_id = baserlib.SubmitJob([op], cl)
219 # we use custom feedback function, instead of print we log the status
220 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
221 diagnose_data = result[0]
223 if not isinstance(diagnose_data, list):
224 raise http.HttpBadGateway(message="Can't get OS list")
227 for (name, variants) in diagnose_data:
228 os_names.extend(cli.CalculateOSNames(name, variants))
233 class R_2_redist_config(baserlib.R_Generic):
234 """/2/redistribute-config resource.
239 """Redistribute configuration to all nodes.
242 return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
245 class R_2_cluster_modify(baserlib.R_Generic):
246 """/2/modify resource.
250 """Modifies cluster parameters.
255 op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
258 return baserlib.SubmitJob([op])
261 class R_2_jobs(baserlib.R_Generic):
266 """Returns a dictionary of jobs.
268 @return: a dictionary with jobs id and uri.
271 client = baserlib.GetClient()
274 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
275 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
277 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
278 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
279 uri_fields=("id", "uri"))
282 class R_2_jobs_id(baserlib.R_Generic):
283 """/2/jobs/[job_id] resource.
287 """Returns a job status.
289 @return: a dictionary with job parameters.
291 - id: job ID as a number
292 - status: current job status as a string
293 - ops: involved OpCodes as a list of dictionaries for each
295 - opstatus: OpCodes status as a list
296 - opresult: OpCodes results as a list of lists
299 job_id = self.items[0]
300 result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
302 raise http.HttpNotFound()
303 return baserlib.MapFields(J_FIELDS, result)
306 """Cancel not-yet-started job.
309 job_id = self.items[0]
310 result = baserlib.GetClient().CancelJob(job_id)
314 class R_2_jobs_id_wait(baserlib.R_Generic):
315 """/2/jobs/[job_id]/wait resource.
318 # WaitForJobChange provides access to sensitive information and blocks
319 # machine resources (it's a blocking RAPI call), hence restricting access.
320 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
323 """Waits for job changes.
326 job_id = self.items[0]
328 fields = self.getBodyParameter("fields")
329 prev_job_info = self.getBodyParameter("previous_job_info", None)
330 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
332 if not isinstance(fields, list):
333 raise http.HttpBadRequest("The 'fields' parameter should be a list")
335 if not (prev_job_info is None or isinstance(prev_job_info, list)):
336 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
339 if not (prev_log_serial is None or
340 isinstance(prev_log_serial, (int, long))):
341 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
344 client = baserlib.GetClient()
345 result = client.WaitForJobChangeOnce(job_id, fields,
346 prev_job_info, prev_log_serial,
347 timeout=_WFJC_TIMEOUT)
349 raise http.HttpNotFound()
351 if result == constants.JOB_NOTCHANGED:
355 (job_info, log_entries) = result
358 "job_info": job_info,
359 "log_entries": log_entries,
363 class R_2_nodes(baserlib.R_Generic):
364 """/2/nodes resource.
368 """Returns a list of all nodes.
371 client = baserlib.GetClient()
374 bulkdata = client.QueryNodes([], N_FIELDS, False)
375 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
377 nodesdata = client.QueryNodes([], ["name"], False)
378 nodeslist = [row[0] for row in nodesdata]
379 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
380 uri_fields=("id", "uri"))
383 class R_2_nodes_name(baserlib.R_Generic):
384 """/2/nodes/[node_name] resource.
388 """Send information about a node.
391 node_name = self.items[0]
392 client = baserlib.GetClient()
394 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
395 names=[node_name], fields=N_FIELDS,
396 use_locking=self.useLocking())
398 return baserlib.MapFields(N_FIELDS, result[0])
401 class R_2_nodes_name_role(baserlib.R_Generic):
402 """ /2/nodes/[node_name]/role resource.
406 """Returns the current node role.
411 node_name = self.items[0]
412 client = baserlib.GetClient()
413 result = client.QueryNodes(names=[node_name], fields=["role"],
414 use_locking=self.useLocking())
416 return _NR_MAP[result[0][0]]
419 """Sets the node role.
424 if not isinstance(self.request_body, basestring):
425 raise http.HttpBadRequest("Invalid body contents, not a string")
427 node_name = self.items[0]
428 role = self.request_body
430 if role == _NR_REGULAR:
435 elif role == _NR_MASTER_CANDIATE:
437 offline = drained = None
439 elif role == _NR_DRAINED:
441 candidate = offline = None
443 elif role == _NR_OFFLINE:
445 candidate = drained = None
448 raise http.HttpBadRequest("Can't set '%s' role" % role)
450 op = opcodes.OpNodeSetParams(node_name=node_name,
451 master_candidate=candidate,
454 force=bool(self.useForce()))
456 return baserlib.SubmitJob([op])
459 class R_2_nodes_name_evacuate(baserlib.R_Generic):
460 """/2/nodes/[node_name]/evacuate resource.
464 """Evacuate all instances off a node.
467 op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
468 "node_name": self.items[0],
469 "dry_run": self.dryRun(),
472 return baserlib.SubmitJob([op])
475 class R_2_nodes_name_migrate(baserlib.R_Generic):
476 """/2/nodes/[node_name]/migrate resource.
480 """Migrate all primary instances from a node.
483 node_name = self.items[0]
486 # Support old-style requests
487 if "live" in self.queryargs and "mode" in self.queryargs:
488 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
491 if "live" in self.queryargs:
492 if self._checkIntVariable("live", default=1):
493 mode = constants.HT_MIGRATION_LIVE
495 mode = constants.HT_MIGRATION_NONLIVE
497 mode = self._checkStringVariable("mode", default=None)
503 data = self.request_body
505 op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
506 "node_name": node_name,
509 return baserlib.SubmitJob([op])
512 class R_2_nodes_name_storage(baserlib.R_Generic):
513 """/2/nodes/[node_name]/storage resource.
516 # LUNodeQueryStorage acquires locks, hence restricting access to GET
517 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
520 node_name = self.items[0]
522 storage_type = self._checkStringVariable("storage_type", None)
524 raise http.HttpBadRequest("Missing the required 'storage_type'"
527 output_fields = self._checkStringVariable("output_fields", None)
528 if not output_fields:
529 raise http.HttpBadRequest("Missing the required 'output_fields'"
532 op = opcodes.OpNodeQueryStorage(nodes=[node_name],
533 storage_type=storage_type,
534 output_fields=output_fields.split(","))
535 return baserlib.SubmitJob([op])
538 class R_2_nodes_name_storage_modify(baserlib.R_Generic):
539 """/2/nodes/[node_name]/storage/modify resource.
543 node_name = self.items[0]
545 storage_type = self._checkStringVariable("storage_type", None)
547 raise http.HttpBadRequest("Missing the required 'storage_type'"
550 name = self._checkStringVariable("name", None)
552 raise http.HttpBadRequest("Missing the required 'name'"
557 if "allocatable" in self.queryargs:
558 changes[constants.SF_ALLOCATABLE] = \
559 bool(self._checkIntVariable("allocatable", default=1))
561 op = opcodes.OpNodeModifyStorage(node_name=node_name,
562 storage_type=storage_type,
565 return baserlib.SubmitJob([op])
568 class R_2_nodes_name_storage_repair(baserlib.R_Generic):
569 """/2/nodes/[node_name]/storage/repair resource.
573 node_name = self.items[0]
575 storage_type = self._checkStringVariable("storage_type", None)
577 raise http.HttpBadRequest("Missing the required 'storage_type'"
580 name = self._checkStringVariable("name", None)
582 raise http.HttpBadRequest("Missing the required 'name'"
585 op = opcodes.OpRepairNodeStorage(node_name=node_name,
586 storage_type=storage_type,
588 return baserlib.SubmitJob([op])
591 def _ParseCreateGroupRequest(data, dry_run):
592 """Parses a request for creating a node group.
594 @rtype: L{opcodes.OpGroupAdd}
595 @return: Group creation opcode
603 "name": "group_name",
606 return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
610 class R_2_groups(baserlib.R_Generic):
611 """/2/groups resource.
615 """Returns a list of all node groups.
618 client = baserlib.GetClient()
621 bulkdata = client.QueryGroups([], G_FIELDS, False)
622 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
624 data = client.QueryGroups([], ["name"], False)
625 groupnames = [row[0] for row in data]
626 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
627 uri_fields=("name", "uri"))
630 """Create a node group.
635 baserlib.CheckType(self.request_body, dict, "Body contents")
636 op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
637 return baserlib.SubmitJob([op])
640 class R_2_groups_name(baserlib.R_Generic):
641 """/2/groups/[group_name] resource.
645 """Send information about a node group.
648 group_name = self.items[0]
649 client = baserlib.GetClient()
651 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
652 names=[group_name], fields=G_FIELDS,
653 use_locking=self.useLocking())
655 return baserlib.MapFields(G_FIELDS, result[0])
658 """Delete a node group.
661 op = opcodes.OpGroupRemove(group_name=self.items[0],
662 dry_run=bool(self.dryRun()))
664 return baserlib.SubmitJob([op])
667 def _ParseModifyGroupRequest(name, data):
668 """Parses a request for modifying a node group.
670 @rtype: L{opcodes.OpGroupSetParams}
671 @return: Group modify opcode
674 return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
679 class R_2_groups_name_modify(baserlib.R_Generic):
680 """/2/groups/[group_name]/modify resource.
684 """Changes some parameters of node group.
689 baserlib.CheckType(self.request_body, dict, "Body contents")
691 op = _ParseModifyGroupRequest(self.items[0], self.request_body)
693 return baserlib.SubmitJob([op])
696 def _ParseRenameGroupRequest(name, data, dry_run):
697 """Parses a request for renaming a node group.
700 @param name: name of the node group to rename
702 @param data: the body received by the rename request
704 @param dry_run: whether to perform a dry run
706 @rtype: L{opcodes.OpGroupRename}
707 @return: Node group rename opcode
710 return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
716 class R_2_groups_name_rename(baserlib.R_Generic):
717 """/2/groups/[group_name]/rename resource.
721 """Changes the name of a node group.
726 baserlib.CheckType(self.request_body, dict, "Body contents")
727 op = _ParseRenameGroupRequest(self.items[0], self.request_body,
729 return baserlib.SubmitJob([op])
732 class R_2_groups_name_assign_nodes(baserlib.R_Generic):
733 """/2/groups/[group_name]/assign-nodes resource.
737 """Assigns nodes to a group.
742 op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
743 "group_name": self.items[0],
744 "dry_run": self.dryRun(),
745 "force": self.useForce(),
748 return baserlib.SubmitJob([op])
751 def _ParseInstanceCreateRequestVersion1(data, dry_run):
752 """Parses an instance creation request version 1.
754 @rtype: L{opcodes.OpInstanceCreate}
755 @return: Instance creation opcode
764 "name": "instance_name",
767 return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
771 class R_2_instances(baserlib.R_Generic):
772 """/2/instances resource.
776 """Returns a list of all available instances.
779 client = baserlib.GetClient()
781 use_locking = self.useLocking()
783 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
784 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
786 instancesdata = client.QueryInstances([], ["name"], use_locking)
787 instanceslist = [row[0] for row in instancesdata]
788 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
789 uri_fields=("id", "uri"))
792 """Create an instance.
797 if not isinstance(self.request_body, dict):
798 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
800 # Default to request data version 0
801 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
803 if data_version == 0:
804 raise http.HttpBadRequest("Instance creation request version 0 is no"
806 elif data_version == 1:
807 data = self.request_body.copy()
808 # Remove "__version__"
809 data.pop(_REQ_DATA_VERSION, None)
810 op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
812 raise http.HttpBadRequest("Unsupported request data version %s" %
815 return baserlib.SubmitJob([op])
818 class R_2_instances_name(baserlib.R_Generic):
819 """/2/instances/[instance_name] resource.
823 """Send information about an instance.
826 client = baserlib.GetClient()
827 instance_name = self.items[0]
829 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
830 names=[instance_name],
832 use_locking=self.useLocking())
834 return baserlib.MapFields(I_FIELDS, result[0])
837 """Delete an instance.
840 op = opcodes.OpInstanceRemove(instance_name=self.items[0],
841 ignore_failures=False,
842 dry_run=bool(self.dryRun()))
843 return baserlib.SubmitJob([op])
846 class R_2_instances_name_info(baserlib.R_Generic):
847 """/2/instances/[instance_name]/info resource.
851 """Request detailed instance information.
854 instance_name = self.items[0]
855 static = bool(self._checkIntVariable("static", default=0))
857 op = opcodes.OpInstanceQueryData(instances=[instance_name],
859 return baserlib.SubmitJob([op])
862 class R_2_instances_name_reboot(baserlib.R_Generic):
863 """/2/instances/[instance_name]/reboot resource.
865 Implements an instance reboot.
869 """Reboot an instance.
871 The URI takes type=[hard|soft|full] and
872 ignore_secondaries=[False|True] parameters.
875 instance_name = self.items[0]
876 reboot_type = self.queryargs.get("type",
877 [constants.INSTANCE_REBOOT_HARD])[0]
878 ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
879 op = opcodes.OpInstanceReboot(instance_name=instance_name,
880 reboot_type=reboot_type,
881 ignore_secondaries=ignore_secondaries,
882 dry_run=bool(self.dryRun()))
884 return baserlib.SubmitJob([op])
887 class R_2_instances_name_startup(baserlib.R_Generic):
888 """/2/instances/[instance_name]/startup resource.
890 Implements an instance startup.
894 """Startup an instance.
896 The URI takes force=[False|True] parameter to start the instance
897 even if secondary disks are failing.
900 instance_name = self.items[0]
901 force_startup = bool(self._checkIntVariable("force"))
902 no_remember = bool(self._checkIntVariable("no_remember"))
903 op = opcodes.OpInstanceStartup(instance_name=instance_name,
905 dry_run=bool(self.dryRun()),
906 no_remember=no_remember)
908 return baserlib.SubmitJob([op])
911 def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
912 """Parses a request for an instance shutdown.
914 @rtype: L{opcodes.OpInstanceShutdown}
915 @return: Instance shutdown opcode
918 return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
919 "instance_name": name,
921 "no_remember": no_remember,
925 class R_2_instances_name_shutdown(baserlib.R_Generic):
926 """/2/instances/[instance_name]/shutdown resource.
928 Implements an instance shutdown.
932 """Shutdown an instance.
937 baserlib.CheckType(self.request_body, dict, "Body contents")
939 no_remember = bool(self._checkIntVariable("no_remember"))
940 op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
941 bool(self.dryRun()), no_remember)
943 return baserlib.SubmitJob([op])
946 def _ParseInstanceReinstallRequest(name, data):
947 """Parses a request for reinstalling an instance.
950 if not isinstance(data, dict):
951 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
953 ostype = baserlib.CheckParameter(data, "os", default=None)
954 start = baserlib.CheckParameter(data, "start", exptype=bool,
956 osparams = baserlib.CheckParameter(data, "osparams", default=None)
959 opcodes.OpInstanceShutdown(instance_name=name),
960 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
965 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
970 class R_2_instances_name_reinstall(baserlib.R_Generic):
971 """/2/instances/[instance_name]/reinstall resource.
973 Implements an instance reinstall.
977 """Reinstall an instance.
979 The URI takes os=name and nostartup=[0|1] optional
980 parameters. By default, the instance will be started
984 if self.request_body:
986 raise http.HttpBadRequest("Can't combine query and body parameters")
988 body = self.request_body
990 # Legacy interface, do not modify/extend
992 "os": self._checkStringVariable("os"),
993 "start": not self._checkIntVariable("nostartup"),
998 ops = _ParseInstanceReinstallRequest(self.items[0], body)
1000 return baserlib.SubmitJob(ops)
1003 def _ParseInstanceReplaceDisksRequest(name, data):
1004 """Parses a request for an instance export.
1006 @rtype: L{opcodes.OpInstanceReplaceDisks}
1007 @return: Instance export opcode
1011 "instance_name": name,
1016 raw_disks = data["disks"]
1020 if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
1021 # Backwards compatibility for strings of the format "1, 2, 3"
1023 data["disks"] = [int(part) for part in raw_disks.split(",")]
1024 except (TypeError, ValueError), err:
1025 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1027 return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
1030 class R_2_instances_name_replace_disks(baserlib.R_Generic):
1031 """/2/instances/[instance_name]/replace-disks resource.
1035 """Replaces disks on an instance.
1038 op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
1040 return baserlib.SubmitJob([op])
1043 class R_2_instances_name_activate_disks(baserlib.R_Generic):
1044 """/2/instances/[instance_name]/activate-disks resource.
1048 """Activate disks for an instance.
1050 The URI might contain ignore_size to ignore current recorded size.
1053 instance_name = self.items[0]
1054 ignore_size = bool(self._checkIntVariable("ignore_size"))
1056 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1057 ignore_size=ignore_size)
1059 return baserlib.SubmitJob([op])
1062 class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
1063 """/2/instances/[instance_name]/deactivate-disks resource.
1067 """Deactivate disks for an instance.
1070 instance_name = self.items[0]
1072 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1074 return baserlib.SubmitJob([op])
1077 class R_2_instances_name_prepare_export(baserlib.R_Generic):
1078 """/2/instances/[instance_name]/prepare-export resource.
1082 """Prepares an export for an instance.
1087 instance_name = self.items[0]
1088 mode = self._checkStringVariable("mode")
1090 op = opcodes.OpBackupPrepare(instance_name=instance_name,
1093 return baserlib.SubmitJob([op])
1096 def _ParseExportInstanceRequest(name, data):
1097 """Parses a request for an instance export.
1099 @rtype: L{opcodes.OpBackupExport}
1100 @return: Instance export opcode
1103 # Rename "destination" to "target_node"
1105 data["target_node"] = data.pop("destination")
1109 return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
1110 "instance_name": name,
1114 class R_2_instances_name_export(baserlib.R_Generic):
1115 """/2/instances/[instance_name]/export resource.
1119 """Exports an instance.
1124 if not isinstance(self.request_body, dict):
1125 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1127 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1129 return baserlib.SubmitJob([op])
1132 def _ParseMigrateInstanceRequest(name, data):
1133 """Parses a request for an instance migration.
1135 @rtype: L{opcodes.OpInstanceMigrate}
1136 @return: Instance migration opcode
1139 return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
1140 "instance_name": name,
1144 class R_2_instances_name_migrate(baserlib.R_Generic):
1145 """/2/instances/[instance_name]/migrate resource.
1149 """Migrates an instance.
1154 baserlib.CheckType(self.request_body, dict, "Body contents")
1156 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1158 return baserlib.SubmitJob([op])
1161 class R_2_instances_name_failover(baserlib.R_Generic):
1162 """/2/instances/[instance_name]/failover resource.
1166 """Does a failover of an instance.
1171 baserlib.CheckType(self.request_body, dict, "Body contents")
1173 op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
1174 "instance_name": self.items[0],
1177 return baserlib.SubmitJob([op])
1180 def _ParseRenameInstanceRequest(name, data):
1181 """Parses a request for renaming an instance.
1183 @rtype: L{opcodes.OpInstanceRename}
1184 @return: Instance rename opcode
1187 return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
1188 "instance_name": name,
1192 class R_2_instances_name_rename(baserlib.R_Generic):
1193 """/2/instances/[instance_name]/rename resource.
1197 """Changes the name of an instance.
1202 baserlib.CheckType(self.request_body, dict, "Body contents")
1204 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1206 return baserlib.SubmitJob([op])
1209 def _ParseModifyInstanceRequest(name, data):
1210 """Parses a request for modifying an instance.
1212 @rtype: L{opcodes.OpInstanceSetParams}
1213 @return: Instance modify opcode
1216 return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
1217 "instance_name": name,
1221 class R_2_instances_name_modify(baserlib.R_Generic):
1222 """/2/instances/[instance_name]/modify resource.
1226 """Changes some parameters of an instance.
1231 baserlib.CheckType(self.request_body, dict, "Body contents")
1233 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1235 return baserlib.SubmitJob([op])
1238 class R_2_instances_name_disk_grow(baserlib.R_Generic):
1239 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1243 """Increases the size of an instance disk.
1248 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1249 "instance_name": self.items[0],
1250 "disk": int(self.items[1]),
1253 return baserlib.SubmitJob([op])
1256 class R_2_instances_name_console(baserlib.R_Generic):
1257 """/2/instances/[instance_name]/console resource.
1260 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1263 """Request information for connecting to instance's console.
1265 @return: Serialized instance console description, see
1266 L{objects.InstanceConsole}
1269 client = baserlib.GetClient()
1271 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1274 raise http.HttpServiceUnavailable("Instance console unavailable")
1276 assert isinstance(console, dict)
# Extracts the "fields" query argument and splits its first value into a
# list of field names.
1280 def _GetQueryFields(args):
# NOTE(review): the control flow between the lookup and the raise below is
# missing from this sampled view — presumably the raise sits in a handler
# for a failed "fields" lookup; confirm against the full file.
1285 fields = args["fields"]
1287 raise http.HttpBadRequest("Missing 'fields' query argument")
# Query-argument values are lists; only the first value is used.
1289 return _SplitQueryFields(fields[0])
1292 def _SplitQueryFields(fields):
1296 return [i.strip() for i in fields.split(",")]
1299 class R_2_query(baserlib.R_Generic):
1300 """/2/query/[resource] resource.
1303 # Results might contain sensitive information
1304 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1306 def _Query(self, fields, filter_):
1307 return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()
1310 """Returns resource information.
1312 @return: Query result, see L{objects.QueryResponse}
1315 return self._Query(_GetQueryFields(self.queryargs), None)
1318 """Submits job querying for resources.
1320 @return: Query result, see L{objects.QueryResponse}
1323 body = self.request_body
1325 baserlib.CheckType(body, dict, "Body contents")
1328 fields = body["fields"]
1330 fields = _GetQueryFields(self.queryargs)
1332 return self._Query(fields, self.request_body.get("filter", None))
1335 class R_2_query_fields(baserlib.R_Generic):
1336 """/2/query/[resource]/fields resource.
1340 """Retrieves list of available fields for a resource.
1342 @return: List of serialized L{objects.QueryFieldDefinition}
1346 raw_fields = self.queryargs["fields"]
1350 fields = _SplitQueryFields(raw_fields[0])
1352 return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
1355 class _R_Tags(baserlib.R_Generic):
1356 """ Quasiclass for tagging resources
1358 Manages tags. When inheriting this class you must define the
1364 def __init__(self, items, queryargs, req):
1365 """A tag resource constructor.
1367 We have to override the default to sort out cluster naming case.
1370 baserlib.R_Generic.__init__(self, items, queryargs, req)
1372 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1375 self.name = items[0]
1378 """Returns a list of tags.
1380 Example: ["tag1", "tag2", "tag3"]
1383 kind = self.TAG_LEVEL
1385 if kind in (constants.TAG_INSTANCE,
1386 constants.TAG_NODEGROUP,
1387 constants.TAG_NODE):
1389 raise http.HttpBadRequest("Missing name on tag request")
1391 cl = baserlib.GetClient()
1392 if kind == constants.TAG_INSTANCE:
1393 fn = cl.QueryInstances
1394 elif kind == constants.TAG_NODEGROUP:
1398 result = fn(names=[self.name], fields=["tags"], use_locking=False)
1399 if not result or not result[0]:
1400 raise http.HttpBadGateway("Invalid response from tag query")
1403 elif kind == constants.TAG_CLUSTER:
1404 assert not self.name
1405 # TODO: Use query API?
1406 ssc = ssconf.SimpleStore()
1407 tags = ssc.GetClusterTags()
1412 """Add a set of tags.
1414 The request as a list of strings should be PUT to this URI. And
1415 you'll have back a job id.
1418 # pylint: disable-msg=W0212
1419 if "tag" not in self.queryargs:
1420 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1421 " the 'tag' parameter")
1422 op = opcodes.OpTagsSet(kind=self.TAG_LEVEL, name=self.name,
1423 tags=self.queryargs["tag"], dry_run=self.dryRun())
1424 return baserlib.SubmitJob([op])
1429 In order to delete a set of tags, the DELETE
1430 request should be addressed to URI like:
1431 /tags?tag=[tag]&tag=[tag]
1434 # pylint: disable-msg=W0212
1435 if "tag" not in self.queryargs:
1436 # refuse to delete all tags: the tag(s) to delete must be given explicitly
1437 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1438 " tag(s) using the 'tag' parameter")
1439 op = opcodes.OpTagsDel(kind=self.TAG_LEVEL, name=self.name,
1440 tags=self.queryargs["tag"], dry_run=self.dryRun())
1441 return baserlib.SubmitJob([op])
1444 class R_2_instances_name_tags(_R_Tags):
1445 """ /2/instances/[instance_name]/tags resource.
1447 Manages per-instance tags.
1450 TAG_LEVEL = constants.TAG_INSTANCE
1453 class R_2_nodes_name_tags(_R_Tags):
1454 """ /2/nodes/[node_name]/tags resource.
1456 Manages per-node tags.
1459 TAG_LEVEL = constants.TAG_NODE
1462 class R_2_groups_name_tags(_R_Tags):
1463 """ /2/groups/[group_name]/tags resource.
1465 Manages per-nodegroup tags.
1468 TAG_LEVEL = constants.TAG_NODEGROUP
1471 class R_2_tags(_R_Tags):
1472 """ /2/tags resource.
1474 Manages cluster tags.
1477 TAG_LEVEL = constants.TAG_CLUSTER