4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API resource implementations.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
35 Quoting from RFC2616, section 9.6::
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
49 So when adding new methods, if they are operating on the URI entity itself,
50 PUT should be preferred over POST.
54 # pylint: disable-msg=C0103
56 # C0103: Invalid name, since the R_* names are not conforming
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
64 from ganeti import compat
65 from ganeti import ssconf
66 from ganeti.rapi import baserlib
69 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
70 I_FIELDS = ["name", "admin_state", "os",
73 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
75 "disk.sizes", "disk_usage",
76 "beparams", "hvparams",
77 "oper_state", "oper_ram", "oper_vcpus", "status",
78 "custom_hvparams", "custom_beparams", "custom_nicparams",
81 N_FIELDS = ["name", "offline", "master_candidate", "drained",
83 "mtotal", "mnode", "mfree",
84 "pinst_cnt", "sinst_cnt",
85 "ctotal", "cnodes", "csockets",
87 "pinst_list", "sinst_list",
88 "master_capable", "vm_capable",
100 "id", "ops", "status", "summary",
102 "received_ts", "start_ts", "end_ts",
105 J_FIELDS = J_FIELDS_BULK + [
110 _NR_DRAINED = "drained"
111 _NR_MASTER_CANDIATE = "master-candidate"
112 _NR_MASTER = "master"
113 _NR_OFFLINE = "offline"
114 _NR_REGULAR = "regular"
117 constants.NR_MASTER: _NR_MASTER,
118 constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
119 constants.NR_DRAINED: _NR_DRAINED,
120 constants.NR_OFFLINE: _NR_OFFLINE,
121 constants.NR_REGULAR: _NR_REGULAR,
124 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
126 # Request data version field
127 _REQ_DATA_VERSION = "__version__"
129 # Feature string for instance creation request data version 1
130 _INST_CREATE_REQV1 = "instance-create-reqv1"
132 # Feature string for instance reinstall request version 1
133 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
135 # Feature string for node migration version 1
136 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
138 # Feature string for node evacuation with LU-generated jobs
139 _NODE_EVAC_RES1 = "node-evac-res1"
141 ALL_FEATURES = frozenset([
143 _INST_REINSTALL_REQV1,
148 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
152 class R_root(baserlib.ResourceBase):
158 """Supported for legacy reasons.
164 class R_version(baserlib.ResourceBase):
165 """/version resource.
167 This resource should be used to determine the remote API version and
168 to adapt clients accordingly.
173 """Returns the remote API version.
176 return constants.RAPI_VERSION
179 class R_2_info(baserlib.ResourceBase):
184 """Returns cluster information.
187 client = self.GetClient()
188 return client.QueryClusterInfo()
191 class R_2_features(baserlib.ResourceBase):
192 """/2/features resource.
197 """Returns list of optional RAPI features implemented.
200 return list(ALL_FEATURES)
203 class R_2_os(baserlib.ResourceBase):
208 """Return a list of all OSes.
210 Can return error 500 in case of a problem.
212 Example: ["debian-etch"]
215 cl = self.GetClient()
216 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
217 job_id = self.SubmitJob([op], cl=cl)
219 # We use a custom feedback function: instead of printing, we log the status.
219 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
220 diagnose_data = result[0]
222 if not isinstance(diagnose_data, list):
223 raise http.HttpBadGateway(message="Can't get OS list")
226 for (name, variants) in diagnose_data:
227 os_names.extend(cli.CalculateOSNames(name, variants))
232 class R_2_redist_config(baserlib.OpcodeResource):
233 """/2/redistribute-config resource.
236 PUT_OPCODE = opcodes.OpClusterRedistConf
239 class R_2_cluster_modify(baserlib.OpcodeResource):
240 """/2/modify resource.
243 PUT_OPCODE = opcodes.OpClusterSetParams
246 class R_2_jobs(baserlib.ResourceBase):
251 """Returns a dictionary of jobs.
253 @return: a dictionary with jobs id and uri.
256 client = self.GetClient()
259 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
260 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
262 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
263 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
264 uri_fields=("id", "uri"))
267 class R_2_jobs_id(baserlib.ResourceBase):
268 """/2/jobs/[job_id] resource.
272 """Returns a job status.
274 @return: a dictionary with job parameters.
276 - id: job ID as a number
277 - status: current job status as a string
278 - ops: involved OpCodes as a list of dictionaries for each
280 - opstatus: OpCodes status as a list
281 - opresult: OpCodes results as a list of lists
284 job_id = self.items[0]
285 result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
287 raise http.HttpNotFound()
288 return baserlib.MapFields(J_FIELDS, result)
291 """Cancel not-yet-started job.
294 job_id = self.items[0]
295 result = self.GetClient().CancelJob(job_id)
299 class R_2_jobs_id_wait(baserlib.ResourceBase):
300 """/2/jobs/[job_id]/wait resource.
303 # WaitForJobChange provides access to sensitive information and blocks
304 # machine resources (it's a blocking RAPI call), hence restricting access.
305 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
308 """Waits for job changes.
311 job_id = self.items[0]
313 fields = self.getBodyParameter("fields")
314 prev_job_info = self.getBodyParameter("previous_job_info", None)
315 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
317 if not isinstance(fields, list):
318 raise http.HttpBadRequest("The 'fields' parameter should be a list")
320 if not (prev_job_info is None or isinstance(prev_job_info, list)):
321 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
324 if not (prev_log_serial is None or
325 isinstance(prev_log_serial, (int, long))):
326 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
329 client = self.GetClient()
330 result = client.WaitForJobChangeOnce(job_id, fields,
331 prev_job_info, prev_log_serial,
332 timeout=_WFJC_TIMEOUT)
334 raise http.HttpNotFound()
336 if result == constants.JOB_NOTCHANGED:
340 (job_info, log_entries) = result
343 "job_info": job_info,
344 "log_entries": log_entries,
348 class R_2_nodes(baserlib.ResourceBase):
349 """/2/nodes resource.
353 """Returns a list of all nodes.
356 client = self.GetClient()
359 bulkdata = client.QueryNodes([], N_FIELDS, False)
360 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
362 nodesdata = client.QueryNodes([], ["name"], False)
363 nodeslist = [row[0] for row in nodesdata]
364 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
365 uri_fields=("id", "uri"))
368 class R_2_nodes_name(baserlib.ResourceBase):
369 """/2/nodes/[node_name] resource.
373 """Send information about a node.
376 node_name = self.items[0]
377 client = self.GetClient()
379 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
380 names=[node_name], fields=N_FIELDS,
381 use_locking=self.useLocking())
383 return baserlib.MapFields(N_FIELDS, result[0])
386 class R_2_nodes_name_role(baserlib.ResourceBase):
387 """ /2/nodes/[node_name]/role resource.
391 """Returns the current node role.
396 node_name = self.items[0]
397 client = self.GetClient()
398 result = client.QueryNodes(names=[node_name], fields=["role"],
399 use_locking=self.useLocking())
401 return _NR_MAP[result[0][0]]
404 """Sets the node role.
409 if not isinstance(self.request_body, basestring):
410 raise http.HttpBadRequest("Invalid body contents, not a string")
412 node_name = self.items[0]
413 role = self.request_body
415 if role == _NR_REGULAR:
420 elif role == _NR_MASTER_CANDIATE:
422 offline = drained = None
424 elif role == _NR_DRAINED:
426 candidate = offline = None
428 elif role == _NR_OFFLINE:
430 candidate = drained = None
433 raise http.HttpBadRequest("Can't set '%s' role" % role)
435 op = opcodes.OpNodeSetParams(node_name=node_name,
436 master_candidate=candidate,
439 force=bool(self.useForce()))
441 return self.SubmitJob([op])
444 class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
445 """/2/nodes/[node_name]/evacuate resource.
448 POST_OPCODE = opcodes.OpNodeEvacuate
450 def GetPostOpInput(self):
451 """Evacuate all instances off a node.
454 return (self.request_body, {
455 "node_name": self.items[0],
456 "dry_run": self.dryRun(),
460 class R_2_nodes_name_migrate(baserlib.OpcodeResource):
461 """/2/nodes/[node_name]/migrate resource.
464 POST_OPCODE = opcodes.OpNodeMigrate
466 def GetPostOpInput(self):
467 """Migrate all primary instances from a node.
471 # Support old-style requests
472 if "live" in self.queryargs and "mode" in self.queryargs:
473 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
476 if "live" in self.queryargs:
477 if self._checkIntVariable("live", default=1):
478 mode = constants.HT_MIGRATION_LIVE
480 mode = constants.HT_MIGRATION_NONLIVE
482 mode = self._checkStringVariable("mode", default=None)
488 data = self.request_body
491 "node_name": self.items[0],
495 class R_2_nodes_name_storage(baserlib.ResourceBase):
496 """/2/nodes/[node_name]/storage resource.
499 # LUNodeQueryStorage acquires locks, hence restricting access to GET
500 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
503 node_name = self.items[0]
505 storage_type = self._checkStringVariable("storage_type", None)
507 raise http.HttpBadRequest("Missing the required 'storage_type'"
510 output_fields = self._checkStringVariable("output_fields", None)
511 if not output_fields:
512 raise http.HttpBadRequest("Missing the required 'output_fields'"
515 op = opcodes.OpNodeQueryStorage(nodes=[node_name],
516 storage_type=storage_type,
517 output_fields=output_fields.split(","))
518 return self.SubmitJob([op])
521 class R_2_nodes_name_storage_modify(baserlib.ResourceBase):
522 """/2/nodes/[node_name]/storage/modify resource.
526 node_name = self.items[0]
528 storage_type = self._checkStringVariable("storage_type", None)
530 raise http.HttpBadRequest("Missing the required 'storage_type'"
533 name = self._checkStringVariable("name", None)
535 raise http.HttpBadRequest("Missing the required 'name'"
540 if "allocatable" in self.queryargs:
541 changes[constants.SF_ALLOCATABLE] = \
542 bool(self._checkIntVariable("allocatable", default=1))
544 op = opcodes.OpNodeModifyStorage(node_name=node_name,
545 storage_type=storage_type,
548 return self.SubmitJob([op])
551 class R_2_nodes_name_storage_repair(baserlib.ResourceBase):
552 """/2/nodes/[node_name]/storage/repair resource.
556 node_name = self.items[0]
558 storage_type = self._checkStringVariable("storage_type", None)
560 raise http.HttpBadRequest("Missing the required 'storage_type'"
563 name = self._checkStringVariable("name", None)
565 raise http.HttpBadRequest("Missing the required 'name'"
568 op = opcodes.OpRepairNodeStorage(node_name=node_name,
569 storage_type=storage_type,
571 return self.SubmitJob([op])
574 class R_2_groups(baserlib.OpcodeResource):
575 """/2/groups resource.
578 POST_OPCODE = opcodes.OpGroupAdd
580 "name": "group_name",
583 def GetPostOpInput(self):
584 """Create a node group.
587 assert not self.items
588 return (self.request_body, {
589 "dry_run": self.dryRun(),
593 """Returns a list of all node groups.
596 client = self.GetClient()
599 bulkdata = client.QueryGroups([], G_FIELDS, False)
600 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
602 data = client.QueryGroups([], ["name"], False)
603 groupnames = [row[0] for row in data]
604 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
605 uri_fields=("name", "uri"))
608 class R_2_groups_name(baserlib.ResourceBase):
609 """/2/groups/[group_name] resource.
613 """Send information about a node group.
616 group_name = self.items[0]
617 client = self.GetClient()
619 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
620 names=[group_name], fields=G_FIELDS,
621 use_locking=self.useLocking())
623 return baserlib.MapFields(G_FIELDS, result[0])
626 """Delete a node group.
629 op = opcodes.OpGroupRemove(group_name=self.items[0],
630 dry_run=bool(self.dryRun()))
632 return self.SubmitJob([op])
635 class R_2_groups_name_modify(baserlib.OpcodeResource):
636 """/2/groups/[group_name]/modify resource.
639 PUT_OPCODE = opcodes.OpGroupSetParams
641 def GetPutOpInput(self):
642 """Changes some parameters of node group.
646 return (self.request_body, {
647 "group_name": self.items[0],
651 class R_2_groups_name_rename(baserlib.OpcodeResource):
652 """/2/groups/[group_name]/rename resource.
655 PUT_OPCODE = opcodes.OpGroupRename
657 def GetPutOpInput(self):
658 """Changes the name of a node group.
661 assert len(self.items) == 1
662 return (self.request_body, {
663 "group_name": self.items[0],
664 "dry_run": self.dryRun(),
668 class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
669 """/2/groups/[group_name]/assign-nodes resource.
672 PUT_OPCODE = opcodes.OpGroupAssignNodes
674 def GetPutOpInput(self):
675 """Assigns nodes to a group.
678 assert len(self.items) == 1
679 return (self.request_body, {
680 "group_name": self.items[0],
681 "dry_run": self.dryRun(),
682 "force": self.useForce(),
686 def _ParseInstanceCreateRequestVersion1(data, dry_run):
687 """Parses an instance creation request version 1.
689 @rtype: L{opcodes.OpInstanceCreate}
690 @return: Instance creation opcode
699 "name": "instance_name",
702 return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
706 class R_2_instances(baserlib.ResourceBase):
707 """/2/instances resource.
711 """Returns a list of all available instances.
714 client = self.GetClient()
716 use_locking = self.useLocking()
718 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
719 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
721 instancesdata = client.QueryInstances([], ["name"], use_locking)
722 instanceslist = [row[0] for row in instancesdata]
723 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
724 uri_fields=("id", "uri"))
727 """Create an instance.
732 if not isinstance(self.request_body, dict):
733 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
735 # Default to request data version 0
736 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
738 if data_version == 0:
739 raise http.HttpBadRequest("Instance creation request version 0 is no"
741 elif data_version == 1:
742 data = self.request_body.copy()
743 # Remove "__version__"
744 data.pop(_REQ_DATA_VERSION, None)
745 op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
747 raise http.HttpBadRequest("Unsupported request data version %s" %
750 return self.SubmitJob([op])
753 class R_2_instances_name(baserlib.ResourceBase):
754 """/2/instances/[instance_name] resource.
758 """Send information about an instance.
761 client = self.GetClient()
762 instance_name = self.items[0]
764 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
765 names=[instance_name],
767 use_locking=self.useLocking())
769 return baserlib.MapFields(I_FIELDS, result[0])
772 """Delete an instance.
775 op = opcodes.OpInstanceRemove(instance_name=self.items[0],
776 ignore_failures=False,
777 dry_run=bool(self.dryRun()))
778 return self.SubmitJob([op])
781 class R_2_instances_name_info(baserlib.ResourceBase):
782 """/2/instances/[instance_name]/info resource.
786 """Request detailed instance information.
789 instance_name = self.items[0]
790 static = bool(self._checkIntVariable("static", default=0))
792 op = opcodes.OpInstanceQueryData(instances=[instance_name],
794 return self.SubmitJob([op])
797 class R_2_instances_name_reboot(baserlib.ResourceBase):
798 """/2/instances/[instance_name]/reboot resource.
800 Implements an instance reboot.
804 """Reboot an instance.
806 The URI takes type=[hard|soft|full] and
807 ignore_secondaries=[False|True] parameters.
810 instance_name = self.items[0]
811 reboot_type = self.queryargs.get("type",
812 [constants.INSTANCE_REBOOT_HARD])[0]
813 ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
814 op = opcodes.OpInstanceReboot(instance_name=instance_name,
815 reboot_type=reboot_type,
816 ignore_secondaries=ignore_secondaries,
817 dry_run=bool(self.dryRun()))
819 return self.SubmitJob([op])
822 class R_2_instances_name_startup(baserlib.ResourceBase):
823 """/2/instances/[instance_name]/startup resource.
825 Implements an instance startup.
829 """Startup an instance.
831 The URI takes force=[False|True] parameter to start the instance
832 even if secondary disks are failing.
835 instance_name = self.items[0]
836 force_startup = bool(self._checkIntVariable("force"))
837 no_remember = bool(self._checkIntVariable("no_remember"))
838 op = opcodes.OpInstanceStartup(instance_name=instance_name,
840 dry_run=bool(self.dryRun()),
841 no_remember=no_remember)
843 return self.SubmitJob([op])
846 def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
847 """Parses a request for an instance shutdown.
849 @rtype: L{opcodes.OpInstanceShutdown}
850 @return: Instance shutdown opcode
853 return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
854 "instance_name": name,
856 "no_remember": no_remember,
860 class R_2_instances_name_shutdown(baserlib.ResourceBase):
861 """/2/instances/[instance_name]/shutdown resource.
863 Implements an instance shutdown.
867 """Shutdown an instance.
872 baserlib.CheckType(self.request_body, dict, "Body contents")
874 no_remember = bool(self._checkIntVariable("no_remember"))
875 op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
876 bool(self.dryRun()), no_remember)
878 return self.SubmitJob([op])
881 def _ParseInstanceReinstallRequest(name, data):
882 """Parses a request for reinstalling an instance.
885 if not isinstance(data, dict):
886 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
888 ostype = baserlib.CheckParameter(data, "os", default=None)
889 start = baserlib.CheckParameter(data, "start", exptype=bool,
891 osparams = baserlib.CheckParameter(data, "osparams", default=None)
894 opcodes.OpInstanceShutdown(instance_name=name),
895 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
900 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
905 class R_2_instances_name_reinstall(baserlib.ResourceBase):
906 """/2/instances/[instance_name]/reinstall resource.
908 Implements an instance reinstall.
912 """Reinstall an instance.
914 The URI takes os=name and nostartup=[0|1] optional
915 parameters. By default, the instance will be started
919 if self.request_body:
921 raise http.HttpBadRequest("Can't combine query and body parameters")
923 body = self.request_body
925 # Legacy interface, do not modify/extend
927 "os": self._checkStringVariable("os"),
928 "start": not self._checkIntVariable("nostartup"),
933 ops = _ParseInstanceReinstallRequest(self.items[0], body)
935 return self.SubmitJob(ops)
938 def _ParseInstanceReplaceDisksRequest(name, data):
939 """Parses a request for an instance export.
941 @rtype: L{opcodes.OpInstanceReplaceDisks}
942 @return: Instance export opcode
946 "instance_name": name,
951 raw_disks = data["disks"]
955 if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
956 # Backwards compatibility for strings of the format "1, 2, 3"
958 data["disks"] = [int(part) for part in raw_disks.split(",")]
959 except (TypeError, ValueError), err:
960 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
962 return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
965 class R_2_instances_name_replace_disks(baserlib.ResourceBase):
966 """/2/instances/[instance_name]/replace-disks resource.
970 """Replaces disks on an instance.
973 op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
975 return self.SubmitJob([op])
978 class R_2_instances_name_activate_disks(baserlib.ResourceBase):
979 """/2/instances/[instance_name]/activate-disks resource.
983 """Activate disks for an instance.
985 The URI might contain ignore_size to ignore current recorded size.
988 instance_name = self.items[0]
989 ignore_size = bool(self._checkIntVariable("ignore_size"))
991 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
992 ignore_size=ignore_size)
994 return self.SubmitJob([op])
997 class R_2_instances_name_deactivate_disks(baserlib.ResourceBase):
998 """/2/instances/[instance_name]/deactivate-disks resource.
1002 """Deactivate disks for an instance.
1005 instance_name = self.items[0]
1007 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1009 return self.SubmitJob([op])
1012 class R_2_instances_name_prepare_export(baserlib.ResourceBase):
1013 """/2/instances/[instance_name]/prepare-export resource.
1017 """Prepares an export for an instance.
1022 instance_name = self.items[0]
1023 mode = self._checkStringVariable("mode")
1025 op = opcodes.OpBackupPrepare(instance_name=instance_name,
1028 return self.SubmitJob([op])
1031 def _ParseExportInstanceRequest(name, data):
1032 """Parses a request for an instance export.
1034 @rtype: L{opcodes.OpBackupExport}
1035 @return: Instance export opcode
1038 # Rename "destination" to "target_node"
1040 data["target_node"] = data.pop("destination")
1044 return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
1045 "instance_name": name,
1049 class R_2_instances_name_export(baserlib.ResourceBase):
1050 """/2/instances/[instance_name]/export resource.
1054 """Exports an instance.
1059 if not isinstance(self.request_body, dict):
1060 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1062 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1064 return self.SubmitJob([op])
1067 def _ParseMigrateInstanceRequest(name, data):
1068 """Parses a request for an instance migration.
1070 @rtype: L{opcodes.OpInstanceMigrate}
1071 @return: Instance migration opcode
1074 return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
1075 "instance_name": name,
1079 class R_2_instances_name_migrate(baserlib.ResourceBase):
1080 """/2/instances/[instance_name]/migrate resource.
1084 """Migrates an instance.
1089 baserlib.CheckType(self.request_body, dict, "Body contents")
1091 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1093 return self.SubmitJob([op])
1096 class R_2_instances_name_failover(baserlib.ResourceBase):
1097 """/2/instances/[instance_name]/failover resource.
1101 """Does a failover of an instance.
1106 baserlib.CheckType(self.request_body, dict, "Body contents")
1108 op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
1109 "instance_name": self.items[0],
1112 return self.SubmitJob([op])
1115 def _ParseRenameInstanceRequest(name, data):
1116 """Parses a request for renaming an instance.
1118 @rtype: L{opcodes.OpInstanceRename}
1119 @return: Instance rename opcode
1122 return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
1123 "instance_name": name,
1127 class R_2_instances_name_rename(baserlib.ResourceBase):
1128 """/2/instances/[instance_name]/rename resource.
1132 """Changes the name of an instance.
1137 baserlib.CheckType(self.request_body, dict, "Body contents")
1139 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1141 return self.SubmitJob([op])
1144 def _ParseModifyInstanceRequest(name, data):
1145 """Parses a request for modifying an instance.
1147 @rtype: L{opcodes.OpInstanceSetParams}
1148 @return: Instance modify opcode
1151 return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
1152 "instance_name": name,
1156 class R_2_instances_name_modify(baserlib.ResourceBase):
1157 """/2/instances/[instance_name]/modify resource.
1161 """Changes some parameters of an instance.
1166 baserlib.CheckType(self.request_body, dict, "Body contents")
1168 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1170 return self.SubmitJob([op])
1173 class R_2_instances_name_disk_grow(baserlib.ResourceBase):
1174 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1178 """Increases the size of an instance disk.
1183 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1184 "instance_name": self.items[0],
1185 "disk": int(self.items[1]),
1188 return self.SubmitJob([op])
1191 class R_2_instances_name_console(baserlib.ResourceBase):
1192 """/2/instances/[instance_name]/console resource.
1195 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1198 """Request information for connecting to instance's console.
1200 @return: Serialized instance console description, see
1201 L{objects.InstanceConsole}
1204 client = self.GetClient()
1206 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1209 raise http.HttpServiceUnavailable("Instance console unavailable")
1211 assert isinstance(console, dict)
1215 def _GetQueryFields(args):
1220 fields = args["fields"]
1222 raise http.HttpBadRequest("Missing 'fields' query argument")
1224 return _SplitQueryFields(fields[0])
1227 def _SplitQueryFields(fields):
1231 return [i.strip() for i in fields.split(",")]
1234 class R_2_query(baserlib.ResourceBase):
1235 """/2/query/[resource] resource.
1238 # Results might contain sensitive information
1239 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1241 def _Query(self, fields, filter_):
1242 return self.GetClient().Query(self.items[0], fields, filter_).ToDict()
1245 """Returns resource information.
1247 @return: Query result, see L{objects.QueryResponse}
1250 return self._Query(_GetQueryFields(self.queryargs), None)
1253 """Submits job querying for resources.
1255 @return: Query result, see L{objects.QueryResponse}
1258 body = self.request_body
1260 baserlib.CheckType(body, dict, "Body contents")
1263 fields = body["fields"]
1265 fields = _GetQueryFields(self.queryargs)
1267 return self._Query(fields, self.request_body.get("filter", None))
1270 class R_2_query_fields(baserlib.ResourceBase):
1271 """/2/query/[resource]/fields resource.
1275 """Retrieves list of available fields for a resource.
1277 @return: List of serialized L{objects.QueryFieldDefinition}
1281 raw_fields = self.queryargs["fields"]
1285 fields = _SplitQueryFields(raw_fields[0])
1287 return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1290 class _R_Tags(baserlib.ResourceBase):
1291 """ Quasiclass for tagging resources
1293 Manages tags. When inheriting this class you must define the
1299 def __init__(self, items, queryargs, req):
1300 """A tag resource constructor.
1302 We have to override the default to sort out cluster naming case.
1305 baserlib.ResourceBase.__init__(self, items, queryargs, req)
1307 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1310 self.name = items[0]
1313 """Returns a list of tags.
1315 Example: ["tag1", "tag2", "tag3"]
1318 kind = self.TAG_LEVEL
1320 if kind in (constants.TAG_INSTANCE,
1321 constants.TAG_NODEGROUP,
1322 constants.TAG_NODE):
1324 raise http.HttpBadRequest("Missing name on tag request")
1326 cl = self.GetClient()
1327 if kind == constants.TAG_INSTANCE:
1328 fn = cl.QueryInstances
1329 elif kind == constants.TAG_NODEGROUP:
1333 result = fn(names=[self.name], fields=["tags"], use_locking=False)
1334 if not result or not result[0]:
1335 raise http.HttpBadGateway("Invalid response from tag query")
1338 elif kind == constants.TAG_CLUSTER:
1339 assert not self.name
1340 # TODO: Use query API?
1341 ssc = ssconf.SimpleStore()
1342 tags = ssc.GetClusterTags()
1347 """Add a set of tags.
1349 The request as a list of strings should be PUT to this URI. And
1350 you'll have back a job id.
1353 # pylint: disable-msg=W0212
1354 if "tag" not in self.queryargs:
1355 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1356 " the 'tag' parameter")
1357 op = opcodes.OpTagsSet(kind=self.TAG_LEVEL, name=self.name,
1358 tags=self.queryargs["tag"], dry_run=self.dryRun())
1359 return self.SubmitJob([op])
1364 In order to delete a set of tags, the DELETE
1365 request should be addressed to URI like:
1366 /tags?tag=[tag]&tag=[tag]
1369 # pylint: disable-msg=W0212
1370 if "tag" not in self.queryargs:
1372 # No, we will not delete all tags.
1372 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1373 " tag(s) using the 'tag' parameter")
1374 op = opcodes.OpTagsDel(kind=self.TAG_LEVEL, name=self.name,
1375 tags=self.queryargs["tag"], dry_run=self.dryRun())
1376 return self.SubmitJob([op])
1379 class R_2_instances_name_tags(_R_Tags):
1380 """ /2/instances/[instance_name]/tags resource.
1382 Manages per-instance tags.
1385 TAG_LEVEL = constants.TAG_INSTANCE
1388 class R_2_nodes_name_tags(_R_Tags):
1389 """ /2/nodes/[node_name]/tags resource.
1391 Manages per-node tags.
1394 TAG_LEVEL = constants.TAG_NODE
1397 class R_2_groups_name_tags(_R_Tags):
1398 """ /2/groups/[group_name]/tags resource.
1400 Manages per-nodegroup tags.
1403 TAG_LEVEL = constants.TAG_NODEGROUP
1406 class R_2_tags(_R_Tags):
1407 """ /2/tags resource.
1409 Manages cluster tags.
1412 TAG_LEVEL = constants.TAG_CLUSTER