4 # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API version 2 baserlib.library.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
# To be in the context of this module: for instance creation, POST on
# /2/instances is legitimate while PUT would not be, since POST creates a
# new entity rather than just replacing /2/instances with it.
So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
40 # pylint: disable-msg=C0103
42 # C0103: Invalid name, since the R_* names are not conforming
44 from ganeti import opcodes
45 from ganeti import http
46 from ganeti import constants
47 from ganeti import cli
48 from ganeti import utils
49 from ganeti import rapi
50 from ganeti.rapi import baserlib
53 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
54 I_FIELDS = ["name", "admin_state", "os",
57 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
59 "disk.sizes", "disk_usage",
60 "beparams", "hvparams",
61 "oper_state", "oper_ram", "oper_vcpus", "status",
64 N_FIELDS = ["name", "offline", "master_candidate", "drained",
66 "mtotal", "mnode", "mfree",
67 "pinst_cnt", "sinst_cnt",
68 "ctotal", "cnodes", "csockets",
70 "pinst_list", "sinst_list",
73 _NR_DRAINED = "drained"
74 _NR_MASTER_CANDIATE = "master-candidate"
76 _NR_OFFLINE = "offline"
77 _NR_REGULAR = "regular"
81 "C": _NR_MASTER_CANDIATE,
87 # Request data version field
88 _REQ_DATA_VERSION = "__version__"
90 # Feature string for instance creation request data version 1
91 _INST_CREATE_REQV1 = "instance-create-reqv1"
93 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
97 class R_version(baserlib.R_Generic):
100 This resource should be used to determine the remote API version and
101 to adapt clients accordingly.
106 """Returns the remote API version.
109 return constants.RAPI_VERSION
112 class R_2_info(baserlib.R_Generic):
118 """Returns cluster information.
121 client = baserlib.GetClient()
122 return client.QueryClusterInfo()
125 class R_2_features(baserlib.R_Generic):
126 """/2/features resource.
131 """Returns list of optional RAPI features implemented.
134 return [_INST_CREATE_REQV1]
137 class R_2_os(baserlib.R_Generic):
143 """Return a list of all OSes.
145 Can return error 500 in case of a problem.
147 Example: ["debian-etch"]
150 cl = baserlib.GetClient()
151 op = opcodes.OpDiagnoseOS(output_fields=["name", "variants"], names=[])
152 job_id = baserlib.SubmitJob([op], cl)
153 # we use custom feedback function, instead of print we log the status
154 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
155 diagnose_data = result[0]
157 if not isinstance(diagnose_data, list):
158 raise http.HttpBadGateway(message="Can't get OS list")
161 for (name, variants) in diagnose_data:
162 os_names.extend(cli.CalculateOSNames(name, variants))
167 class R_2_redist_config(baserlib.R_Generic):
168 """/2/redistribute-config resource.
173 """Redistribute configuration to all nodes.
176 return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
179 class R_2_jobs(baserlib.R_Generic):
185 """Returns a dictionary of jobs.
187 @return: a dictionary with jobs id and uri.
191 cl = baserlib.GetClient()
192 # Convert the list of lists to the list of ids
193 result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
194 return baserlib.BuildUriList(result, "/2/jobs/%s",
195 uri_fields=("id", "uri"))
198 class R_2_jobs_id(baserlib.R_Generic):
199 """/2/jobs/[job_id] resource.
203 """Returns a job status.
205 @return: a dictionary with job parameters.
207 - id: job ID as a number
208 - status: current job status as a string
209 - ops: involved OpCodes as a list of dictionaries for each
211 - opstatus: OpCodes status as a list
212 - opresult: OpCodes results as a list of lists
215 fields = ["id", "ops", "status", "summary",
216 "opstatus", "opresult", "oplog",
217 "received_ts", "start_ts", "end_ts",
219 job_id = self.items[0]
220 result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
222 raise http.HttpNotFound()
223 return baserlib.MapFields(fields, result)
226 """Cancel not-yet-started job.
229 job_id = self.items[0]
230 result = baserlib.GetClient().CancelJob(job_id)
234 class R_2_jobs_id_wait(baserlib.R_Generic):
235 """/2/jobs/[job_id]/wait resource.
238 # WaitForJobChange provides access to sensitive information and blocks
239 # machine resources (it's a blocking RAPI call), hence restricting access.
240 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
243 """Waits for job changes.
246 job_id = self.items[0]
248 fields = self.getBodyParameter("fields")
249 prev_job_info = self.getBodyParameter("previous_job_info", None)
250 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
252 if not isinstance(fields, list):
253 raise http.HttpBadRequest("The 'fields' parameter should be a list")
255 if not (prev_job_info is None or isinstance(prev_job_info, list)):
256 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
259 if not (prev_log_serial is None or
260 isinstance(prev_log_serial, (int, long))):
261 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
264 client = baserlib.GetClient()
265 result = client.WaitForJobChangeOnce(job_id, fields,
266 prev_job_info, prev_log_serial,
267 timeout=_WFJC_TIMEOUT)
269 raise http.HttpNotFound()
271 if result == constants.JOB_NOTCHANGED:
275 (job_info, log_entries) = result
278 "job_info": job_info,
279 "log_entries": log_entries,
283 class R_2_nodes(baserlib.R_Generic):
284 """/2/nodes resource.
288 """Returns a list of all nodes.
291 client = baserlib.GetClient()
294 bulkdata = client.QueryNodes([], N_FIELDS, False)
295 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
297 nodesdata = client.QueryNodes([], ["name"], False)
298 nodeslist = [row[0] for row in nodesdata]
299 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
300 uri_fields=("id", "uri"))
303 class R_2_nodes_name(baserlib.R_Generic):
304 """/2/nodes/[node_name] resources.
308 """Send information about a node.
311 node_name = self.items[0]
312 client = baserlib.GetClient()
314 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
315 names=[node_name], fields=N_FIELDS,
316 use_locking=self.useLocking())
318 return baserlib.MapFields(N_FIELDS, result[0])
321 class R_2_nodes_name_role(baserlib.R_Generic):
322 """ /2/nodes/[node_name]/role resource.
326 """Returns the current node role.
331 node_name = self.items[0]
332 client = baserlib.GetClient()
333 result = client.QueryNodes(names=[node_name], fields=["role"],
334 use_locking=self.useLocking())
336 return _NR_MAP[result[0][0]]
339 """Sets the node role.
344 if not isinstance(self.request_body, basestring):
345 raise http.HttpBadRequest("Invalid body contents, not a string")
347 node_name = self.items[0]
348 role = self.request_body
350 if role == _NR_REGULAR:
355 elif role == _NR_MASTER_CANDIATE:
357 offline = drained = None
359 elif role == _NR_DRAINED:
361 candidate = offline = None
363 elif role == _NR_OFFLINE:
365 candidate = drained = None
368 raise http.HttpBadRequest("Can't set '%s' role" % role)
370 op = opcodes.OpSetNodeParams(node_name=node_name,
371 master_candidate=candidate,
374 force=bool(self.useForce()))
376 return baserlib.SubmitJob([op])
379 class R_2_nodes_name_evacuate(baserlib.R_Generic):
380 """/2/nodes/[node_name]/evacuate resource.
384 """Evacuate all secondary instances off a node.
387 node_name = self.items[0]
388 remote_node = self._checkStringVariable("remote_node", default=None)
389 iallocator = self._checkStringVariable("iallocator", default=None)
390 early_r = bool(self._checkIntVariable("early_release", default=0))
391 dry_run = bool(self.dryRun())
393 cl = baserlib.GetClient()
395 op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
396 iallocator=iallocator,
397 remote_node=remote_node)
399 job_id = baserlib.SubmitJob([op], cl)
400 # we use custom feedback function, instead of print we log the status
401 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
404 for iname, node in result:
408 op = opcodes.OpReplaceDisks(instance_name=iname,
409 remote_node=node, disks=[],
410 mode=constants.REPLACE_DISK_CHG,
411 early_release=early_r)
412 jid = baserlib.SubmitJob([op])
413 jobs.append((jid, iname, node))
418 class R_2_nodes_name_migrate(baserlib.R_Generic):
419 """/2/nodes/[node_name]/migrate resource.
423 """Migrate all primary instances from a node.
426 node_name = self.items[0]
428 if "live" in self.queryargs and "mode" in self.queryargs:
429 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
431 elif "live" in self.queryargs:
432 if self._checkIntVariable("live", default=1):
433 mode = constants.HT_MIGRATION_LIVE
435 mode = constants.HT_MIGRATION_NONLIVE
437 mode = self._checkStringVariable("mode", default=None)
439 op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)
441 return baserlib.SubmitJob([op])
444 class R_2_nodes_name_storage(baserlib.R_Generic):
445 """/2/nodes/[node_name]/storage ressource.
448 # LUQueryNodeStorage acquires locks, hence restricting access to GET
449 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
452 node_name = self.items[0]
454 storage_type = self._checkStringVariable("storage_type", None)
456 raise http.HttpBadRequest("Missing the required 'storage_type'"
459 output_fields = self._checkStringVariable("output_fields", None)
460 if not output_fields:
461 raise http.HttpBadRequest("Missing the required 'output_fields'"
464 op = opcodes.OpQueryNodeStorage(nodes=[node_name],
465 storage_type=storage_type,
466 output_fields=output_fields.split(","))
467 return baserlib.SubmitJob([op])
470 class R_2_nodes_name_storage_modify(baserlib.R_Generic):
471 """/2/nodes/[node_name]/storage/modify ressource.
475 node_name = self.items[0]
477 storage_type = self._checkStringVariable("storage_type", None)
479 raise http.HttpBadRequest("Missing the required 'storage_type'"
482 name = self._checkStringVariable("name", None)
484 raise http.HttpBadRequest("Missing the required 'name'"
489 if "allocatable" in self.queryargs:
490 changes[constants.SF_ALLOCATABLE] = \
491 bool(self._checkIntVariable("allocatable", default=1))
493 op = opcodes.OpModifyNodeStorage(node_name=node_name,
494 storage_type=storage_type,
497 return baserlib.SubmitJob([op])
500 class R_2_nodes_name_storage_repair(baserlib.R_Generic):
501 """/2/nodes/[node_name]/storage/repair ressource.
505 node_name = self.items[0]
507 storage_type = self._checkStringVariable("storage_type", None)
509 raise http.HttpBadRequest("Missing the required 'storage_type'"
512 name = self._checkStringVariable("name", None)
514 raise http.HttpBadRequest("Missing the required 'name'"
517 op = opcodes.OpRepairNodeStorage(node_name=node_name,
518 storage_type=storage_type,
520 return baserlib.SubmitJob([op])
523 def _ParseInstanceCreateRequestVersion1(data, dry_run):
524 """Parses an instance creation request version 1.
526 @rtype: L{opcodes.OpCreateInstance}
527 @return: Instance creation opcode
531 disks_input = baserlib.CheckParameter(data, "disks", exptype=list)
534 for idx, i in enumerate(disks_input):
535 baserlib.CheckType(i, dict, "Disk %d specification" % idx)
539 size = i[constants.IDISK_SIZE]
541 raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
545 constants.IDISK_SIZE: size,
548 # Optional disk access mode
550 disk_access = i[constants.IDISK_MODE]
554 disk[constants.IDISK_MODE] = disk_access
558 assert len(disks_input) == len(disks)
561 nics_input = baserlib.CheckParameter(data, "nics", exptype=list)
564 for idx, i in enumerate(nics_input):
565 baserlib.CheckType(i, dict, "NIC %d specification" % idx)
569 for field in constants.INIC_PARAMS:
579 assert len(nics_input) == len(nics)
582 hvparams = baserlib.CheckParameter(data, "hvparams", default={})
583 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
585 beparams = baserlib.CheckParameter(data, "beparams", default={})
586 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
588 return opcodes.OpCreateInstance(
589 mode=baserlib.CheckParameter(data, "mode"),
590 instance_name=baserlib.CheckParameter(data, "name"),
591 os_type=baserlib.CheckParameter(data, "os"),
592 osparams=baserlib.CheckParameter(data, "osparams", default={}),
593 force_variant=baserlib.CheckParameter(data, "force_variant",
595 pnode=baserlib.CheckParameter(data, "pnode", default=None),
596 snode=baserlib.CheckParameter(data, "snode", default=None),
597 disk_template=baserlib.CheckParameter(data, "disk_template"),
600 src_node=baserlib.CheckParameter(data, "src_node", default=None),
601 src_path=baserlib.CheckParameter(data, "src_path", default=None),
602 start=baserlib.CheckParameter(data, "start", default=True),
604 ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
605 name_check=baserlib.CheckParameter(data, "name_check", default=True),
606 file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
608 file_driver=baserlib.CheckParameter(data, "file_driver",
609 default=constants.FD_LOOP),
610 source_handshake=baserlib.CheckParameter(data, "source_handshake",
612 source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
614 source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
616 iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
617 hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
624 class R_2_instances(baserlib.R_Generic):
625 """/2/instances resource.
629 """Returns a list of all available instances.
632 client = baserlib.GetClient()
634 use_locking = self.useLocking()
636 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
637 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
639 instancesdata = client.QueryInstances([], ["name"], use_locking)
640 instanceslist = [row[0] for row in instancesdata]
641 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
642 uri_fields=("id", "uri"))
644 def _ParseVersion0CreateRequest(self):
645 """Parses an instance creation request version 0.
647 Request data version 0 is deprecated and should not be used anymore.
649 @rtype: L{opcodes.OpCreateInstance}
650 @return: Instance creation opcode
653 # Do not modify anymore, request data version 0 is deprecated
654 beparams = baserlib.MakeParamsDict(self.request_body,
655 constants.BES_PARAMETERS)
656 hvparams = baserlib.MakeParamsDict(self.request_body,
657 constants.HVS_PARAMETERS)
658 fn = self.getBodyParameter
661 disk_data = fn('disks')
662 if not isinstance(disk_data, list):
663 raise http.HttpBadRequest("The 'disks' parameter should be a list")
665 for idx, d in enumerate(disk_data):
666 if not isinstance(d, int):
667 raise http.HttpBadRequest("Disk %d specification wrong: should"
668 " be an integer" % idx)
669 disks.append({"size": d})
671 # nic processing (one nic only)
672 nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
673 if fn("ip", None) is not None:
674 nics[0]["ip"] = fn("ip")
675 if fn("mode", None) is not None:
676 nics[0]["mode"] = fn("mode")
677 if fn("link", None) is not None:
678 nics[0]["link"] = fn("link")
679 if fn("bridge", None) is not None:
680 nics[0]["bridge"] = fn("bridge")
682 # Do not modify anymore, request data version 0 is deprecated
683 return opcodes.OpCreateInstance(
684 mode=constants.INSTANCE_CREATE,
685 instance_name=fn('name'),
687 disk_template=fn('disk_template'),
689 pnode=fn('pnode', None),
690 snode=fn('snode', None),
691 iallocator=fn('iallocator', None),
693 start=fn('start', True),
694 ip_check=fn('ip_check', True),
695 name_check=fn('name_check', True),
697 hypervisor=fn('hypervisor', None),
700 file_storage_dir=fn('file_storage_dir', None),
701 file_driver=fn('file_driver', constants.FD_LOOP),
702 dry_run=bool(self.dryRun()),
706 """Create an instance.
711 if not isinstance(self.request_body, dict):
712 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
714 # Default to request data version 0
715 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
717 if data_version == 0:
718 op = self._ParseVersion0CreateRequest()
719 elif data_version == 1:
720 op = _ParseInstanceCreateRequestVersion1(self.request_body,
723 raise http.HttpBadRequest("Unsupported request data version %s" %
726 return baserlib.SubmitJob([op])
729 class R_2_instances_name(baserlib.R_Generic):
730 """/2/instances/[instance_name] resources.
734 """Send information about an instance.
737 client = baserlib.GetClient()
738 instance_name = self.items[0]
740 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
741 names=[instance_name],
743 use_locking=self.useLocking())
745 return baserlib.MapFields(I_FIELDS, result[0])
748 """Delete an instance.
751 op = opcodes.OpRemoveInstance(instance_name=self.items[0],
752 ignore_failures=False,
753 dry_run=bool(self.dryRun()))
754 return baserlib.SubmitJob([op])
757 class R_2_instances_name_info(baserlib.R_Generic):
758 """/2/instances/[instance_name]/info resource.
762 """Request detailed instance information.
765 instance_name = self.items[0]
766 static = bool(self._checkIntVariable("static", default=0))
768 op = opcodes.OpQueryInstanceData(instances=[instance_name],
770 return baserlib.SubmitJob([op])
773 class R_2_instances_name_reboot(baserlib.R_Generic):
774 """/2/instances/[instance_name]/reboot resource.
776 Implements an instance reboot.
780 """Reboot an instance.
782 The URI takes type=[hard|soft|full] and
783 ignore_secondaries=[False|True] parameters.
786 instance_name = self.items[0]
787 reboot_type = self.queryargs.get('type',
788 [constants.INSTANCE_REBOOT_HARD])[0]
789 ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
790 op = opcodes.OpRebootInstance(instance_name=instance_name,
791 reboot_type=reboot_type,
792 ignore_secondaries=ignore_secondaries,
793 dry_run=bool(self.dryRun()))
795 return baserlib.SubmitJob([op])
798 class R_2_instances_name_startup(baserlib.R_Generic):
799 """/2/instances/[instance_name]/startup resource.
801 Implements an instance startup.
805 """Startup an instance.
807 The URI takes force=[False|True] parameter to start the instance
808 if even if secondary disks are failing.
811 instance_name = self.items[0]
812 force_startup = bool(self._checkIntVariable('force'))
813 op = opcodes.OpStartupInstance(instance_name=instance_name,
815 dry_run=bool(self.dryRun()))
817 return baserlib.SubmitJob([op])
820 class R_2_instances_name_shutdown(baserlib.R_Generic):
821 """/2/instances/[instance_name]/shutdown resource.
823 Implements an instance shutdown.
827 """Shutdown an instance.
830 instance_name = self.items[0]
831 op = opcodes.OpShutdownInstance(instance_name=instance_name,
832 dry_run=bool(self.dryRun()))
834 return baserlib.SubmitJob([op])
837 class R_2_instances_name_reinstall(baserlib.R_Generic):
838 """/2/instances/[instance_name]/reinstall resource.
840 Implements an instance reinstall.
844 """Reinstall an instance.
846 The URI takes os=name and nostartup=[0|1] optional
847 parameters. By default, the instance will be started
851 instance_name = self.items[0]
852 ostype = self._checkStringVariable('os')
853 nostartup = self._checkIntVariable('nostartup')
855 opcodes.OpShutdownInstance(instance_name=instance_name),
856 opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
859 ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
861 return baserlib.SubmitJob(ops)
864 class R_2_instances_name_replace_disks(baserlib.R_Generic):
865 """/2/instances/[instance_name]/replace-disks resource.
869 """Replaces disks on an instance.
872 instance_name = self.items[0]
873 remote_node = self._checkStringVariable("remote_node", default=None)
874 mode = self._checkStringVariable("mode", default=None)
875 raw_disks = self._checkStringVariable("disks", default=None)
876 iallocator = self._checkStringVariable("iallocator", default=None)
880 disks = [int(part) for part in raw_disks.split(",")]
881 except ValueError, err:
882 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
886 op = opcodes.OpReplaceDisks(instance_name=instance_name,
887 remote_node=remote_node,
890 iallocator=iallocator)
892 return baserlib.SubmitJob([op])
895 class R_2_instances_name_activate_disks(baserlib.R_Generic):
896 """/2/instances/[instance_name]/activate-disks resource.
900 """Activate disks for an instance.
902 The URI might contain ignore_size to ignore current recorded size.
905 instance_name = self.items[0]
906 ignore_size = bool(self._checkIntVariable('ignore_size'))
908 op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
909 ignore_size=ignore_size)
911 return baserlib.SubmitJob([op])
914 class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
915 """/2/instances/[instance_name]/deactivate-disks resource.
919 """Deactivate disks for an instance.
922 instance_name = self.items[0]
924 op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
926 return baserlib.SubmitJob([op])
929 class R_2_instances_name_prepare_export(baserlib.R_Generic):
930 """/2/instances/[instance_name]/prepare-export resource.
934 """Prepares an export for an instance.
939 instance_name = self.items[0]
940 mode = self._checkStringVariable("mode")
942 op = opcodes.OpPrepareExport(instance_name=instance_name,
945 return baserlib.SubmitJob([op])
948 def _ParseExportInstanceRequest(name, data):
949 """Parses a request for an instance export.
951 @rtype: L{opcodes.OpExportInstance}
952 @return: Instance export opcode
955 mode = baserlib.CheckParameter(data, "mode",
956 default=constants.EXPORT_MODE_LOCAL)
957 target_node = baserlib.CheckParameter(data, "destination")
958 shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
959 remove_instance = baserlib.CheckParameter(data, "remove_instance",
960 exptype=bool, default=False)
961 x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
962 destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
965 return opcodes.OpExportInstance(instance_name=name,
967 target_node=target_node,
969 remove_instance=remove_instance,
970 x509_key_name=x509_key_name,
971 destination_x509_ca=destination_x509_ca)
974 class R_2_instances_name_export(baserlib.R_Generic):
975 """/2/instances/[instance_name]/export resource.
979 """Exports an instance.
984 if not isinstance(self.request_body, dict):
985 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
987 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
989 return baserlib.SubmitJob([op])
992 def _ParseMigrateInstanceRequest(name, data):
993 """Parses a request for an instance migration.
995 @rtype: L{opcodes.OpMigrateInstance}
996 @return: Instance migration opcode
999 mode = baserlib.CheckParameter(data, "mode", default=None)
1000 cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
1003 return opcodes.OpMigrateInstance(instance_name=name, mode=mode,
1007 class R_2_instances_name_migrate(baserlib.R_Generic):
1008 """/2/instances/[instance_name]/migrate resource.
1012 """Migrates an instance.
1017 baserlib.CheckType(self.request_body, dict, "Body contents")
1019 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1021 return baserlib.SubmitJob([op])
1024 def _ParseRenameInstanceRequest(name, data):
1025 """Parses a request for renaming an instance.
1027 @rtype: L{opcodes.OpRenameInstance}
1028 @return: Instance rename opcode
1031 new_name = baserlib.CheckParameter(data, "new_name")
1032 ip_check = baserlib.CheckParameter(data, "ip_check", default=True)
1033 name_check = baserlib.CheckParameter(data, "name_check", default=True)
1035 return opcodes.OpRenameInstance(instance_name=name, new_name=new_name,
1036 name_check=name_check, ip_check=ip_check)
1039 class R_2_instances_name_rename(baserlib.R_Generic):
1040 """/2/instances/[instance_name]/rename resource.
1044 """Changes the name of an instance.
1049 baserlib.CheckType(self.request_body, dict, "Body contents")
1051 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1053 return baserlib.SubmitJob([op])
1056 def _ParseModifyInstanceRequest(name, data):
1057 """Parses a request for modifying an instance.
1059 @rtype: L{opcodes.OpSetInstanceParams}
1060 @return: Instance modify opcode
1063 osparams = baserlib.CheckParameter(data, "osparams", default={})
1064 force = baserlib.CheckParameter(data, "force", default=False)
1065 nics = baserlib.CheckParameter(data, "nics", default=[])
1066 disks = baserlib.CheckParameter(data, "disks", default=[])
1067 disk_template = baserlib.CheckParameter(data, "disk_template", default=None)
1068 remote_node = baserlib.CheckParameter(data, "remote_node", default=None)
1069 os_name = baserlib.CheckParameter(data, "os_name", default=None)
1070 force_variant = baserlib.CheckParameter(data, "force_variant", default=False)
1073 hvparams = baserlib.CheckParameter(data, "hvparams", default={})
1074 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
1075 allowed_values=[constants.VALUE_DEFAULT])
1077 beparams = baserlib.CheckParameter(data, "beparams", default={})
1078 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
1079 allowed_values=[constants.VALUE_DEFAULT])
1081 return opcodes.OpSetInstanceParams(instance_name=name, hvparams=hvparams,
1082 beparams=beparams, osparams=osparams,
1083 force=force, nics=nics, disks=disks,
1084 disk_template=disk_template,
1085 remote_node=remote_node, os_name=os_name,
1086 force_variant=force_variant)
1089 class R_2_instances_name_modify(baserlib.R_Generic):
1090 """/2/instances/[instance_name]/modify resource.
1094 """Changes some parameters of an instance.
1099 baserlib.CheckType(self.request_body, dict, "Body contents")
1101 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1103 return baserlib.SubmitJob([op])
1106 class _R_Tags(baserlib.R_Generic):
1107 """ Quasiclass for tagging resources
1109 Manages tags. When inheriting this class you must define the
1115 def __init__(self, items, queryargs, req):
1116 """A tag resource constructor.
1118 We have to override the default to sort out cluster naming case.
1121 baserlib.R_Generic.__init__(self, items, queryargs, req)
1123 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1126 self.name = items[0]
1129 """Returns a list of tags.
1131 Example: ["tag1", "tag2", "tag3"]
1134 # pylint: disable-msg=W0212
1135 return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
1138 """Add a set of tags.
1140 The request as a list of strings should be PUT to this URI. And
1141 you'll have back a job id.
1144 # pylint: disable-msg=W0212
1145 if 'tag' not in self.queryargs:
1146 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1147 " the 'tag' parameter")
1148 return baserlib._Tags_PUT(self.TAG_LEVEL,
1149 self.queryargs['tag'], name=self.name,
1150 dry_run=bool(self.dryRun()))
1155 In order to delete a set of tags, the DELETE
1156 request should be addressed to URI like:
1157 /tags?tag=[tag]&tag=[tag]
1160 # pylint: disable-msg=W0212
1161 if 'tag' not in self.queryargs:
1162 # no we not gonna delete all tags
1163 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1164 " tag(s) using the 'tag' parameter")
1165 return baserlib._Tags_DELETE(self.TAG_LEVEL,
1166 self.queryargs['tag'],
1168 dry_run=bool(self.dryRun()))
1171 class R_2_instances_name_tags(_R_Tags):
1172 """ /2/instances/[instance_name]/tags resource.
1174 Manages per-instance tags.
1177 TAG_LEVEL = constants.TAG_INSTANCE
1180 class R_2_nodes_name_tags(_R_Tags):
1181 """ /2/nodes/[node_name]/tags resource.
1183 Manages per-node tags.
1186 TAG_LEVEL = constants.TAG_NODE
1189 class R_2_tags(_R_Tags):
1190 """ /2/instances/tags resource.
1192 Manages cluster tags.
1195 TAG_LEVEL = constants.TAG_CLUSTER