4 # Copyright (C) 2006, 2007, 2008 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 """Remote API version 2 baserlib.library.
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
In the context of this module, for instance creation a POST on
/2/instances is legitimate while a PUT would not be, since POST creates a
new entity instead of just replacing /2/instances with it.
So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
40 # pylint: disable-msg=C0103
42 # C0103: Invalid name, since the R_* names are not conforming
44 from ganeti import opcodes
45 from ganeti import http
46 from ganeti import constants
47 from ganeti import cli
48 from ganeti import utils
49 from ganeti import rapi
50 from ganeti.rapi import baserlib
# Fields returned for every resource type in addition to the
# type-specific ones.
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Instance query fields exposed via RAPI.
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "status",
            ] + _COMMON_FIELDS

# Node query fields exposed via RAPI.
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            ] + _COMMON_FIELDS

# Textual node role names used by /2/nodes/[node_name]/role.
# NOTE: _NR_MASTER_CANDIATE keeps its historical (misspelled) name to
# avoid touching all users of the constant.
_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps the single-letter "role" query field to the textual role name.
_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  def GET(self):
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  def GET(self):
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1]
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
                              names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, valid, variants) in diagnose_data:
      # Invalid OSes are reported by the diagnose opcode but must not
      # show up in the public OS list.
      if valid:
        os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  def PUT(self):
    """Redistribute configuration to all nodes.

    @return: a job id

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    # QueryJobs returns None for unknown job IDs
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result
class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes within the specified timeout
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      # Full per-node field data in one response
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      # Only names plus per-node URIs
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_role(baserlib.R_Generic):
  """/2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    # For each role we set the flags it implies; a None value means
    # "leave this flag unchanged".
    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    # First ask the cluster for the evacuation strategy (which instance
    # moves to which node)...
    op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
                                          iallocator=iallocator,
                                          remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    # ...then submit one replace-disks job per instance move
    jobs = []
    for iname, node in result:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpReplaceDisks(instance_name=iname,
                                    remote_node=node, disks=[],
                                    mode=constants.REPLACE_DISK_CHG,
                                    early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    # Live migration is the default unless live=0 is passed
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Returns storage units for the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Modifies a storage unit on the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Repairs a storage unit on the node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @param data: Request body (a dictionary) as sent by the client
  @param dry_run: Whether to submit the job in dry-run mode
  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory for every disk
    try:
      size = i["size"]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      "size": size,
      }

    # Optional disk access mode
    try:
      disk_access = i["mode"]
    except KeyError:
      pass
    else:
      disk["mode"] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # NICs
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    # All NIC fields are optional; copy only those present
    for field in ["mode", "ip", "link", "bridge"]:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpCreateInstance(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os", default=None),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=True,
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      # Full per-instance field data in one response
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      # Only names plus per-instance URIs
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    # static=1 avoids querying the hypervisor for live data
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    # Shutdown, reinstall and (unless suppressed) start again, as a
    # single job so the steps run in order
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name,
                                  os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)
855 class R_2_instances_name_replace_disks(baserlib.R_Generic):
856 """/2/instances/[instance_name]/replace-disks resource.
860 """Replaces disks on an instance.
863 instance_name = self.items[0]
864 remote_node = self._checkStringVariable("remote_node", default=None)
865 mode = self._checkStringVariable("mode", default=None)
866 raw_disks = self._checkStringVariable("disks", default=None)
867 iallocator = self._checkStringVariable("iallocator", default=None)
871 disks = [int(part) for part in raw_disks.split(",")]
872 except ValueError, err:
873 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
877 op = opcodes.OpReplaceDisks(instance_name=instance_name,
878 remote_node=remote_node,
881 iallocator=iallocator)
883 return baserlib.SubmitJob([op])
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])
class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpPrepareExport(instance_name=instance_name,
                                 mode=mode)
    return baserlib.SubmitJob([op])
def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @param name: Instance name
  @param data: Request body (a dictionary) as sent by the client
  @rtype: L{opcodes.OpExportInstance}
  @return: Instance export opcode

  """
  mode = baserlib.CheckParameter(data, "mode",
                                 default=constants.EXPORT_MODE_LOCAL)
  target_node = baserlib.CheckParameter(data, "destination")
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
                                            exptype=bool, default=False)
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
                                                default=None)

  return opcodes.OpExportInstance(instance_name=name,
                                  mode=mode,
                                  target_node=target_node,
                                  shutdown=shutdown,
                                  remove_instance=remove_instance,
                                  x509_key_name=x509_key_name,
                                  destination_x509_ca=destination_x509_ca)
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    # Cluster-level tags have no item name in the URI
    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = None

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # Refuse to delete all tags implicitly
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER