4 # Copyright (C) 2006, 2007, 2008 Google Inc.
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
"""Remote API version 2 baserlib.library.

"""
26 # pylint: disable-msg=C0103
28 # C0103: Invalid name, since the R_* names are not conforming
30 from ganeti import opcodes
31 from ganeti import http
32 from ganeti import constants
33 from ganeti import cli
34 from ganeti import rapi
35 from ganeti.rapi import baserlib
# Fields shared by every tagged/serialized object (instances and nodes).
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Query fields returned for instances (bulk GET and per-instance GET).
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "status",
            ] + _COMMON_FIELDS

# Query fields returned for nodes (bulk GET and per-node GET).
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            ] + _COMMON_FIELDS
# Symbolic node-role names as exposed through the RAPI.
_NR_DRAINED = "drained"
_NR_MASTER = "master"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps the single-letter role code returned by the "role" query field
# to the symbolic role name used by the /role resource.
_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }
class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  def GET(self):
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid", "variants"],
                              names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, valid, variants) in diagnose_data:
      # only expose OSes that are reported valid on all nodes
      if valid:
        os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  def PUT(self):
    """Redistribute configuration to all nodes.

    @return: a job id

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    # QueryJobs returns None for unknown job IDs
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      # bulk mode: return full field data for every node
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      # default mode: return only the per-node URIs
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                               use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    # translate the single-letter role code into its symbolic name
    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.req.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.req.request_body

    # Each role maps to a combination of the three node flags; a None
    # value means "leave this flag unchanged".
    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    # exactly one of remote_node/iallocator should be given; the opcode
    # validates this
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    op = opcodes.OpEvacuateNode(node_name=node_name,
                                remote_node=remote_node,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    # live migration is the default (live=1)
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage ressource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Query storage units on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify ressource.

  """
  def PUT(self):
    """Modify a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair ressource.

  """
  def PUT(self):
    """Repair a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      # bulk mode: return full field data for every instance
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      # default mode: return only the per-instance URIs
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.req.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    beparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})
    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    op = opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      os_type=fn('os'),
      disk_template=fn('disk_template'),
      disks=disks,
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', 'loop'),
      dry_run=bool(self.dryRun()),
      )

    return baserlib.SubmitJob([op])
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]
    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                   use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    @return: a job id

    """
    instance_name = self.items[0]
    # static=1 skips querying the live hypervisor state
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    @return: a job id

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    if even if secondary disks are failing.

    @return: a job id

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    @return: a job id

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    # reinstall requires the instance to be stopped first
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)
621 class R_2_instances_name_replace_disks(baserlib.R_Generic):
622 """/2/instances/[instance_name]/replace-disks resource.
626 """Replaces disks on an instance.
629 instance_name = self.items[0]
630 remote_node = self._checkStringVariable("remote_node", default=None)
631 mode = self._checkStringVariable("mode", default=None)
632 raw_disks = self._checkStringVariable("disks", default=None)
633 iallocator = self._checkStringVariable("iallocator", default=None)
637 disks = [int(part) for part in raw_disks.split(",")]
638 except ValueError, err:
639 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
643 op = opcodes.OpReplaceDisks(instance_name=instance_name,
644 remote_node=remote_node,
647 iallocator=iallocator)
649 return baserlib.SubmitJob([op])
class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = None # name is unused for cluster

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " the 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # no we not gonna delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_tags(_R_Tags):
  """ /2/instances/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER