#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

"""

import ganeti.cli
import ganeti.opcodes
from ganeti import http
from ganeti import luxi
from ganeti import constants
from ganeti.rapi import baserlib


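# Instance and node fields requested from luxi (QueryInstances/QueryNodes)
# by the resources below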
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.bridges",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "status",
            "tags"]

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt", "tags",
            "ctotal", "cnodes", "csockets",
            ]


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  DOC_URI = "/version"

  def GET(self):
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  DOC_URI = "/2/info"

  def GET(self):
    """Returns cluster information.

    Example::

      {
        "config_version": 3,
        "name": "cluster1.example.com",
        "software_version": "1.2.4",
        "os_api_version": 5,
        "export_version": 0,
        "master": "node1.example.com",
        "architecture": [
          "64bit",
          "x86_64"
        ],
        "hypervisor_type": "xen-pvm",
        "protocol_version": 12
      }

    """
    client = luxi.Client()
    return client.QueryClusterInfo()


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  DOC_URI = "/2/os"

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    op = ganeti.opcodes.OpDiagnoseOS(output_fields=["name", "valid"],
                                     names=[])
    diagnose_data = ganeti.cli.SubmitOpCode(op)

    if not isinstance(diagnose_data, list):
      raise http.HttpInternalServerError(message="Can't get OS list")

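    # Each row is (name, valid), as requested via output_fields above;
    # report only the names of valid OSes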
    return [row[0] for row in diagnose_data if row[1]]


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  DOC_URI = "/2/jobs"

  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with job ids and uris.

    """
    fields = ["id"]
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in luxi.Client().QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  DOC_URI = "/2/jobs/[job_id]"

  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one per
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = luxi.Client().QueryJobs([job_id, ], fields)[0]
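    # A None result here means the job id is not known, hence the 404 below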
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = luxi.Client().CancelJob(job_id)
    return result


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  DOC_URI = "/2/nodes"

  def GET(self):
    """Returns a list of all nodes.

    Example::

      [
        {
          "id": "node1.example.com",
          "uri": "\/nodes\/node1.example.com"
        },
        {
          "id": "node2.example.com",
          "uri": "\/nodes\/node2.example.com"
        }
      ]

    If the optional 'bulk' argument is provided and set to a true
    value (i.e. '?bulk=1'), the output contains detailed
    information about nodes as a list.

    Example::

      [
        {
          "pinst_cnt": 1,
          "mfree": 31280,
          "mtotal": 32763,
          "name": "www.example.com",
          "tags": [],
          "mnode": 512,
          "dtotal": 5246208,
          "sinst_cnt": 2,
          "dfree": 5171712,
          "offline": false
        },
        ...
      ]

    @return: a list of dictionaries with 'id' and 'uri' keys, one per node

    """
    client = luxi.Client()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  DOC_URI = "/2/nodes/[node_name]"

  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = luxi.Client()
    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                               use_locking=self.useLocking())

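    # QueryNodes returns one row per requested name; pair the single row
    # with the requested field names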
    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  DOC_URI = "/2/instances"

  def GET(self):
    """Returns a list of all available instances.

    Example::

      [
        {
          "name": "web.example.com",
          "uri": "\/instances\/web.example.com"
        },
        {
          "name": "mail.example.com",
          "uri": "\/instances\/mail.example.com"
        }
      ]

    If the optional 'bulk' argument is provided and set to a true
    value (i.e. '?bulk=1'), the output contains detailed
    information about instances as a list.

    Example::

      [
        {
          "status": "running",
          "bridge": "xen-br0",
          "name": "web.example.com",
          "tags": ["tag1", "tag2"],
          "admin_ram": 512,
          "sda_size": 20480,
          "pnode": "node1.example.com",
          "mac": "01:23:45:67:89:01",
          "sdb_size": 4096,
          "snodes": ["node2.example.com"],
          "disk_template": "drbd",
          "ip": null,
          "admin_state": true,
          "os": "debian-etch",
          "vcpus": 2,
          "oper_state": true
        },
        ...
      ]

    @return: a list of dictionaries with instance names and URIs, one per
        instance

    """
    client = luxi.Client()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    opts = self.req.request_post_data

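    # Extract the backend and hypervisor parameter sets from the flat
    # POST data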
    beparams = baserlib.MakeParamsDict(opts, constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(opts, constants.HVS_PARAMETERS)

    op = ganeti.opcodes.OpCreateInstance(
        instance_name=opts.get('name'),
        disk_size=opts.get('size', 20 * 1024),
        swap_size=opts.get('swap', 4 * 1024),
        disk_template=opts.get('disk_template', None),
        mode=constants.INSTANCE_CREATE,
        os_type=opts.get('os'),
        pnode=opts.get('pnode'),
        snode=opts.get('snode'),
        ip=opts.get('ip', 'none'),
        bridge=opts.get('bridge', None),
        start=opts.get('start', True),
        ip_check=opts.get('ip_check', True),
        wait_for_sync=opts.get('wait_for_sync', True),
        mac=opts.get('mac', 'auto'),
        hypervisor=opts.get('hypervisor', None),
        hvparams=hvparams,
        beparams=beparams,
        iallocator=opts.get('iallocator', None),
        file_storage_dir=opts.get('file_storage_dir', None),
        file_driver=opts.get('file_driver', 'loop'),
        )

    job_id = ganeti.cli.SendJob([op])
    return job_id


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  DOC_URI = "/2/instances/[instance_name]"

  def GET(self):
    """Send information about an instance.

    """
    client = luxi.Client()
    instance_name = self.items[0]
    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                   use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """

  DOC_URI = "/2/instances/[instance_name]/reboot"

  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
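    # queryargs values are lists (one entry per occurrence of the parameter
    # in the query string), hence the [0] extraction below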
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self.queryargs.get('ignore_secondaries',
                                                 [False])[0])
    op = ganeti.opcodes.OpRebootInstance(
        instance_name=instance_name,
        reboot_type=reboot_type,
        ignore_secondaries=ignore_secondaries)

    job_id = ganeti.cli.SendJob([op])

    return job_id


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """

  DOC_URI = "/2/instances/[instance_name]/startup"

  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self.queryargs.get('force', [False])[0])
    op = ganeti.opcodes.OpStartupInstance(instance_name=instance_name,
                                          force=force_startup)

    job_id = ganeti.cli.SendJob([op])

    return job_id


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """

  DOC_URI = "/2/instances/[instance_name]/shutdown"

  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = ganeti.opcodes.OpShutdownInstance(instance_name=instance_name)

    job_id = ganeti.cli.SendJob([op])

    return job_id


class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting from this class you must define
  DOC_URI and TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

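    # Cluster tags are not addressed by an item name in the URI, all other
    # tag levels are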
    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = ""

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request should be a PUT of a list of tag strings to this URI;
    the result is a job id.

    """
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.req.request_post_data, name=self.name)

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    if 'tag' not in self.queryargs:
      # we refuse to delete all the tags at once
      raise http.HttpNotImplemented()
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name)


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  DOC_URI = "/2/instances/[instance_name]/tags"
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  DOC_URI = "/2/nodes/[node_name]/tags"
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  DOC_URI = "/2/tags"
  TAG_LEVEL = constants.TAG_CLUSTER