lib/rapi/rlib2.py @ 508e9b20

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

"""

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti.rapi import baserlib


I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "status",
            "tags"]

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt", "tags",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "serial_no", "role",
            "pinst_list", "sinst_list",
            ]

_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIDATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  def GET(self):
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  def GET(self):
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


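# Illustrative usage of the two resources above (not part of the original
# module). The host name and port are assumptions -- the RAPI daemon
# conventionally serves HTTPS on port 5080 -- and responses are JSON-encoded:
#
#   GET https://cluster.example.com:5080/version
#     -> the integer constants.RAPI_VERSION (2 for this generation of the API)
#
#   GET https://cluster.example.com:5080/2/info
#     -> an object with the cluster details returned by QueryClusterInfo()

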
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "valid"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    return [row[0] for row in diagnose_data if row[1]]


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  def PUT(self):
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a list of jobs.

    @return: a list of dictionaries with job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


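# Illustrative usage of the job resources above (not part of the original
# module; host/port assumed as before, ids are placeholders). A client
# typically lists jobs, then polls one job until its status is final:
#
#   GET /2/jobs
#     -> [{"id": 17, "uri": "/2/jobs/17"}, ...]
#
#   GET /2/jobs/17
#     -> {"id": 17, "status": "running", "ops": [...], "opstatus": [...],
#         "opresult": [...], ...}      # repeat until the status is final
#
#   DELETE /2/jobs/17
#     -> cancels the job if it has not started yet

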
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


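# Illustrative usage of the node list above (not part of the original module).
# The bulk behaviour depends on the useBulk() helper from baserlib, assumed
# here to look at a "bulk" query argument:
#
#   GET /2/nodes
#     -> [{"id": "node1.example.com", "uri": "/2/nodes/node1.example.com"}, ...]
#
#   GET /2/nodes?bulk=1
#     -> one object per node, containing every field listed in N_FIELDS

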
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=N_FIELDS,
                               use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """/2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.req.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.req.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIDATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])


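# Illustrative usage of the role resource above (not part of the original
# module). The PUT body is a JSON-encoded string holding one of the _NR_*
# role names defined at the top of this file:
#
#   GET /2/nodes/node1.example.com/role
#     -> "master-candidate"
#
#   PUT /2/nodes/node1.example.com/role      body: "drained"
#     -> a job id for the OpSetNodeParams job
#
# Whether the change is forced depends on useForce(), assumed to read a
# "force" query argument (e.g. ...?force=1).

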
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    op = opcodes.OpEvacuateNode(node_name=node_name,
                                remote_node=remote_node,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]
    live = bool(self._checkIntVariable("live", default=1))

    op = opcodes.OpMigrateNode(node_name=node_name, live=live)

    return baserlib.SubmitJob([op])


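# Illustrative usage of the evacuation/migration resources above (not part of
# the original module); node and iallocator names are placeholders:
#
#   POST /2/nodes/node1.example.com/evacuate?iallocator=hail
#   POST /2/nodes/node1.example.com/evacuate?remote_node=node2.example.com
#     -> a job id for OpEvacuateNode (secondary instances are moved elsewhere)
#
#   POST /2/nodes/node1.example.com/migrate?live=0
#     -> a job id for OpMigrateNode; live defaults to 1 when omitted

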
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


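# Illustrative usage of the storage resource above (not part of the original
# module). Both query parameters are mandatory; the storage type and output
# field names shown are assumptions about what LUQueryNodeStorage accepts:
#
#   GET /2/nodes/node1.example.com/storage?storage_type=lvm-vg&output_fields=name,size,free
#     -> a job id; the job result carries the requested fields per storage unit

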
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


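# Illustrative usage of the storage modify/repair resources above (not part of
# the original module). Device names and storage types are assumptions:
#
#   PUT /2/nodes/node1.example.com/storage/modify?storage_type=lvm-pv&name=/dev/sdb1&allocatable=0
#     -> a job id; marks the given physical volume as not allocatable
#
#   PUT /2/nodes/node1.example.com/storage/repair?storage_type=lvm-vg&name=xenvg
#     -> a job id for the OpRepairNodeStorage job

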
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.req.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    beparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.req.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})
    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    op = opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', 'loop'),
      dry_run=bool(self.dryRun()),
      )

    return baserlib.SubmitJob([op])


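# Illustrative POST body for the instance creation above (not part of the
# original module). Besides the parameters read explicitly via
# getBodyParameter, backend/hypervisor parameters are picked out of the same
# body by MakeParamsDict; the "memory"/"vcpus" names, the MiB unit and the
# values shown here are assumptions:
#
#   POST /2/instances
#   {
#     "name": "inst1.example.com",
#     "disk_template": "drbd",
#     "disks": [10240],                    # integer sizes (MiB assumed)
#     "os": "debian-etch",
#     "pnode": "node1.example.com",
#     "snode": "node2.example.com",
#     "memory": 512,
#     "vcpus": 1
#   }
#     -> a job id for the OpCreateInstance job

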
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]
    result = client.QueryInstances(names=[instance_name], fields=I_FIELDS,
                                   use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


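# Illustrative usage of the reboot resource above (not part of the original
# module); when "type" is omitted, constants.INSTANCE_REBOOT_HARD is used:
#
#   POST /2/instances/inst1.example.com/reboot?type=soft
#   POST /2/instances/inst1.example.com/reboot?type=full&ignore_secondaries=1
#     -> a job id for the OpRebootInstance job

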
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


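# Illustrative usage of the startup/shutdown resources above (not part of the
# original module):
#
#   PUT /2/instances/inst1.example.com/startup?force=1
#     -> a job id; force=1 starts the instance even with failing secondary disks
#
#   PUT /2/instances/inst1.example.com/shutdown
#     -> a job id for the OpShutdownInstance job

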
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)


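# Illustrative usage of the reinstall resource above (not part of the original
# module). The OS name is a placeholder; the resource submits shutdown,
# reinstall and (unless nostartup=1) startup as a single job:
#
#   POST /2/instances/inst1.example.com/reinstall?os=debian-etch
#   POST /2/instances/inst1.example.com/reinstall?os=debian-etch&nostartup=1
#     -> a job id covering all submitted opcodes

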
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


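# Illustrative usage of the replace-disks resource above (not part of the
# original module). "mode" must be one of the OpReplaceDisks replacement
# modes; the mode strings shown here are assumptions, and disk indices are
# comma-separated:
#
#   POST /2/instances/inst1.example.com/replace-disks?mode=replace_auto&iallocator=hail
#   POST /2/instances/inst1.example.com/replace-disks?mode=replace_new_secondary&disks=0,1&remote_node=node3.example.com
#     -> a job id for the OpReplaceDisks job

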
class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting from this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out the cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL != constants.TAG_CLUSTER:
      self.name = items[0]
    else:
      self.name = ""

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The tags to add are passed as repeated 'tag' query parameters, and
    you'll get back a job id.

    """
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    if 'tag' not in self.queryargs:
      # no, we are not going to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))


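# Illustrative usage of the tag handling implemented by _R_Tags above (the
# concrete resources follow below; this block is not part of the original
# module). Tags travel as repeated "tag" query arguments:
#
#   GET    /2/instances/inst1.example.com/tags
#     -> ["tag1", "tag2"]
#
#   PUT    /2/instances/inst1.example.com/tags?tag=tag1&tag=tag2
#   DELETE /2/instances/inst1.example.com/tags?tag=tag1
#     -> a job id (the baserlib helpers submit the corresponding tag opcodes)

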
class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER