#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

  PUT or POST?
  ============

  According to RFC 2616, the main difference between PUT and POST is that
  POST can create new resources, while PUT can only create (or replace) the
  resource the URI was pointing to on the PUT request.

  In the context of this module, a POST on /2/instances is the legitimate
  way to create an instance, while a PUT would not be, because it creates a
  new entity rather than replacing /2/instances itself.

  So when adding new methods, if they operate on the URI entity itself,
  PUT should be preferred over POST.

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

G_FIELDS = ["name", "uuid",
            "alloc_policy",
            "node_cnt", "node_list",
            "ctime", "mtime", "serial_no",
            ]  # "tags" is missing, so _COMMON_FIELDS cannot be used here.

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
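# Usage sketch (illustrative only, not part of the Ganeti sources): clients
# can check the advertised feature strings before relying on the version-1
# request formats (host name and credentials are assumed):
#
#   import httplib, json
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("GET", "/2/features")
#   features = json.loads(conn.getresponse().read())
#   can_use_reqv1 = "instance-create-reqv1" in features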


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])


class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
                             None)

    return baserlib.SubmitJob([op])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
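# Usage sketch (illustrative only, not part of the Ganeti sources): the
# long-polling protocol above expects a body even on GET; "fields" must be a
# list, and the previously returned values can be fed back in so that only
# further changes are reported:
#
#   import httplib, json
#   body = {"fields": ["status"],
#           "previous_job_info": None,
#           "previous_log_serial": None}
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("GET", "/2/jobs/1234/wait", json.dumps(body),
#                {"Content-Type": "application/json"})
#   reply = json.loads(conn.getresponse().read())
#   # reply is null if nothing changed within the timeout, otherwise
#   # {"job_info": [...], "log_entries": [...]}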


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
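# Usage sketch (illustrative only, not part of the Ganeti sources): the new
# role is sent as a bare JSON string, one of "regular", "master-candidate",
# "drained" or "offline" (credentials omitted):
#
#   import httplib, json
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("PUT", "/2/nodes/node1.example.com/role",
#                json.dumps("drained"), {"Content-Type": "application/json"})
#   job_id = json.loads(conn.getresponse().read())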


class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
                                    iallocator=iallocator,
                                    remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result[0]:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
                                            remote_node=node, disks=[],
                                            mode=constants.REPLACE_DISK_CHG,
                                            early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if "live" in self.queryargs and "mode" in self.queryargs:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")
    elif "live" in self.queryargs:
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)

    return baserlib.SubmitJob([op])
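# Usage sketch (illustrative only, not part of the Ganeti sources): the
# migration mode is selected via query arguments; either the legacy
# "live=0|1" flag or the newer "mode=<string>" argument may be given, but
# not both:
#
#   import httplib
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("POST", "/2/nodes/node1.example.com/migrate?live=1")
#   job_id = conn.getresponse().read()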


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "name": "group_name",
    }

  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
                             rename=rename)


class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])
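# Usage sketch (illustrative only, not part of the Ganeti sources): the body's
# "name" key is renamed to the opcode's "group_name" field by
# _ParseCreateGroupRequest; any further keys must be valid OpGroupAdd
# parameters (the "alloc_policy" key below is an assumed example):
#
#   import httplib, json
#   body = {"name": "group2", "alloc_policy": "preferred"}
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("POST", "/2/groups", json.dumps(body),
#                {"Content-Type": "application/json"})
#   job_id = json.loads(conn.getresponse().read())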


class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def DELETE(self):
    """Delete a node group.

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
    "group_name": name,
    })



class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyGroupRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
    "group_name": name,
    "dry_run": dry_run,
    })


class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                  self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })

    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "os": "os_type",
    "name": "instance_name",
    }

  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
                             rename=rename)


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version == 1:
      data = self.request_body.copy()
      # Remove "__version__"
      data.pop(_REQ_DATA_VERSION, None)
      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])
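# Usage sketch (illustrative only, not part of the Ganeti sources) of a
# version-1 creation request: "name" and "os" are renamed to the opcode's
# "instance_name" and "os_type" fields above; the remaining keys are assumed
# OpInstanceCreate parameters and will vary with the cluster setup:
#
#   import httplib, json
#   body = {"__version__": 1,
#           "mode": "create",                # assumed opcode parameter
#           "name": "inst1.example.com",
#           "os": "debootstrap+default",
#           "disk_template": "plain",        # assumed opcode parameter
#           "disks": [{"size": 1024}],       # assumed opcode parameter
#           "nics": [{}]}                    # assumed opcode parameter
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("POST", "/2/instances", json.dumps(body),
#                {"Content-Type": "application/json"})
#   job_id = json.loads(conn.getresponse().read())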


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpInstanceQueryData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseShutdownInstanceRequest(name, data, dry_run):
  """Parses a request for an instance shutdown.

  @rtype: L{opcodes.OpInstanceShutdown}
  @return: Instance shutdown opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
    "instance_name": name,
    "dry_run": dry_run,
    })


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
                                       bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(ops)
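# Usage sketch (illustrative only, not part of the Ganeti sources): the
# preferred body form matches _ParseInstanceReinstallRequest above; the legacy
# query-argument form ("os", "nostartup") cannot be combined with a body:
#
#   import httplib, json
#   body = {"os": "debootstrap+default", "start": False, "osparams": {}}
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("POST", "/2/instances/inst1.example.com/reinstall",
#                json.dumps(body), {"Content-Type": "application/json"})
#   job_id = json.loads(conn.getresponse().read())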


def _ParseInstanceReplaceDisksRequest(name, data):
  """Parses a request for replacing an instance's disks.

  @rtype: L{opcodes.OpInstanceReplaceDisks}
  @return: Disk replacement opcode

  """
  override = {
    "instance_name": name,
    }

  # Parse disks
  try:
    raw_disks = data["disks"]
  except KeyError:
    pass
  else:
    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
      # Backwards compatibility for strings of the format "1, 2, 3"
      try:
        data["disks"] = [int(part) for part in raw_disks.split(",")]
      except (TypeError, ValueError), err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))

  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
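# Usage sketch (illustrative only, not part of the Ganeti sources): "disks"
# may be a list of integer indices or, for backwards compatibility, a string
# such as "0,1"; the "mode" value below is an assumption about the strings
# accepted by OpInstanceReplaceDisks, which are not defined in this file:
#
#   import httplib, json
#   body = {"mode": "replace_auto", "disks": [0, 1]}
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("POST", "/2/instances/inst1.example.com/replace-disks",
#                json.dumps(body), {"Content-Type": "application/json"})
#   job_id = json.loads(conn.getresponse().read())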


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Rename "destination" to "target_node"
  try:
    data["target_node"] = data.pop("destination")
  except KeyError:
    pass

  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
    "instance_name": name,
    })


class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
    "instance_name": name,
    })


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
    "instance_name": name,
    })


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
    "instance_name": name,
    })


class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })

    return baserlib.SubmitJob([op])
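# Usage sketch (illustrative only, not part of the Ganeti sources):
# "instance_name" and "disk" are filled in from the URI, so the body only
# carries the remaining OpInstanceGrowDisk parameters; the "amount" key
# (mebibytes) is an assumed parameter name:
#
#   import httplib, json
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("POST", "/2/instances/inst1.example.com/disk/0/grow",
#                json.dumps({"amount": 1024}),
#                {"Content-Type": "application/json"})
#   job_id = json.loads(conn.getresponse().read())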


class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Extracts the list of fields to query from query arguments.

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits a comma-separated list of field names.

  """
  return [i.strip() for i in fields.split(",")]


class R_2_query(baserlib.R_Generic):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def _Query(self, fields, filter_):
    return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Queries for resources, optionally with a filter.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    return self._Query(fields, self.request_body.get("filter", None))
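# Usage sketch (illustrative only, not part of the Ganeti sources): GET takes
# a comma-separated "fields" query argument, while PUT accepts a JSON body
# with "fields" and an optional "filter"; the filter syntax and the
# "instance" resource name below are assumptions not defined in this file:
#
#   import httplib, json
#   body = {"fields": ["name", "status"],
#           "filter": ["=", "name", "inst1.example.com"]}
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("PUT", "/2/query/instance", json.dumps(body),
#                {"Content-Type": "application/json"})
#   print json.loads(conn.getresponse().read())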


class R_2_query_fields(baserlib.R_Generic):
  """/2/query/[resource]/fields resource.

  """
  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()


class _R_Tags(baserlib.R_Generic):
  """ Quasi-class for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The tags to add should be given as the 'tag' query parameter (which
    may be repeated); a job id is returned.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a set of tags.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # no, we are not going to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
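# Usage sketch (illustrative only, not part of the Ganeti sources): tags are
# passed as repeated "tag" query arguments for both PUT and DELETE:
#
#   import httplib
#   conn = httplib.HTTPSConnection("cluster.example.com", 5080)
#   conn.request("PUT", "/2/instances/inst1.example.com/tags?tag=web&tag=prod")
#   job_id = conn.getresponse().read()
#   conn.request("DELETE", "/2/instances/inst1.example.com/tags?tag=web")
#   job_id = conn.getresponse().read()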


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER