#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resource library.

  PUT or POST?
  ============

  According to RFC2616 the main difference between PUT and POST is that
  POST can create new resources but PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, for instance creation a POST on
  /2/instances is legitimate while a PUT would not be, since it creates a
  new entity rather than just replacing /2/instances with it.

  So when adding new methods, if they operate on the URI entity itself,
  PUT should be preferred over POST.

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

G_FIELDS = ["name", "uuid",
            "alloc_policy",
            "node_cnt", "node_list",
            "ctime", "mtime", "serial_no",
            ]  # "tags" is missing, so _COMMON_FIELDS can't be used here.

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function, which logs the status instead of
    # printing it
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])


class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
                             None)

    return baserlib.SubmitJob([op])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a list of jobs.

    @return: a list of dictionaries with job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for
              each opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }


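# Illustrative request body for the wait resource above (a sketch, not part
# of the module): R_2_jobs_id_wait.GET reads "fields" and, optionally,
# "previous_job_info" and "previous_log_serial" from the request body, e.g.
#
#   {"fields": ["status", "opstatus", "opresult"]}
#
# The example field names are taken from the job fields used in
# R_2_jobs_id.GET above.

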
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])


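# Illustrative use of the role resource above (a sketch, not part of the
# module): the PUT body is a bare JSON string naming the new role, one of
# the _NR_* values defined at the top of this file, e.g.
#
#   PUT /2/nodes/[node_name]/role
#   "master-candidate"
#
# which R_2_nodes_name_role.PUT translates into an OpNodeSetParams opcode
# with master_candidate=True.

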
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
                                    iallocator=iallocator,
                                    remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function, which logs the status instead of
    # printing it
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result[0]:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
                                            remote_node=node, disks=[],
                                            mode=constants.REPLACE_DISK_CHG,
                                            early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if "live" in self.queryargs and "mode" in self.queryargs:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")
    elif "live" in self.queryargs:
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "name": "group_name",
    }

  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
                             rename=rename)


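# Illustrative sketch (not part of the module) of how the helper above maps
# a request body to an opcode: a POST body such as
#
#   {"name": "group1", "alloc_policy": "preferred"}
#
# is turned into OpGroupAdd(group_name="group1", alloc_policy="preferred",
# dry_run=...), because "name" is renamed to "group_name" and "dry_run" is
# forced from the query argument. The "alloc_policy" key/value is an
# assumption for illustration; the accepted keys are defined by
# opcodes.OpGroupAdd.

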
class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def DELETE(self):
    """Delete a node group.

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
    "group_name": name,
    })


class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyGroupRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
    "group_name": name,
    "dry_run": dry_run,
    })


class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                  self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })

    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  override = {
    "dry_run": dry_run,
    }

  rename = {
    "os": "os_type",
    "name": "instance_name",
    }

  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
                             rename=rename)


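# Illustrative version-1 creation body (a sketch, not part of the module):
# apart from the "__version__" marker handled in R_2_instances.POST below,
# the keys follow OpInstanceCreate parameter names, with "name" and "os"
# renamed by the helper above, e.g.
#
#   {
#     "__version__": 1,
#     "name": "instance1.example.com",
#     "os": "debian-etch",
#     "mode": "create",
#     "disk_template": "plain",
#     "disks": [{"size": 1024}],
#     "nics": [{}],
#   }
#
# The "mode", "disk_template", "disks" and "nics" values are assumptions
# for illustration; the authoritative parameter list is defined by
# opcodes.OpInstanceCreate.

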
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpInstanceCreate}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpInstanceCreate(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      data = self.request_body.copy()
      # Remove "__version__"
      data.pop(_REQ_DATA_VERSION, None)
      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpInstanceQueryData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpInstanceShutdown(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops


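# Illustrative reinstall body (a sketch, not part of the module): the helper
# above accepts "os", "start" and "osparams", so a request such as
#
#   {"os": "debian-etch", "start": True, "osparams": None}
#
# expands into three opcodes: OpInstanceShutdown, OpInstanceReinstall and,
# because "start" is true, a final OpInstanceStartup.

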
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(ops)


def _ParseInstanceReplaceDisksRequest(name, data):
  """Parses a request for replacing an instance's disks.

  @rtype: L{opcodes.OpInstanceReplaceDisks}
  @return: Disk replacement opcode

  """
  override = {
    "instance_name": name,
    }

  # Parse disks
  try:
    raw_disks = data["disks"]
  except KeyError:
    pass
  else:
    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
      # Backwards compatibility for strings of the format "1, 2, 3"
      try:
        data["disks"] = [int(part) for part in raw_disks.split(",")]
      except (TypeError, ValueError), err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))

  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)


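# Illustrative disk specifications accepted by the helper above (a sketch,
# not part of the module): "disks" may be a list of indices, e.g.
#
#   {"disks": [0, 2]}
#
# or, for backwards compatibility, a comma-separated string such as
# "0, 2", which is split and converted to the same list of integers. Any
# remaining keys must match parameters of opcodes.OpInstanceReplaceDisks.

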
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Rename "destination" to "target_node"
  try:
    data["target_node"] = data.pop("destination")
  except KeyError:
    pass

  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
    "instance_name": name,
    })


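# Illustrative export body (a sketch, not part of the module): the helper
# above renames "destination" to "target_node" before building the opcode,
# so
#
#   {"destination": "node2.example.com", "shutdown": True}
#
# becomes OpBackupExport(instance_name=..., target_node="node2.example.com",
# shutdown=True). The "shutdown" key is an assumption for illustration; the
# accepted keys are defined by opcodes.OpBackupExport.

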
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
    "instance_name": name,
    })


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
    "instance_name": name,
    })


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
    "instance_name": name,
    })


class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })

    return baserlib.SubmitJob([op])


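# Illustrative grow request (a sketch, not part of the module): the disk
# index comes from the URI and the remaining opcode parameters from the
# body, e.g. POST /2/instances/[instance_name]/disk/0/grow with
#
#   {"amount": 1024}
#
# The "amount" key (size delta in mebibytes) is an assumption for
# illustration; the accepted keys are defined by opcodes.OpInstanceGrowDisk.

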
class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Returns the requested query fields from the query arguments.

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits a comma-separated list of query fields.

  """
  return [i.strip() for i in fields.split(",")]


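# For example (a sketch, not part of the module), a query string such as
# ?fields=name, mtime ,uuid yields
# _SplitQueryFields("name, mtime ,uuid") == ["name", "mtime", "uuid"],
# which is what both helpers above return.

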
class R_2_query(baserlib.R_Generic):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def _Query(self, fields, filter_):
    return baserlib.GetClient().Query(self.items[0], fields, filter_).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    return self._Query(fields, self.request_body.get("filter", None))


class R_2_query_fields(baserlib.R_Generic):
  """/2/query/[resource]/fields resource.

  """
  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()


class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request, as a list of strings, should be PUT to this URI. A job id
    will be returned.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # we are not going to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))


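# Illustrative tag requests (a sketch, not part of the module): tags are
# passed as repeated query arguments, as described in _R_Tags.DELETE above,
# so adding or removing the tags "foo" and "bar" on an instance (see
# R_2_instances_name_tags below) looks like
#
#   PUT    /2/instances/[instance_name]/tags?tag=foo&tag=bar
#   DELETE /2/instances/[instance_name]/tags?tag=foo&tag=bar
#
# and both return a job id.

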
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER