Statistics
| Branch: | Tag: | Revision:

root / lib / rapi / rlib2.py @ c0a146a1

History | View | Annotate | Download (36.2 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Remote API version 2 library.
23

24
  PUT or POST?
25
  ============
26

27
  According to RFC2616 the main difference between PUT and POST is that
28
  POST can create new resources but PUT can only create the resource the
29
  URI was pointing to on the PUT request.
30

31
  To be in context of this module for instance creation POST on
32
  /2/instances is legitimate while PUT would not be, since it creates a
33
  new entity and not just replace /2/instances with it.
34

35
  So when adding new methods, if they are operating on the URI entity itself,
36
  PUT should be preferred over POST.
37

38
"""
39

    
40
# pylint: disable-msg=C0103
41

    
42
# C0103: Invalid name, since the R_* names are not conforming
43

    
44
from ganeti import opcodes
45
from ganeti import http
46
from ganeti import constants
47
from ganeti import cli
48
from ganeti import rapi
49
from ganeti import ht
50
from ganeti.rapi import baserlib
51

    
52

    
53
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
54
I_FIELDS = ["name", "admin_state", "os",
55
            "pnode", "snodes",
56
            "disk_template",
57
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
58
            "network_port",
59
            "disk.sizes", "disk_usage",
60
            "beparams", "hvparams",
61
            "oper_state", "oper_ram", "oper_vcpus", "status",
62
            "custom_hvparams", "custom_beparams", "custom_nicparams",
63
            ] + _COMMON_FIELDS
64

    
65
N_FIELDS = ["name", "offline", "master_candidate", "drained",
66
            "dtotal", "dfree",
67
            "mtotal", "mnode", "mfree",
68
            "pinst_cnt", "sinst_cnt",
69
            "ctotal", "cnodes", "csockets",
70
            "pip", "sip", "role",
71
            "pinst_list", "sinst_list",
72
            "master_capable", "vm_capable",
73
            "group.uuid",
74
            ] + _COMMON_FIELDS
75

    
76
G_FIELDS = ["name", "uuid",
77
            "alloc_policy",
78
            "node_cnt", "node_list",
79
            "ctime", "mtime", "serial_no",
80
            ]  # "tags" is missing to be able to use _COMMON_FIELDS here.
81

    
82
_NR_DRAINED = "drained"
83
_NR_MASTER_CANDIATE = "master-candidate"
84
_NR_MASTER = "master"
85
_NR_OFFLINE = "offline"
86
_NR_REGULAR = "regular"
87

    
88
_NR_MAP = {
89
  constants.NR_MASTER: _NR_MASTER,
90
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIATE,
91
  constants.NR_DRAINED: _NR_DRAINED,
92
  constants.NR_OFFLINE: _NR_OFFLINE,
93
  constants.NR_REGULAR: _NR_REGULAR,
94
  }
95

    
96
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
97

    
98
# Request data version field
99
_REQ_DATA_VERSION = "__version__"
100

    
101
# Feature string for instance creation request data version 1
102
_INST_CREATE_REQV1 = "instance-create-reqv1"
103

    
104
# Feature string for instance reinstall request version 1
105
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
106

    
107
# Feature string for node migration version 1
108
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
109

    
110
# Feature string for node evacuation with LU-generated jobs
111
_NODE_EVAC_RES1 = "node-evac-res1"
112

    
113
ALL_FEATURES = frozenset([
114
  _INST_CREATE_REQV1,
115
  _INST_REINSTALL_REQV1,
116
  _NODE_MIGRATE_REQV1,
117
  _NODE_EVAC_RES1,
118
  ])
119

    
120
# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
121
_WFJC_TIMEOUT = 10
122

    
123

    
124
class R_version(baserlib.R_Generic):
  """/version resource.

  Clients should query this resource to learn the remote API version
  and adjust their behaviour accordingly.

  """
  @staticmethod
  def GET():
    """Return the remote API version number.

    """
    return constants.RAPI_VERSION
137

    
138

    
139
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    # Ask the master daemon directly for the cluster information
    return baserlib.GetClient().QueryClusterInfo()
150

    
151

    
152
class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    # Return the feature set as a plain list
    return [feature for feature in ALL_FEATURES]
162

    
163

    
164
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    client = baserlib.GetClient()
    diagnose_op = opcodes.OpOsDiagnose(output_fields=["name", "variants"],
                                       names=[])
    job_id = baserlib.SubmitJob([diagnose_op], client)
    # we use custom feedback function, instead of print we log the status
    job_result = cli.PollJob(job_id, client, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = job_result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    # Expand each (name, variants) pair into the full OS names
    names = []
    for (os_name, os_variants) in diagnose_data:
      names.extend(cli.CalculateOSNames(os_name, os_variants))

    return names
192

    
193

    
194
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    @return: a job id

    """
    redist_op = opcodes.OpClusterRedistConf()
    return baserlib.SubmitJob([redist_op])
204

    
205

    
206
class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    # No overrides or renames; the body is passed through as-is
    modify_op = baserlib.FillOpcode(opcodes.OpClusterSetParams,
                                    self.request_body, None)
    return baserlib.SubmitJob([modify_op])
220

    
221

    
222
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    client = baserlib.GetClient()
    # Each row is a one-element list; flatten into plain job IDs
    job_ids = [row[0] for row in client.QueryJobs(None, ["id"])]
    return baserlib.BuildUriList(job_ids, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
239

    
240

    
241
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_info = baserlib.GetClient().QueryJobs([self.items[0], ], fields)[0]
    if job_info is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, job_info)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    return baserlib.GetClient().CancelJob(self.items[0])
275

    
276

    
277
class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    # Validate the body parameters before talking to the master daemon
    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if prev_job_info is not None and not isinstance(prev_job_info, list):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if prev_log_serial is not None and \
       not isinstance(prev_log_serial, (int, long)):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
324

    
325

    
326
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if not self.useBulk():
      # Name-only listing as a list of URIs
      rows = client.QueryNodes([], ["name"], False)
      return baserlib.BuildUriList([row[0] for row in rows], "/2/nodes/%s",
                                   uri_fields=("id", "uri"))

    # Bulk listing with the full field set
    bulkdata = client.QueryNodes([], N_FIELDS, False)
    return baserlib.MapBulkFields(bulkdata, N_FIELDS)
344

    
345

    
346
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    client = baserlib.GetClient()
    rows = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                          names=[self.items[0]],
                                          fields=N_FIELDS,
                                          use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, rows[0])
362

    
363

    
364
class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    client = baserlib.GetClient()
    query_result = client.QueryNodes(names=[self.items[0]], fields=["role"],
                                     use_locking=self.useLocking())

    return _NR_MAP[query_result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    # Maps each settable role to the (master_candidate, offline, drained)
    # values passed to OpNodeSetParams; the master role can not be set here
    flag_map = {
      _NR_REGULAR: (False, False, False),
      _NR_MASTER_CANDIATE: (True, None, None),
      _NR_DRAINED: (None, None, True),
      _NR_OFFLINE: (None, True, None),
      }

    try:
      (candidate, offline, drained) = flag_map[role]
    except KeyError:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
420

    
421

    
422
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all instances off a node.

    """
    override = {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    evac_op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body,
                                  override)

    return baserlib.SubmitJob([evac_op])
436

    
437

    
438
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if not self.queryargs:
      # New-style request: everything comes from the body
      data = self.request_body
    else:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" not in self.queryargs:
        mode = self._checkStringVariable("mode", default=None)
      elif self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE

      data = {
        "mode": mode,
        }

    migrate_op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
      "node_name": node_name,
      })

    return baserlib.SubmitJob([migrate_op])
473

    
474

    
475
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Queries storage units on a node.

    Requires the "storage_type" and "output_fields" query parameters;
    "output_fields" is a comma-separated list of field names.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
499

    
500

    
501
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Modifies a storage unit on a node.

    Requires the "storage_type" and "name" query parameters; the
    optional "allocatable" parameter (defaulting to 1) is forwarded as
    the only supported change.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
529

    
530

    
531
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Repairs a storage unit on a node.

    Requires the "storage_type" and "name" query parameters.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
552

    
553

    
554
def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  # The body's "name" key maps to the opcode's "group_name" parameter
  return baserlib.FillOpcode(opcodes.OpGroupAdd, data,
                             {"dry_run": dry_run},
                             rename={"name": "group_name"})
571

    
572

    
573
class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if not self.useBulk():
      # Name-only listing as a list of URIs
      rows = client.QueryGroups([], ["name"], False)
      return baserlib.BuildUriList([row[0] for row in rows], "/2/groups/%s",
                                   uri_fields=("name", "uri"))

    # Bulk listing with the full field set
    bulkdata = client.QueryGroups([], G_FIELDS, False)
    return baserlib.MapBulkFields(bulkdata, G_FIELDS)

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    create_op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([create_op])
601

    
602

    
603
class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    client = baserlib.GetClient()
    rows = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                          names=[self.items[0]],
                                          fields=G_FIELDS,
                                          use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, rows[0])

  def DELETE(self):
    """Delete a node group.

    """
    remove_op = opcodes.OpGroupRemove(group_name=self.items[0],
                                      dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([remove_op])
628

    
629

    
630
def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  override = {
    "group_name": name,
    }
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, override)
640

    
641

    
642

    
643
class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    modify_op = _ParseModifyGroupRequest(self.items[0], self.request_body)
    return baserlib.SubmitJob([modify_op])
658

    
659

    
660
def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  override = {
    "group_name": name,
    "dry_run": dry_run,
    }
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, override)
678

    
679

    
680
class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    rename_op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                         self.dryRun())
    return baserlib.SubmitJob([rename_op])
694

    
695

    
696
class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    override = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      }
    assign_op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes,
                                    self.request_body, override)

    return baserlib.SubmitJob([assign_op])
713

    
714

    
715
def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  # Body keys "os" and "name" map to the opcode's "os_type" and
  # "instance_name" parameters respectively
  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data,
                             {"dry_run": dry_run},
                             rename={
                               "os": "os_type",
                               "name": "instance_name",
                               })
733

    
734

    
735
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()
    use_locking = self.useLocking()

    if not self.useBulk():
      # Name-only listing as a list of URIs
      rows = client.QueryInstances([], ["name"], use_locking)
      return baserlib.BuildUriList([row[0] for row in rows],
                                   "/2/instances/%s",
                                   uri_fields=("id", "uri"))

    # Bulk listing with the full field set
    bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
    return baserlib.MapBulkFields(bulkdata, I_FIELDS)

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")

    if data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    data = self.request_body.copy()
    # Remove "__version__"
    data.pop(_REQ_DATA_VERSION, None)

    create_op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
    return baserlib.SubmitJob([create_op])
780

    
781

    
782
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    rows = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                          names=[self.items[0]],
                                          fields=I_FIELDS,
                                          use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, rows[0])

  def DELETE(self):
    """Delete an instance.

    """
    remove_op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                         ignore_failures=False,
                                         dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([remove_op])
808

    
809

    
810
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    # The optional "static" query parameter defaults to 0 (dynamic data)
    use_static = bool(self._checkIntVariable("static", default=0))

    query_op = opcodes.OpInstanceQueryData(instances=[self.items[0]],
                                           static=use_static)
    return baserlib.SubmitJob([query_op])
824

    
825

    
826
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    # Hard reboot is the default when no "type" parameter is given
    reboot_type = self.queryargs.get("type",
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))

    reboot_op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                         reboot_type=reboot_type,
                                         ignore_secondaries=ignore_secondaries,
                                         dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([reboot_op])
849

    
850

    
851
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing, and an optional
    no_remember=[0|1] parameter.

    @return: a job id

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable("force"))
    no_remember = bool(self._checkIntVariable("no_remember"))
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()),
                                   no_remember=no_remember)

    return baserlib.SubmitJob([op])
873

    
874

    
875
def _ParseShutdownInstanceRequest(name, data, dry_run, no_remember):
  """Parses a request for an instance shutdown.

  @rtype: L{opcodes.OpInstanceShutdown}
  @return: Instance shutdown opcode

  """
  override = {
    "instance_name": name,
    "dry_run": dry_run,
    "no_remember": no_remember,
    }
  return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, override)
887

    
888

    
889
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    no_remember = bool(self._checkIntVariable("no_remember"))
    shutdown_op = _ParseShutdownInstanceRequest(self.items[0],
                                                self.request_body,
                                                bool(self.dryRun()),
                                                no_remember)

    return baserlib.SubmitJob([shutdown_op])
908

    
909

    
910
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  # Shut down, reinstall and (optionally) start the instance again
  ops = [opcodes.OpInstanceShutdown(instance_name=name)]
  ops.append(opcodes.OpInstanceReinstall(instance_name=name,
                                         os_type=ostype,
                                         osparams=osparams))

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops
932

    
933

    
934
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      # Body and query parameters are mutually exclusive
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    reinstall_ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(reinstall_ops)
965

    
966

    
967
def _ParseInstanceReplaceDisksRequest(name, data):
968
  """Parses a request for an instance export.
969

970
  @rtype: L{opcodes.OpInstanceReplaceDisks}
971
  @return: Instance export opcode
972

973
  """
974
  override = {
975
    "instance_name": name,
976
    }
977

    
978
  # Parse disks
979
  try:
980
    raw_disks = data["disks"]
981
  except KeyError:
982
    pass
983
  else:
984
    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
985
      # Backwards compatibility for strings of the format "1, 2, 3"
986
      try:
987
        data["disks"] = [int(part) for part in raw_disks.split(",")]
988
      except (TypeError, ValueError), err:
989
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
990

    
991
  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
992

    
993

    
994
class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    op = _ParseInstanceReplaceDisksRequest(instance_name, self.request_body)
    return baserlib.SubmitJob([op])
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    @return: a job id

    """
    op = opcodes.OpInstanceActivateDisks(
      instance_name=self.items[0],
      ignore_size=bool(self._checkIntVariable('ignore_size')))
    return baserlib.SubmitJob([op])
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    @return: a job id

    """
    op = opcodes.OpInstanceDeactivateDisks(instance_name=self.items[0])
    return baserlib.SubmitJob([op])
class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    The optional "mode" query parameter selects the export mode.

    @return: a job id

    """
    op = opcodes.OpBackupPrepare(instance_name=self.items[0],
                                 mode=self._checkStringVariable("mode"))
    return baserlib.SubmitJob([op])
def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request body, modified in place
  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Rename "destination" to "target_node"
  if "destination" in data:
    data["target_node"] = data.pop("destination")

  override = {
    "instance_name": name,
    }
  return baserlib.FillOpcode(opcodes.OpBackupExport, data, override)
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    body = self.request_body
    if not isinstance(body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], body)
    return baserlib.SubmitJob([op])
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request body
  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  override = {
    "instance_name": name,
    }
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, override)
class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], body)
    return baserlib.SubmitJob([op])
class R_2_instances_name_failover(baserlib.R_Generic):
  """/2/instances/[instance_name]/failover resource.

  """
  def PUT(self):
    """Does a failover of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    override = {
      "instance_name": self.items[0],
      }
    op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body,
                             override)
    return baserlib.SubmitJob([op])
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request body
  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  override = {
    "instance_name": name,
    }
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, override)
class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], body)
    return baserlib.SubmitJob([op])
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request body
  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  override = {
    "instance_name": name,
    }
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, override)
class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], body)
    return baserlib.SubmitJob([op])
class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    override = {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      }
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body,
                             override)
    return baserlib.SubmitJob([op])
class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  # Console details can be used to connect, hence require write access
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()
    result = client.QueryInstances([self.items[0]], ["console"], False)
    ((console, ), ) = result

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console
def _GetQueryFields(args):
  """Extracts and splits the mandatory "fields" query argument.

  @type args: dict
  @param args: Parsed query arguments (values are lists)
  @rtype: list of strings
  @return: Requested field names
  @raise http.HttpBadRequest: if no "fields" argument was given

  """
  if "fields" not in args:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(args["fields"][0])
def _SplitQueryFields(fields):
1257
  """
1258

1259
  """
1260
  return [i.strip() for i in fields.split(",")]
1261

    
1262

    
1263
class R_2_query(baserlib.R_Generic):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def _Query(self, fields, filter_):
    """Runs a query for the requested resource via the master client."""
    client = baserlib.GetClient()
    return client.Query(self.items[0], fields, filter_).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    fields = _GetQueryFields(self.queryargs)
    return self._Query(fields, None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")

    if "fields" in body:
      fields = body["fields"]
    else:
      # Fall back to the query string when the body names no fields
      fields = _GetQueryFields(self.queryargs)

    return self._Query(fields, body.get("filter", None))
class R_2_query_fields(baserlib.R_Generic):
  """/2/query/[resource]/fields resource.

  """
  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    if "fields" in self.queryargs:
      fields = _SplitQueryFields(self.queryargs["fields"][0])
    else:
      # Without a "fields" argument all fields are returned
      fields = None

    return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    # Cluster-level tags are not tied to a named object
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # Fixed duplicated word ("the the") in the user-visible message
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # Refuse to delete all tags; the caller must name them explicitly
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
class R_2_groups_name_tags(_R_Tags):
  """/2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP
class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER