Statistics
| Branch: | Tag: | Revision:

root / lib / rapi / rlib2.py @ 75c866c2

History | View | Annotate | Download (40.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Remote API version 2 baserlib.library.
23

24
  PUT or POST?
25
  ============
26

27
  According to RFC2616 the main difference between PUT and POST is that
28
  POST can create new resources but PUT can only create the resource the
29
  URI was pointing to on the PUT request.
30

31
  To be in context of this module for instance creation POST on
32
  /2/instances is legitim while PUT would be not, due to it does create a
33
  new entity and not just replace /2/instances with it.
34

35
  So when adding new methods, if they are operating on the URI entity itself,
36
  PUT should be prefered over POST.
37

38
"""
39

    
40
# pylint: disable-msg=C0103
41

    
42
# C0103: Invalid name, since the R_* names are not conforming
43

    
44
from ganeti import opcodes
45
from ganeti import http
46
from ganeti import constants
47
from ganeti import cli
48
from ganeti import utils
49
from ganeti import rapi
50
from ganeti.rapi import baserlib
51

    
52

    
53
# Fields present on every queryable object type below.
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Fields returned for instance queries (/2/instances).
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

# Fields returned for node queries (/2/nodes).
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

# Fields returned for node group queries (/2/groups).
G_FIELDS = ["name", "uuid",
            "alloc_policy",
            "node_cnt", "node_list",
            "ctime", "mtime", "serial_no",
            ]  # "tags" is missing to be able to use _COMMON_FIELDS here.

# Symbolic node role names, as exposed by /2/nodes/[node_name]/role.
_NR_DRAINED = "drained"
# NOTE(review): "CANDIATE" is a historical typo in the constant's name; kept
# as-is because other code in this module refers to it.
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps the single-letter role flag returned by node queries to its
# symbolic name.
_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
107

    
108

    
109
class R_version(baserlib.R_Generic):
  """/version resource.

  Clients query this resource to discover which remote API version the
  server speaks, so they can adapt their requests accordingly.

  """
  @staticmethod
  def GET():
    """Return the remote API version number.

    """
    return constants.RAPI_VERSION
122

    
123

    
124
class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Return general cluster information.

    """
    return baserlib.GetClient().QueryClusterInfo()
135

    
136

    
137
class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Return the list of optional RAPI features implemented by this server.

    """
    features = [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
    return features
147

    
148

    
149
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # Use a custom feedback function so the job status is logged rather
    # than printed.
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    # Expand each OS into its fully-qualified variant names.
    names = []
    for (os_name, os_variants) in diagnose_data:
      names.extend(cli.CalculateOSNames(os_name, os_variants))

    return names
177

    
178

    
179
class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute the cluster configuration to all nodes.

    @return: a job id

    """
    op = opcodes.OpClusterRedistConf()
    return baserlib.SubmitJob([op])
189

    
190

    
191
class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    # No static overrides: every opcode parameter is taken straight from
    # the request body.
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams,
                             self.request_body, None)
    return baserlib.SubmitJob([op])
205

    
206

    
207
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    cl = baserlib.GetClient()
    # QueryJobs returns single-element rows; flatten them to plain ids.
    job_ids = [row[0] for row in cl.QueryJobs(None, ["id"])]
    return baserlib.BuildUriList(job_ids, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
224

    
225

    
226
class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    rows = baserlib.GetClient().QueryJobs([job_id], fields)
    job_data = rows[0]
    if job_data is None:
      # Unknown (or already archived) job id.
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, job_data)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    return baserlib.GetClient().CancelJob(self.items[0])
260

    
261

    
262
class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    # Validate body parameters before talking to the master daemon.
    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if prev_job_info is not None and not isinstance(prev_job_info, list):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if prev_log_serial is not None and \
        not isinstance(prev_log_serial, (int, long)):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # The wait timed out without the job changing.
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
309

    
310

    
311
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if not self.useBulk():
      # Name-only listing, returned as a list of URIs.
      rows = client.QueryNodes([], ["name"], False)
      names = [row[0] for row in rows]
      return baserlib.BuildUriList(names, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))

    # Bulk listing: full field set for every node.
    bulkdata = client.QueryNodes([], N_FIELDS, False)
    return baserlib.MapBulkFields(bulkdata, N_FIELDS)
329

    
330

    
331
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    client = baserlib.GetClient()
    # HandleItemQueryErrors translates query failures (e.g. unknown node)
    # into the appropriate HTTP errors.
    rows = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                          names=[self.items[0]],
                                          fields=N_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(N_FIELDS, rows[0])
347

    
348

    
349
class R_2_nodes_name_role(baserlib.R_Generic):
350
  """ /2/nodes/[node_name]/role resource.
351

352
  """
353
  def GET(self):
354
    """Returns the current node role.
355

356
    @return: Node role
357

358
    """
359
    node_name = self.items[0]
360
    client = baserlib.GetClient()
361
    result = client.QueryNodes(names=[node_name], fields=["role"],
362
                               use_locking=self.useLocking())
363

    
364
    return _NR_MAP[result[0][0]]
365

    
366
  def PUT(self):
367
    """Sets the node role.
368

369
    @return: a job id
370

371
    """
372
    if not isinstance(self.request_body, basestring):
373
      raise http.HttpBadRequest("Invalid body contents, not a string")
374

    
375
    node_name = self.items[0]
376
    role = self.request_body
377

    
378
    if role == _NR_REGULAR:
379
      candidate = False
380
      offline = False
381
      drained = False
382

    
383
    elif role == _NR_MASTER_CANDIATE:
384
      candidate = True
385
      offline = drained = None
386

    
387
    elif role == _NR_DRAINED:
388
      drained = True
389
      candidate = offline = None
390

    
391
    elif role == _NR_OFFLINE:
392
      offline = True
393
      candidate = drained = None
394

    
395
    else:
396
      raise http.HttpBadRequest("Can't set '%s' role" % role)
397

    
398
    op = opcodes.OpSetNodeParams(node_name=node_name,
399
                                 master_candidate=candidate,
400
                                 offline=offline,
401
                                 drained=drained,
402
                                 force=bool(self.useForce()))
403

    
404
    return baserlib.SubmitJob([op])
405

    
406

    
407
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    @return: a list of (job id, instance name, target node) tuples; the
        job id is C{None} when running in dry-run mode

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    # Phase 1: ask the master for an evacuation plan, i.e. which instance
    # should be moved to which node.
    op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
                                          iallocator=iallocator,
                                          remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    # Phase 2: submit one replace-disks job per planned move; in dry-run
    # mode no jobs are submitted and the job id is reported as None.
    jobs = []
    for iname, node in result:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpReplaceDisks(instance_name=iname,
                                    remote_node=node, disks=[],
                                    mode=constants.REPLACE_DISK_CHG,
                                    early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs
444

    
445

    
446
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    have_live = "live" in self.queryargs
    have_mode = "mode" in self.queryargs

    # "live" and "mode" are alternative ways to specify the migration
    # mode and must not be combined.
    if have_live and have_mode:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")

    if have_live:
      # Boolean-style parameter, translated to a migration mode.
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)
    return baserlib.SubmitJob([op])
470

    
471

    
472
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Submit a job querying storage units on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    # "output_fields" arrives as one comma-separated string.
    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
496

    
497

    
498
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Submit a job modifying a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    # Only the "allocatable" flag can be changed through this resource.
    changes = {}
    if "allocatable" in self.queryargs:
      allocatable = self._checkIntVariable("allocatable", default=1)
      changes[constants.SF_ALLOCATABLE] = bool(allocatable)

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
526

    
527

    
528
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Submit a job repairing a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    # Both query parameters are mandatory.
    params = {}
    for attr in ("storage_type", "name"):
      value = self._checkStringVariable(attr, None)
      if not value:
        raise http.HttpBadRequest("Missing the required '%s'"
                                  " parameter" % attr)
      params[attr] = value

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=params["storage_type"],
                                     name=params["name"])
    return baserlib.SubmitJob([op])
549

    
550

    
551
def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @type data: dict
  @param data: the request body
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  # "name" is mandatory, "alloc_policy" is optional.
  return opcodes.OpGroupAdd(
    group_name=baserlib.CheckParameter(data, "name"),
    alloc_policy=baserlib.CheckParameter(data, "alloc_policy", default=None),
    dry_run=dry_run)
564

    
565

    
566
class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if not self.useBulk():
      # Name-only listing, returned as a list of URIs.
      rows = client.QueryGroups([], ["name"], False)
      names = [row[0] for row in rows]
      return baserlib.BuildUriList(names, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

    # Bulk listing: full field set for every group.
    bulkdata = client.QueryGroups([], G_FIELDS, False)
    return baserlib.MapBulkFields(bulkdata, G_FIELDS)

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])
594

    
595

    
596
class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    client = baserlib.GetClient()
    # HandleItemQueryErrors translates query failures (e.g. unknown group)
    # into the appropriate HTTP errors.
    rows = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                          names=[self.items[0]],
                                          fields=G_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(G_FIELDS, rows[0])

  def DELETE(self):
    """Delete a node group.

    @return: a job id

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
621

    
622

    
623
def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @type name: string
  @param name: name of the node group to modify
  @type data: dict
  @param data: the request body

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
  return opcodes.OpGroupSetParams(group_name=name, alloc_policy=policy)
632

    
633

    
634
class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseModifyGroupRequest(self.items[0], self.request_body)
    return baserlib.SubmitJob([op])
649

    
650

    
651
def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  # "new_name" is the only mandatory body parameter.
  new_name = baserlib.CheckParameter(data, "new_name")
  return opcodes.OpGroupRename(old_name=name, new_name=new_name,
                               dry_run=dry_run)
670

    
671

    
672
class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                  self.dryRun())
    return baserlib.SubmitJob([op])
686

    
687

    
688
class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    # The target group and the dry-run/force flags come from the URI;
    # everything else (the node list) comes from the request body.
    static = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      }
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body,
                             static)
    return baserlib.SubmitJob([op])
705

    
706

    
707
def _ParseInstanceDisks(disks_input):
  """Parses the "disks" entry of a version 1 instance creation request.

  @type disks_input: list
  @param disks_input: list of disk specification dictionaries
  @rtype: list
  @return: list of validated disk dictionaries
  @raise http.HttpBadRequest: if a disk specification is not a dict or is
      missing the mandatory size

  """
  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    if constants.IDISK_SIZE not in i:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      constants.IDISK_SIZE: i[constants.IDISK_SIZE],
      }

    # Optional disk access mode
    if constants.IDISK_MODE in i:
      disk[constants.IDISK_MODE] = i[constants.IDISK_MODE]

    disks.append(disk)

  assert len(disks_input) == len(disks)

  return disks


def _ParseInstanceNics(nics_input):
  """Parses the "nics" entry of a version 1 instance creation request.

  @type nics_input: list
  @param nics_input: list of NIC specification dictionaries
  @rtype: list
  @return: list of NIC dictionaries containing only recognized parameters
  @raise http.HttpBadRequest: if a NIC specification is not a dict

  """
  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    # Copy only the recognized NIC parameters that were actually given.
    nic = dict((field, i[field])
               for field in constants.INIC_PARAMS
               if field in i)

    nics.append(nic)

  assert len(nics_input) == len(nics)

  return nics


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @type data: dict
  @param data: the request body
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode
  @raise http.HttpBadRequest: if a mandatory parameter is missing or a
      parameter has the wrong type

  """
  # Disks and network interfaces
  disks = _ParseInstanceDisks(baserlib.CheckParameter(data, "disks",
                                                      exptype=list))
  nics = _ParseInstanceNics(baserlib.CheckParameter(data, "nics",
                                                    exptype=list))

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpInstanceCreate(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os"),
    osparams=baserlib.CheckParameter(data, "osparams", default={}),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    no_install=baserlib.CheckParameter(data, "no_install", default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=True,
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )
807

    
808

    
809
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      # Bulk listing: full field set for every instance.
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      # Name-only listing, returned as a list of URIs.
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpInstanceCreate}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing: version 0 only carries a list of disk sizes
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpInstanceCreate(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    # Dispatch on the request data version; only 0 (deprecated) and 1 are
    # understood.
    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])
912

    
913

    
914
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    # HandleItemQueryErrors translates query failures (e.g. unknown
    # instance) into the appropriate HTTP errors.
    rows = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                          names=[self.items[0]],
                                          fields=I_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(I_FIELDS, rows[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
940

    
941

    
942
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    # "static" (default off) requests only configuration data, without
    # contacting the nodes
    static_only = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[self.items[0]],
                                     static=static_only)
    return baserlib.SubmitJob([op])
956

    
957

    
958
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    name = self.items[0]
    # A hard reboot is the default when no type is given
    rb_type = self.queryargs.get('type',
                                 [constants.INSTANCE_REBOOT_HARD])[0]
    skip_secondaries = bool(self._checkIntVariable('ignore_secondaries'))

    op = opcodes.OpRebootInstance(instance_name=name,
                                  reboot_type=rb_type,
                                  ignore_secondaries=skip_secondaries,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
981

    
982

    
983
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    force = bool(self._checkIntVariable('force'))

    op = opcodes.OpStartupInstance(instance_name=self.items[0],
                                   force=force,
                                   dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
1003

    
1004

    
1005
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    return baserlib.SubmitJob([
      opcodes.OpShutdownInstance(instance_name=self.items[0],
                                 dry_run=bool(self.dryRun())),
      ])
1020

    
1021

    
1022
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request parameters
  @rtype: list
  @return: Opcodes for shutdown, reinstall and (optionally) startup

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  os_type = baserlib.CheckParameter(data, "os")
  do_start = baserlib.CheckParameter(data, "start", exptype=bool,
                                     default=True)
  os_params = baserlib.CheckParameter(data, "osparams", default=None)

  # The instance has to be shut down before its OS can be reinstalled
  ops = [opcodes.OpShutdownInstance(instance_name=name)]
  ops.append(opcodes.OpReinstallInstance(instance_name=name,
                                         os_type=os_type,
                                         osparams=os_params))

  if do_start:
    # Bring the instance back up afterwards, without forcing
    ops.append(opcodes.OpStartupInstance(instance_name=name, force=False))

  return ops
1044

    
1045

    
1046
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      # Modern interface: parameters come in the body only
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      params = self.request_body
    else:
      if not self.queryargs:
        raise http.HttpBadRequest("Missing query parameters")
      # Legacy interface, do not modify/extend
      params = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }

    return baserlib.SubmitJob(_ParseInstanceReinstallRequest(self.items[0],
                                                             params))
1077

    
1078

    
1079
class R_2_instances_name_replace_disks(baserlib.R_Generic):
1080
  """/2/instances/[instance_name]/replace-disks resource.
1081

1082
  """
1083
  def POST(self):
1084
    """Replaces disks on an instance.
1085

1086
    """
1087
    instance_name = self.items[0]
1088
    remote_node = self._checkStringVariable("remote_node", default=None)
1089
    mode = self._checkStringVariable("mode", default=None)
1090
    raw_disks = self._checkStringVariable("disks", default=None)
1091
    iallocator = self._checkStringVariable("iallocator", default=None)
1092

    
1093
    if raw_disks:
1094
      try:
1095
        disks = [int(part) for part in raw_disks.split(",")]
1096
      except ValueError, err:
1097
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1098
    else:
1099
      disks = []
1100

    
1101
    op = opcodes.OpReplaceDisks(instance_name=instance_name,
1102
                                remote_node=remote_node,
1103
                                mode=mode,
1104
                                disks=disks,
1105
                                iallocator=iallocator)
1106

    
1107
    return baserlib.SubmitJob([op])
1108

    
1109

    
1110
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    # ignore_size skips the check against the recorded disk sizes
    op = opcodes.OpInstanceActivateDisks(
      instance_name=self.items[0],
      ignore_size=bool(self._checkIntVariable('ignore_size')))

    return baserlib.SubmitJob([op])
1127

    
1128

    
1129
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    return baserlib.SubmitJob([
      opcodes.OpInstanceDeactivateDisks(instance_name=self.items[0]),
      ])
1142

    
1143

    
1144
class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    export_mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=self.items[0],
                                 mode=export_mode)
    return baserlib.SubmitJob([op])
1161

    
1162

    
1163
def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request parameters
  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  # Collect the opcode keyword arguments; dict displays evaluate their
  # values in source order, so parameter checks run in the same order as
  # before
  kwargs = {
    "mode": baserlib.CheckParameter(data, "mode",
                                    default=constants.EXPORT_MODE_LOCAL),
    "target_node": baserlib.CheckParameter(data, "destination"),
    "shutdown": baserlib.CheckParameter(data, "shutdown", exptype=bool),
    "remove_instance": baserlib.CheckParameter(data, "remove_instance",
                                               exptype=bool, default=False),
    "x509_key_name": baserlib.CheckParameter(data, "x509_key_name",
                                             default=None),
    "destination_x509_ca": baserlib.CheckParameter(data,
                                                   "destination_x509_ca",
                                                   default=None),
    }

  return opcodes.OpBackupExport(instance_name=name, **kwargs)
1187

    
1188

    
1189
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    body = self.request_body
    if not isinstance(body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    return baserlib.SubmitJob([_ParseExportInstanceRequest(self.items[0],
                                                           body)])
1205

    
1206

    
1207
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request parameters
  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  migration_mode = baserlib.CheckParameter(data, "mode", default=None)
  do_cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
                                       default=False)

  return opcodes.OpInstanceMigrate(instance_name=name,
                                   mode=migration_mode,
                                   cleanup=do_cleanup)
1220

    
1221

    
1222
class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    return baserlib.SubmitJob(
      [_ParseMigrateInstanceRequest(self.items[0], self.request_body)])
1237

    
1238

    
1239
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request parameters
  @rtype: L{opcodes.OpRenameInstance}
  @return: Instance rename opcode

  """
  target_name = baserlib.CheckParameter(data, "new_name")
  # IP and name checks both default to enabled
  check_ip = baserlib.CheckParameter(data, "ip_check", default=True)
  check_name = baserlib.CheckParameter(data, "name_check", default=True)

  return opcodes.OpRenameInstance(instance_name=name,
                                  new_name=target_name,
                                  name_check=check_name,
                                  ip_check=check_ip)
1252

    
1253

    
1254
class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    return baserlib.SubmitJob(
      [_ParseRenameInstanceRequest(self.items[0], self.request_body)])
1269

    
1270

    
1271
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request parameters
  @rtype: L{opcodes.OpSetInstanceParams}
  @return: Instance modify opcode

  """
  # Parameters taken verbatim from the request, with their defaults; the
  # list is evaluated in order, preserving the original check order
  simple = [
    ("osparams", {}),
    ("force", False),
    ("nics", []),
    ("disks", []),
    ("disk_template", None),
    ("remote_node", None),
    ("os_name", None),
    ("force_variant", False),
    ]
  params = dict((key, baserlib.CheckParameter(data, key, default=dflt))
                for (key, dflt) in simple)

  # Hypervisor and backend parameters need type validation; the special
  # value "default" resets a key to its cluster-wide default
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  return opcodes.OpSetInstanceParams(instance_name=name, hvparams=hvparams,
                                     beparams=beparams, **params)
1302

    
1303

    
1304
class R_2_instances_name_modify(baserlib.R_Generic):
1305
  """/2/instances/[instance_name]/modify resource.
1306

1307
  """
1308
  def PUT(self):
1309
    """Changes some parameters of an instance.
1310

1311
    @return: a job id
1312

1313
    """
1314
    baserlib.CheckType(self.request_body, dict, "Body contents")
1315

    
1316
    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1317

    
1318
    return baserlib.SubmitJob([op])
1319

    
1320

    
1321
class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    # Instance name and disk index come from the URI; all remaining opcode
    # parameters are filled in from the request body
    static = {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      }
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body,
                             static)

    return baserlib.SubmitJob([op])
1337

    
1338

    
1339
class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  # Must be overridden by subclasses with one of the constants.TAG_* values
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    # Cluster-level tags are not attached to a named object
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # FIX: message previously read "using the the 'tag' parameter"
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # Deleting all tags at once is deliberately not supported
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
1402

    
1403

    
1404
class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
1411

    
1412

    
1413
class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE
1420

    
1421

    
1422
class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster-level tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER