#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API version 2 resources library.

  PUT or POST?
  ============

  According to RFC2616 the main difference between PUT and POST is that
  POST can create new resources but PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, POST on /2/instances is legitimate for
  instance creation while PUT would not be, because it creates a new
  entity rather than just replacing /2/instances with it.

  So when adding new methods, if they are operating on the URI entity itself,
  PUT should be preferred over POST.

"""

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "group.uuid",
            ] + _COMMON_FIELDS

G_FIELDS = ["name", "uuid",
            "alloc_policy",
            "node_cnt", "node_list",
            "ctime", "mtime", "serial_no",
            ]  # "tags" is missing here, so _COMMON_FIELDS cannot be used

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives a job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """/2/info resource.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns the list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]


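# Illustrative sketch, not used by the module itself: how a client could
# probe the feature list served by R_2_features before choosing the
# instance-creation request format.  The host name and port are assumptions
# for demonstration only, and authentication is omitted.
def _ExampleProbeFeatures(host="cluster.example.com", port=5080):
  """Example only: fetch /2/features and check for creation request v1.

  """
  import httplib
  import json

  conn = httplib.HTTPSConnection(host, port)
  try:
    conn.request("GET", "/2/features")
    features = json.loads(conn.getresponse().read())
  finally:
    conn.close()

  # If the feature string is advertised, a client may send the version 1
  # instance creation body (see _ParseInstanceCreateRequestVersion1 below)
  return _INST_CREATE_REQV1 in features

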
class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function, instead of printing we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])


class R_2_cluster_modify(baserlib.R_Generic):
  """/2/modify resource.

  """
  def PUT(self):
    """Modifies cluster parameters.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
                             None)

    return baserlib.SubmitJob([op])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a list of all jobs.

    @return: a list of dictionaries with job id and uri.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to a list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for
              each opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }


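# Illustrative sketch, not used by the module itself: the body shape that the
# GET handler above reads through getBodyParameter().  The values are
# examples only; "fields" must be a list and "previous_log_serial" a number,
# otherwise the handler raises HttpBadRequest.
_EXAMPLE_WFJC_REQUEST_BODY = {
  "fields": ["status", "opstatus"],
  "previous_job_info": None,
  "previous_log_serial": 0,
  }

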
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpNodeSetParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])


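# Illustrative sketch, not used by the module itself: setting a node role.
# The PUT handler above expects the request body to be a single JSON string
# holding one of the role constants.  Host, port and node name are
# assumptions for demonstration only, and authentication is omitted.
def _ExampleSetNodeRole(node_name="node1.example.com", role=_NR_DRAINED,
                        host="cluster.example.com", port=5080):
  """Example only: PUT a new role for a node and return the job id.

  """
  import httplib
  import json

  conn = httplib.HTTPSConnection(host, port)
  try:
    conn.request("PUT", "/2/nodes/%s/role" % node_name, json.dumps(role),
                 {"Content-Type": "application/json"})
    return json.loads(conn.getresponse().read())
  finally:
    conn.close()

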
class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
                                    iallocator=iallocator,
                                    remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function, instead of printing we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result[0]:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
                                            remote_node=node, disks=[],
                                            mode=constants.REPLACE_DISK_CHG,
                                            early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if "live" in self.queryargs and "mode" in self.queryargs:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")
    elif "live" in self.queryargs:
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  group_name = baserlib.CheckParameter(data, "name")
  alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)

  return opcodes.OpGroupAdd(group_name=group_name,
                            alloc_policy=alloc_policy,
                            dry_run=dry_run)


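# Illustrative sketch, not used by the module itself: a request body accepted
# by _ParseCreateGroupRequest above.  "name" is mandatory; "alloc_policy" may
# be given as well and defaults to None when omitted.  The group name is an
# example only.
_EXAMPLE_GROUP_CREATE_BODY = {
  "name": "group1",
  }

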
class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def DELETE(self):
    """Delete a node group.

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
  return opcodes.OpGroupSetParams(group_name=name, alloc_policy=alloc_policy)


class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyGroupRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  old_name = name
  new_name = baserlib.CheckParameter(data, "new_name")

  return opcodes.OpGroupRename(old_name=old_name, new_name=new_name,
                               dry_run=dry_run)


class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                  self.dryRun())
    return baserlib.SubmitJob([op])


class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })

    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpInstanceCreate}
  @return: Instance creation opcode

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    try:
      size = i[constants.IDISK_SIZE]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      constants.IDISK_SIZE: size,
      }

    # Optional disk access mode
    try:
      disk_access = i[constants.IDISK_MODE]
    except KeyError:
      pass
    else:
      disk[constants.IDISK_MODE] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # Network interfaces
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    for field in constants.INIC_PARAMS:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpInstanceCreate(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os"),
    osparams=baserlib.CheckParameter(data, "osparams", default={}),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    no_install=baserlib.CheckParameter(data, "no_install", default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=baserlib.CheckParameter(data, "wait_for_sync", default=True),
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    tags=baserlib.CheckParameter(data, "tags", default=[]),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )


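# Illustrative sketch, not used by the module itself: a version 1 creation
# body as parsed by _ParseInstanceCreateRequestVersion1 above, including the
# _REQ_DATA_VERSION marker checked by R_2_instances.POST below.  The names
# and sizes are examples only; many more optional parameters are supported.
_EXAMPLE_INSTANCE_CREATE_BODY_V1 = {
  _REQ_DATA_VERSION: 1,
  "mode": constants.INSTANCE_CREATE,
  "name": "inst1.example.com",
  "os": "debian-etch",
  "disk_template": constants.DT_PLAIN,
  "pnode": "node1.example.com",
  # each disk is a dict; "size" (in mebibytes) is mandatory, "mode" optional
  "disks": [{"size": 10240}],
  # each NIC is a dict of optional parameters such as "mode", "link" or "ip"
  "nics": [{}],
  }

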
class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpInstanceCreate}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpInstanceCreate(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpInstanceQueryData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


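# Illustrative sketch, not used by the module itself: the reboot handler
# above is driven purely by query parameters, for example
#
#   POST /2/instances/inst1.example.com/reboot?type=soft&ignore_secondaries=0
#
# The instance name is an example only; "type" defaults to a hard reboot and
# "ignore_secondaries" to false when omitted.
def _ExampleRebootPath(instance_name="inst1.example.com", reboot_type="soft"):
  """Example only: build a query-driven reboot URI for the handler above.

  """
  import urllib
  query = urllib.urlencode([("type", reboot_type), ("ignore_secondaries", 0)])
  return "/2/instances/%s/reboot?%s" % (instance_name, query)

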
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpInstanceShutdown(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops


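# Illustrative sketch, not used by the module itself: a reinstall body as
# parsed by _ParseInstanceReinstallRequest above.  The OS name is an example
# only; when "start" is true the parsed job restarts the instance after the
# reinstall.
_EXAMPLE_INSTANCE_REINSTALL_BODY = {
  "os": "debian-etch",
  "start": True,
  "osparams": {},
  }

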
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return baserlib.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
                                        remote_node=remote_node,
                                        mode=mode,
                                        disks=disks,
                                        iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpBackupPrepare(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  mode = baserlib.CheckParameter(data, "mode",
                                 default=constants.EXPORT_MODE_LOCAL)
  target_node = baserlib.CheckParameter(data, "destination")
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
                                            exptype=bool, default=False)
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
                                                default=None)

  return opcodes.OpBackupExport(instance_name=name,
                                mode=mode,
                                target_node=target_node,
                                shutdown=shutdown,
                                remove_instance=remove_instance,
                                x509_key_name=x509_key_name,
                                destination_x509_ca=destination_x509_ca)


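# Illustrative sketch, not used by the module itself: an export body as
# parsed by _ParseExportInstanceRequest above.  "destination" and "shutdown"
# are mandatory, "mode" defaults to a local export.  The node name is an
# example only.
_EXAMPLE_INSTANCE_EXPORT_BODY = {
  "mode": constants.EXPORT_MODE_LOCAL,
  "destination": "node2.example.com",
  "shutdown": True,
  "remove_instance": False,
  }

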
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  mode = baserlib.CheckParameter(data, "mode", default=None)
  cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
                                    default=False)

  return opcodes.OpInstanceMigrate(instance_name=name, mode=mode,
                                   cleanup=cleanup)


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  new_name = baserlib.CheckParameter(data, "new_name")
  ip_check = baserlib.CheckParameter(data, "ip_check", default=True)
  name_check = baserlib.CheckParameter(data, "name_check", default=True)

  return opcodes.OpInstanceRename(instance_name=name, new_name=new_name,
                                  name_check=name_check, ip_check=ip_check)


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  osparams = baserlib.CheckParameter(data, "osparams", default={})
  force = baserlib.CheckParameter(data, "force", default=False)
  nics = baserlib.CheckParameter(data, "nics", default=[])
  disks = baserlib.CheckParameter(data, "disks", default=[])
  disk_template = baserlib.CheckParameter(data, "disk_template", default=None)
  remote_node = baserlib.CheckParameter(data, "remote_node", default=None)
  os_name = baserlib.CheckParameter(data, "os_name", default=None)
  force_variant = baserlib.CheckParameter(data, "force_variant", default=False)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  return opcodes.OpInstanceSetParams(instance_name=name, hvparams=hvparams,
                                     beparams=beparams, osparams=osparams,
                                     force=force, nics=nics, disks=disks,
                                     disk_template=disk_template,
                                     remote_node=remote_node, os_name=os_name,
                                     force_variant=force_variant)


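# Illustrative sketch, not used by the module itself: a modify body as parsed
# by _ParseModifyInstanceRequest above.  All keys are optional; hypervisor
# and backend parameters may also be reset to their cluster defaults with
# constants.VALUE_DEFAULT.  The parameter names and values are examples only.
_EXAMPLE_INSTANCE_MODIFY_BODY = {
  "beparams": {"memory": 512},
  "hvparams": {},
  "force": False,
  }

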
class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })

    return baserlib.SubmitJob([op])


class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to an instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = baserlib.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


class _R_Tags(baserlib.R_Generic):
  """Quasi-class for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out the cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request, as a list of strings, should be PUT to this URI. A job
    id will be returned.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE request should be
    addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # no, we are not going to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))


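# Illustrative sketch, not used by the module itself: tags are passed as
# repeated "tag" query arguments rather than in the request body, e.g.
#
#   PUT    /2/instances/inst1.example.com/tags?tag=staging&tag=db
#   DELETE /2/instances/inst1.example.com/tags?tag=staging
#
# The instance name and tag values are examples only; both calls return a
# job id.
def _ExampleTagsPath(name="inst1.example.com", tags=("staging", "db")):
  """Example only: build a tag manipulation URI for the resources below.

  """
  import urllib
  return ("/2/instances/%s/tags?" % name +
          urllib.urlencode([("tag", t) for t in tags]))

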
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER