Statistics
| Branch: | Tag: | Revision:

root / lib / rapi / rlib2.py @ 4b163794

History | View | Annotate | Download (35.3 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Remote API version 2 baserlib.library.
23

24
  PUT or POST?
25
  ============
26

27
  According to RFC2616 the main difference between PUT and POST is that
28
  POST can create new resources but PUT can only create the resource the
29
  URI was pointing to on the PUT request.
30

31
  To be in context of this module for instance creation POST on
32
  /2/instances is legitim while PUT would be not, due to it does create a
33
  new entity and not just replace /2/instances with it.
34

35
  So when adding new methods, if they are operating on the URI entity itself,
36
  PUT should be prefered over POST.
37

38
"""
39

    
40
# pylint: disable-msg=C0103
41

    
42
# C0103: Invalid name, since the R_* names are not conforming
43

    
44
from ganeti import opcodes
45
from ganeti import http
46
from ganeti import constants
47
from ganeti import cli
48
from ganeti import rapi
49
from ganeti import ht
50
from ganeti.rapi import baserlib
51

    
52

    
53
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
54
I_FIELDS = ["name", "admin_state", "os",
55
            "pnode", "snodes",
56
            "disk_template",
57
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
58
            "network_port",
59
            "disk.sizes", "disk_usage",
60
            "beparams", "hvparams",
61
            "oper_state", "oper_ram", "oper_vcpus", "status",
62
            "custom_hvparams", "custom_beparams", "custom_nicparams",
63
            ] + _COMMON_FIELDS
64

    
65
N_FIELDS = ["name", "offline", "master_candidate", "drained",
66
            "dtotal", "dfree",
67
            "mtotal", "mnode", "mfree",
68
            "pinst_cnt", "sinst_cnt",
69
            "ctotal", "cnodes", "csockets",
70
            "pip", "sip", "role",
71
            "pinst_list", "sinst_list",
72
            "master_capable", "vm_capable",
73
            "group.uuid",
74
            ] + _COMMON_FIELDS
75

    
76
G_FIELDS = ["name", "uuid",
77
            "alloc_policy",
78
            "node_cnt", "node_list",
79
            "ctime", "mtime", "serial_no",
80
            ]  # "tags" is missing to be able to use _COMMON_FIELDS here.
81

    
82
_NR_DRAINED = "drained"
83
_NR_MASTER_CANDIATE = "master-candidate"
84
_NR_MASTER = "master"
85
_NR_OFFLINE = "offline"
86
_NR_REGULAR = "regular"
87

    
88
_NR_MAP = {
89
  "M": _NR_MASTER,
90
  "C": _NR_MASTER_CANDIATE,
91
  "D": _NR_DRAINED,
92
  "O": _NR_OFFLINE,
93
  "R": _NR_REGULAR,
94
  }
95

    
96
# Request data version field
97
_REQ_DATA_VERSION = "__version__"
98

    
99
# Feature string for instance creation request data version 1
100
_INST_CREATE_REQV1 = "instance-create-reqv1"
101

    
102
# Feature string for instance reinstall request version 1
103
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
104

    
105
# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
106
_WFJC_TIMEOUT = 10
107

    
108

    
109
class R_version(baserlib.R_Generic):
110
  """/version resource.
111

112
  This resource should be used to determine the remote API version and
113
  to adapt clients accordingly.
114

115
  """
116
  @staticmethod
117
  def GET():
118
    """Returns the remote API version.
119

120
    """
121
    return constants.RAPI_VERSION
122

    
123

    
124
class R_2_info(baserlib.R_Generic):
125
  """/2/info resource.
126

127
  """
128
  @staticmethod
129
  def GET():
130
    """Returns cluster information.
131

132
    """
133
    client = baserlib.GetClient()
134
    return client.QueryClusterInfo()
135

    
136

    
137
class R_2_features(baserlib.R_Generic):
138
  """/2/features resource.
139

140
  """
141
  @staticmethod
142
  def GET():
143
    """Returns list of optional RAPI features implemented.
144

145
    """
146
    return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
147

    
148

    
149
class R_2_os(baserlib.R_Generic):
150
  """/2/os resource.
151

152
  """
153
  @staticmethod
154
  def GET():
155
    """Return a list of all OSes.
156

157
    Can return error 500 in case of a problem.
158

159
    Example: ["debian-etch"]
160

161
    """
162
    cl = baserlib.GetClient()
163
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
164
    job_id = baserlib.SubmitJob([op], cl)
165
    # we use custom feedback function, instead of print we log the status
166
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
167
    diagnose_data = result[0]
168

    
169
    if not isinstance(diagnose_data, list):
170
      raise http.HttpBadGateway(message="Can't get OS list")
171

    
172
    os_names = []
173
    for (name, variants) in diagnose_data:
174
      os_names.extend(cli.CalculateOSNames(name, variants))
175

    
176
    return os_names
177

    
178

    
179
class R_2_redist_config(baserlib.R_Generic):
180
  """/2/redistribute-config resource.
181

182
  """
183
  @staticmethod
184
  def PUT():
185
    """Redistribute configuration to all nodes.
186

187
    """
188
    return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
189

    
190

    
191
class R_2_cluster_modify(baserlib.R_Generic):
192
  """/2/modify resource.
193

194
  """
195
  def PUT(self):
196
    """Modifies cluster parameters.
197

198
    @return: a job id
199

200
    """
201
    op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
202
                             None)
203

    
204
    return baserlib.SubmitJob([op])
205

    
206

    
207
class R_2_jobs(baserlib.R_Generic):
208
  """/2/jobs resource.
209

210
  """
211
  @staticmethod
212
  def GET():
213
    """Returns a dictionary of jobs.
214

215
    @return: a dictionary with jobs id and uri.
216

217
    """
218
    fields = ["id"]
219
    cl = baserlib.GetClient()
220
    # Convert the list of lists to the list of ids
221
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
222
    return baserlib.BuildUriList(result, "/2/jobs/%s",
223
                                 uri_fields=("id", "uri"))
224

    
225

    
226
class R_2_jobs_id(baserlib.R_Generic):
227
  """/2/jobs/[job_id] resource.
228

229
  """
230
  def GET(self):
231
    """Returns a job status.
232

233
    @return: a dictionary with job parameters.
234
        The result includes:
235
            - id: job ID as a number
236
            - status: current job status as a string
237
            - ops: involved OpCodes as a list of dictionaries for each
238
              opcodes in the job
239
            - opstatus: OpCodes status as a list
240
            - opresult: OpCodes results as a list of lists
241

242
    """
243
    fields = ["id", "ops", "status", "summary",
244
              "opstatus", "opresult", "oplog",
245
              "received_ts", "start_ts", "end_ts",
246
              ]
247
    job_id = self.items[0]
248
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
249
    if result is None:
250
      raise http.HttpNotFound()
251
    return baserlib.MapFields(fields, result)
252

    
253
  def DELETE(self):
254
    """Cancel not-yet-started job.
255

256
    """
257
    job_id = self.items[0]
258
    result = baserlib.GetClient().CancelJob(job_id)
259
    return result
260

    
261

    
262
class R_2_jobs_id_wait(baserlib.R_Generic):
263
  """/2/jobs/[job_id]/wait resource.
264

265
  """
266
  # WaitForJobChange provides access to sensitive information and blocks
267
  # machine resources (it's a blocking RAPI call), hence restricting access.
268
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
269

    
270
  def GET(self):
271
    """Waits for job changes.
272

273
    """
274
    job_id = self.items[0]
275

    
276
    fields = self.getBodyParameter("fields")
277
    prev_job_info = self.getBodyParameter("previous_job_info", None)
278
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)
279

    
280
    if not isinstance(fields, list):
281
      raise http.HttpBadRequest("The 'fields' parameter should be a list")
282

    
283
    if not (prev_job_info is None or isinstance(prev_job_info, list)):
284
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
285
                                " be a list")
286

    
287
    if not (prev_log_serial is None or
288
            isinstance(prev_log_serial, (int, long))):
289
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
290
                                " be a number")
291

    
292
    client = baserlib.GetClient()
293
    result = client.WaitForJobChangeOnce(job_id, fields,
294
                                         prev_job_info, prev_log_serial,
295
                                         timeout=_WFJC_TIMEOUT)
296
    if not result:
297
      raise http.HttpNotFound()
298

    
299
    if result == constants.JOB_NOTCHANGED:
300
      # No changes
301
      return None
302

    
303
    (job_info, log_entries) = result
304

    
305
    return {
306
      "job_info": job_info,
307
      "log_entries": log_entries,
308
      }
309

    
310

    
311
class R_2_nodes(baserlib.R_Generic):
312
  """/2/nodes resource.
313

314
  """
315
  def GET(self):
316
    """Returns a list of all nodes.
317

318
    """
319
    client = baserlib.GetClient()
320

    
321
    if self.useBulk():
322
      bulkdata = client.QueryNodes([], N_FIELDS, False)
323
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
324
    else:
325
      nodesdata = client.QueryNodes([], ["name"], False)
326
      nodeslist = [row[0] for row in nodesdata]
327
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
328
                                   uri_fields=("id", "uri"))
329

    
330

    
331
class R_2_nodes_name(baserlib.R_Generic):
332
  """/2/nodes/[node_name] resource.
333

334
  """
335
  def GET(self):
336
    """Send information about a node.
337

338
    """
339
    node_name = self.items[0]
340
    client = baserlib.GetClient()
341

    
342
    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
343
                                            names=[node_name], fields=N_FIELDS,
344
                                            use_locking=self.useLocking())
345

    
346
    return baserlib.MapFields(N_FIELDS, result[0])
347

    
348

    
349
class R_2_nodes_name_role(baserlib.R_Generic):
350
  """ /2/nodes/[node_name]/role resource.
351

352
  """
353
  def GET(self):
354
    """Returns the current node role.
355

356
    @return: Node role
357

358
    """
359
    node_name = self.items[0]
360
    client = baserlib.GetClient()
361
    result = client.QueryNodes(names=[node_name], fields=["role"],
362
                               use_locking=self.useLocking())
363

    
364
    return _NR_MAP[result[0][0]]
365

    
366
  def PUT(self):
367
    """Sets the node role.
368

369
    @return: a job id
370

371
    """
372
    if not isinstance(self.request_body, basestring):
373
      raise http.HttpBadRequest("Invalid body contents, not a string")
374

    
375
    node_name = self.items[0]
376
    role = self.request_body
377

    
378
    if role == _NR_REGULAR:
379
      candidate = False
380
      offline = False
381
      drained = False
382

    
383
    elif role == _NR_MASTER_CANDIATE:
384
      candidate = True
385
      offline = drained = None
386

    
387
    elif role == _NR_DRAINED:
388
      drained = True
389
      candidate = offline = None
390

    
391
    elif role == _NR_OFFLINE:
392
      offline = True
393
      candidate = drained = None
394

    
395
    else:
396
      raise http.HttpBadRequest("Can't set '%s' role" % role)
397

    
398
    op = opcodes.OpNodeSetParams(node_name=node_name,
399
                                 master_candidate=candidate,
400
                                 offline=offline,
401
                                 drained=drained,
402
                                 force=bool(self.useForce()))
403

    
404
    return baserlib.SubmitJob([op])
405

    
406

    
407
class R_2_nodes_name_evacuate(baserlib.R_Generic):
408
  """/2/nodes/[node_name]/evacuate resource.
409

410
  """
411
  def POST(self):
412
    """Evacuate all secondary instances off a node.
413

414
    """
415
    node_name = self.items[0]
416
    remote_node = self._checkStringVariable("remote_node", default=None)
417
    iallocator = self._checkStringVariable("iallocator", default=None)
418
    early_r = bool(self._checkIntVariable("early_release", default=0))
419
    dry_run = bool(self.dryRun())
420

    
421
    cl = baserlib.GetClient()
422

    
423
    op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
424
                                    iallocator=iallocator,
425
                                    remote_node=remote_node)
426

    
427
    job_id = baserlib.SubmitJob([op], cl)
428
    # we use custom feedback function, instead of print we log the status
429
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
430

    
431
    jobs = []
432
    for iname, node in result:
433
      if dry_run:
434
        jid = None
435
      else:
436
        op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
437
                                            remote_node=node, disks=[],
438
                                            mode=constants.REPLACE_DISK_CHG,
439
                                            early_release=early_r)
440
        jid = baserlib.SubmitJob([op])
441
      jobs.append((jid, iname, node))
442

    
443
    return jobs
444

    
445

    
446
class R_2_nodes_name_migrate(baserlib.R_Generic):
447
  """/2/nodes/[node_name]/migrate resource.
448

449
  """
450
  def POST(self):
451
    """Migrate all primary instances from a node.
452

453
    """
454
    node_name = self.items[0]
455

    
456
    if "live" in self.queryargs and "mode" in self.queryargs:
457
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
458
                                " be passed")
459
    elif "live" in self.queryargs:
460
      if self._checkIntVariable("live", default=1):
461
        mode = constants.HT_MIGRATION_LIVE
462
      else:
463
        mode = constants.HT_MIGRATION_NONLIVE
464
    else:
465
      mode = self._checkStringVariable("mode", default=None)
466

    
467
    op = opcodes.OpNodeMigrate(node_name=node_name, mode=mode)
468

    
469
    return baserlib.SubmitJob([op])
470

    
471

    
472
class R_2_nodes_name_storage(baserlib.R_Generic):
473
  """/2/nodes/[node_name]/storage resource.
474

475
  """
476
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
477
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
478

    
479
  def GET(self):
480
    node_name = self.items[0]
481

    
482
    storage_type = self._checkStringVariable("storage_type", None)
483
    if not storage_type:
484
      raise http.HttpBadRequest("Missing the required 'storage_type'"
485
                                " parameter")
486

    
487
    output_fields = self._checkStringVariable("output_fields", None)
488
    if not output_fields:
489
      raise http.HttpBadRequest("Missing the required 'output_fields'"
490
                                " parameter")
491

    
492
    op = opcodes.OpNodeQueryStorage(nodes=[node_name],
493
                                    storage_type=storage_type,
494
                                    output_fields=output_fields.split(","))
495
    return baserlib.SubmitJob([op])
496

    
497

    
498
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
499
  """/2/nodes/[node_name]/storage/modify resource.
500

501
  """
502
  def PUT(self):
503
    node_name = self.items[0]
504

    
505
    storage_type = self._checkStringVariable("storage_type", None)
506
    if not storage_type:
507
      raise http.HttpBadRequest("Missing the required 'storage_type'"
508
                                " parameter")
509

    
510
    name = self._checkStringVariable("name", None)
511
    if not name:
512
      raise http.HttpBadRequest("Missing the required 'name'"
513
                                " parameter")
514

    
515
    changes = {}
516

    
517
    if "allocatable" in self.queryargs:
518
      changes[constants.SF_ALLOCATABLE] = \
519
        bool(self._checkIntVariable("allocatable", default=1))
520

    
521
    op = opcodes.OpNodeModifyStorage(node_name=node_name,
522
                                     storage_type=storage_type,
523
                                     name=name,
524
                                     changes=changes)
525
    return baserlib.SubmitJob([op])
526

    
527

    
528
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
529
  """/2/nodes/[node_name]/storage/repair resource.
530

531
  """
532
  def PUT(self):
533
    node_name = self.items[0]
534

    
535
    storage_type = self._checkStringVariable("storage_type", None)
536
    if not storage_type:
537
      raise http.HttpBadRequest("Missing the required 'storage_type'"
538
                                " parameter")
539

    
540
    name = self._checkStringVariable("name", None)
541
    if not name:
542
      raise http.HttpBadRequest("Missing the required 'name'"
543
                                " parameter")
544

    
545
    op = opcodes.OpRepairNodeStorage(node_name=node_name,
546
                                     storage_type=storage_type,
547
                                     name=name)
548
    return baserlib.SubmitJob([op])
549

    
550

    
551
def _ParseCreateGroupRequest(data, dry_run):
552
  """Parses a request for creating a node group.
553

554
  @rtype: L{opcodes.OpGroupAdd}
555
  @return: Group creation opcode
556

557
  """
558
  override = {
559
    "dry_run": dry_run,
560
    }
561

    
562
  rename = {
563
    "name": "group_name",
564
    }
565

    
566
  return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
567
                             rename=rename)
568

    
569

    
570
class R_2_groups(baserlib.R_Generic):
571
  """/2/groups resource.
572

573
  """
574
  def GET(self):
575
    """Returns a list of all node groups.
576

577
    """
578
    client = baserlib.GetClient()
579

    
580
    if self.useBulk():
581
      bulkdata = client.QueryGroups([], G_FIELDS, False)
582
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
583
    else:
584
      data = client.QueryGroups([], ["name"], False)
585
      groupnames = [row[0] for row in data]
586
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
587
                                   uri_fields=("name", "uri"))
588

    
589
  def POST(self):
590
    """Create a node group.
591

592
    @return: a job id
593

594
    """
595
    baserlib.CheckType(self.request_body, dict, "Body contents")
596
    op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
597
    return baserlib.SubmitJob([op])
598

    
599

    
600
class R_2_groups_name(baserlib.R_Generic):
601
  """/2/groups/[group_name] resource.
602

603
  """
604
  def GET(self):
605
    """Send information about a node group.
606

607
    """
608
    group_name = self.items[0]
609
    client = baserlib.GetClient()
610

    
611
    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
612
                                            names=[group_name], fields=G_FIELDS,
613
                                            use_locking=self.useLocking())
614

    
615
    return baserlib.MapFields(G_FIELDS, result[0])
616

    
617
  def DELETE(self):
618
    """Delete a node group.
619

620
    """
621
    op = opcodes.OpGroupRemove(group_name=self.items[0],
622
                               dry_run=bool(self.dryRun()))
623

    
624
    return baserlib.SubmitJob([op])
625

    
626

    
627
def _ParseModifyGroupRequest(name, data):
628
  """Parses a request for modifying a node group.
629

630
  @rtype: L{opcodes.OpGroupSetParams}
631
  @return: Group modify opcode
632

633
  """
634
  return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
635
    "group_name": name,
636
    })
637

    
638

    
639

    
640
class R_2_groups_name_modify(baserlib.R_Generic):
641
  """/2/groups/[group_name]/modify resource.
642

643
  """
644
  def PUT(self):
645
    """Changes some parameters of node group.
646

647
    @return: a job id
648

649
    """
650
    baserlib.CheckType(self.request_body, dict, "Body contents")
651

    
652
    op = _ParseModifyGroupRequest(self.items[0], self.request_body)
653

    
654
    return baserlib.SubmitJob([op])
655

    
656

    
657
def _ParseRenameGroupRequest(name, data, dry_run):
658
  """Parses a request for renaming a node group.
659

660
  @type name: string
661
  @param name: name of the node group to rename
662
  @type data: dict
663
  @param data: the body received by the rename request
664
  @type dry_run: bool
665
  @param dry_run: whether to perform a dry run
666

667
  @rtype: L{opcodes.OpGroupRename}
668
  @return: Node group rename opcode
669

670
  """
671
  return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
672
    "group_name": name,
673
    "dry_run": dry_run,
674
    })
675

    
676

    
677
class R_2_groups_name_rename(baserlib.R_Generic):
678
  """/2/groups/[group_name]/rename resource.
679

680
  """
681
  def PUT(self):
682
    """Changes the name of a node group.
683

684
    @return: a job id
685

686
    """
687
    baserlib.CheckType(self.request_body, dict, "Body contents")
688
    op = _ParseRenameGroupRequest(self.items[0], self.request_body,
689
                                  self.dryRun())
690
    return baserlib.SubmitJob([op])
691

    
692

    
693
class R_2_groups_name_assign_nodes(baserlib.R_Generic):
694
  """/2/groups/[group_name]/assign-nodes resource.
695

696
  """
697
  def PUT(self):
698
    """Assigns nodes to a group.
699

700
    @return: a job id
701

702
    """
703
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
704
      "group_name": self.items[0],
705
      "dry_run": self.dryRun(),
706
      "force": self.useForce(),
707
      })
708

    
709
    return baserlib.SubmitJob([op])
710

    
711

    
712
def _ParseInstanceCreateRequestVersion1(data, dry_run):
713
  """Parses an instance creation request version 1.
714

715
  @rtype: L{opcodes.OpInstanceCreate}
716
  @return: Instance creation opcode
717

718
  """
719
  override = {
720
    "dry_run": dry_run,
721
    }
722

    
723
  rename = {
724
    "os": "os_type",
725
    "name": "instance_name",
726
    }
727

    
728
  return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
729
                             rename=rename)
730

    
731

    
732
class R_2_instances(baserlib.R_Generic):
733
  """/2/instances resource.
734

735
  """
736
  def GET(self):
737
    """Returns a list of all available instances.
738

739
    """
740
    client = baserlib.GetClient()
741

    
742
    use_locking = self.useLocking()
743
    if self.useBulk():
744
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
745
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
746
    else:
747
      instancesdata = client.QueryInstances([], ["name"], use_locking)
748
      instanceslist = [row[0] for row in instancesdata]
749
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
750
                                   uri_fields=("id", "uri"))
751

    
752
  def _ParseVersion0CreateRequest(self):
753
    """Parses an instance creation request version 0.
754

755
    Request data version 0 is deprecated and should not be used anymore.
756

757
    @rtype: L{opcodes.OpInstanceCreate}
758
    @return: Instance creation opcode
759

760
    """
761
    # Do not modify anymore, request data version 0 is deprecated
762
    beparams = baserlib.MakeParamsDict(self.request_body,
763
                                       constants.BES_PARAMETERS)
764
    hvparams = baserlib.MakeParamsDict(self.request_body,
765
                                       constants.HVS_PARAMETERS)
766
    fn = self.getBodyParameter
767

    
768
    # disk processing
769
    disk_data = fn('disks')
770
    if not isinstance(disk_data, list):
771
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
772
    disks = []
773
    for idx, d in enumerate(disk_data):
774
      if not isinstance(d, int):
775
        raise http.HttpBadRequest("Disk %d specification wrong: should"
776
                                  " be an integer" % idx)
777
      disks.append({"size": d})
778

    
779
    # nic processing (one nic only)
780
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
781
    if fn("ip", None) is not None:
782
      nics[0]["ip"] = fn("ip")
783
    if fn("mode", None) is not None:
784
      nics[0]["mode"] = fn("mode")
785
    if fn("link", None) is not None:
786
      nics[0]["link"] = fn("link")
787
    if fn("bridge", None) is not None:
788
      nics[0]["bridge"] = fn("bridge")
789

    
790
    # Do not modify anymore, request data version 0 is deprecated
791
    return opcodes.OpInstanceCreate(
792
      mode=constants.INSTANCE_CREATE,
793
      instance_name=fn('name'),
794
      disks=disks,
795
      disk_template=fn('disk_template'),
796
      os_type=fn('os'),
797
      pnode=fn('pnode', None),
798
      snode=fn('snode', None),
799
      iallocator=fn('iallocator', None),
800
      nics=nics,
801
      start=fn('start', True),
802
      ip_check=fn('ip_check', True),
803
      name_check=fn('name_check', True),
804
      wait_for_sync=True,
805
      hypervisor=fn('hypervisor', None),
806
      hvparams=hvparams,
807
      beparams=beparams,
808
      file_storage_dir=fn('file_storage_dir', None),
809
      file_driver=fn('file_driver', constants.FD_LOOP),
810
      dry_run=bool(self.dryRun()),
811
      )
812

    
813
  def POST(self):
814
    """Create an instance.
815

816
    @return: a job id
817

818
    """
819
    if not isinstance(self.request_body, dict):
820
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
821

    
822
    # Default to request data version 0
823
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
824

    
825
    if data_version == 0:
826
      op = self._ParseVersion0CreateRequest()
827
    elif data_version == 1:
828
      data = self.request_body.copy()
829
      # Remove "__version__"
830
      data.pop(_REQ_DATA_VERSION, None)
831
      op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
832
    else:
833
      raise http.HttpBadRequest("Unsupported request data version %s" %
834
                                data_version)
835

    
836
    return baserlib.SubmitJob([op])
837

    
838

    
839
class R_2_instances_name(baserlib.R_Generic):
840
  """/2/instances/[instance_name] resource.
841

842
  """
843
  def GET(self):
844
    """Send information about an instance.
845

846
    """
847
    client = baserlib.GetClient()
848
    instance_name = self.items[0]
849

    
850
    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
851
                                            names=[instance_name],
852
                                            fields=I_FIELDS,
853
                                            use_locking=self.useLocking())
854

    
855
    return baserlib.MapFields(I_FIELDS, result[0])
856

    
857
  def DELETE(self):
858
    """Delete an instance.
859

860
    """
861
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
862
                                  ignore_failures=False,
863
                                  dry_run=bool(self.dryRun()))
864
    return baserlib.SubmitJob([op])
865

    
866

    
867
class R_2_instances_name_info(baserlib.R_Generic):
868
  """/2/instances/[instance_name]/info resource.
869

870
  """
871
  def GET(self):
872
    """Request detailed instance information.
873

874
    """
875
    instance_name = self.items[0]
876
    static = bool(self._checkIntVariable("static", default=0))
877

    
878
    op = opcodes.OpInstanceQueryData(instances=[instance_name],
879
                                     static=static)
880
    return baserlib.SubmitJob([op])
881

    
882

    
883
class R_2_instances_name_reboot(baserlib.R_Generic):
884
  """/2/instances/[instance_name]/reboot resource.
885

886
  Implements an instance reboot.
887

888
  """
889
  def POST(self):
890
    """Reboot an instance.
891

892
    The URI takes type=[hard|soft|full] and
893
    ignore_secondaries=[False|True] parameters.
894

895
    """
896
    instance_name = self.items[0]
897
    reboot_type = self.queryargs.get('type',
898
                                     [constants.INSTANCE_REBOOT_HARD])[0]
899
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
900
    op = opcodes.OpInstanceReboot(instance_name=instance_name,
901
                                  reboot_type=reboot_type,
902
                                  ignore_secondaries=ignore_secondaries,
903
                                  dry_run=bool(self.dryRun()))
904

    
905
    return baserlib.SubmitJob([op])
906

    
907

    
908
class R_2_instances_name_startup(baserlib.R_Generic):
909
  """/2/instances/[instance_name]/startup resource.
910

911
  Implements an instance startup.
912

913
  """
914
  def PUT(self):
915
    """Startup an instance.
916

917
    The URI takes force=[False|True] parameter to start the instance
918
    if even if secondary disks are failing.
919

920
    """
921
    instance_name = self.items[0]
922
    force_startup = bool(self._checkIntVariable('force'))
923
    op = opcodes.OpInstanceStartup(instance_name=instance_name,
924
                                   force=force_startup,
925
                                   dry_run=bool(self.dryRun()))
926

    
927
    return baserlib.SubmitJob([op])
928

    
929

    
930
class R_2_instances_name_shutdown(baserlib.R_Generic):
931
  """/2/instances/[instance_name]/shutdown resource.
932

933
  Implements an instance shutdown.
934

935
  """
936
  def PUT(self):
937
    """Shutdown an instance.
938

939
    """
940
    instance_name = self.items[0]
941
    op = opcodes.OpInstanceShutdown(instance_name=instance_name,
942
                                    dry_run=bool(self.dryRun()))
943

    
944
    return baserlib.SubmitJob([op])
945

    
946

    
947
def _ParseInstanceReinstallRequest(name, data):
948
  """Parses a request for reinstalling an instance.
949

950
  """
951
  if not isinstance(data, dict):
952
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")
953

    
954
  ostype = baserlib.CheckParameter(data, "os")
955
  start = baserlib.CheckParameter(data, "start", exptype=bool,
956
                                  default=True)
957
  osparams = baserlib.CheckParameter(data, "osparams", default=None)
958

    
959
  ops = [
960
    opcodes.OpInstanceShutdown(instance_name=name),
961
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
962
                                osparams=osparams),
963
    ]
964

    
965
  if start:
966
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
967

    
968
  return ops
969

    
970

    
971
class R_2_instances_name_reinstall(baserlib.R_Generic):
972
  """/2/instances/[instance_name]/reinstall resource.
973

974
  Implements an instance reinstall.
975

976
  """
977
  def POST(self):
978
    """Reinstall an instance.
979

980
    The URI takes os=name and nostartup=[0|1] optional
981
    parameters. By default, the instance will be started
982
    automatically.
983

984
    """
985
    if self.request_body:
986
      if self.queryargs:
987
        raise http.HttpBadRequest("Can't combine query and body parameters")
988

    
989
      body = self.request_body
990
    else:
991
      if not self.queryargs:
992
        raise http.HttpBadRequest("Missing query parameters")
993
      # Legacy interface, do not modify/extend
994
      body = {
995
        "os": self._checkStringVariable("os"),
996
        "start": not self._checkIntVariable("nostartup"),
997
        }
998

    
999
    ops = _ParseInstanceReinstallRequest(self.items[0], body)
1000

    
1001
    return baserlib.SubmitJob(ops)
1002

    
1003

    
1004
def _ParseInstanceReplaceDisksRequest(name, data):
1005
  """Parses a request for an instance export.
1006

1007
  @rtype: L{opcodes.OpInstanceReplaceDisks}
1008
  @return: Instance export opcode
1009

1010
  """
1011
  override = {
1012
    "instance_name": name,
1013
    }
1014

    
1015
  # Parse disks
1016
  try:
1017
    raw_disks = data["disks"]
1018
  except KeyError:
1019
    pass
1020
  else:
1021
    if not ht.TListOf(ht.TInt)(raw_disks): # pylint: disable-msg=E1102
1022
      # Backwards compatibility for strings of the format "1, 2, 3"
1023
      try:
1024
        data["disks"] = [int(part) for part in raw_disks.split(",")]
1025
      except (TypeError, ValueError), err:
1026
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1027

    
1028
  return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
1029

    
1030

    
1031
class R_2_instances_name_replace_disks(baserlib.R_Generic):
1032
  """/2/instances/[instance_name]/replace-disks resource.
1033

1034
  """
1035
  def POST(self):
1036
    """Replaces disks on an instance.
1037

1038
    """
1039
    op = _ParseInstanceReplaceDisksRequest(self.items[0], self.request_body)
1040

    
1041
    return baserlib.SubmitJob([op])
1042

    
1043

    
1044
class R_2_instances_name_activate_disks(baserlib.R_Generic):
1045
  """/2/instances/[instance_name]/activate-disks resource.
1046

1047
  """
1048
  def PUT(self):
1049
    """Activate disks for an instance.
1050

1051
    The URI might contain ignore_size to ignore current recorded size.
1052

1053
    """
1054
    instance_name = self.items[0]
1055
    ignore_size = bool(self._checkIntVariable('ignore_size'))
1056

    
1057
    op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1058
                                         ignore_size=ignore_size)
1059

    
1060
    return baserlib.SubmitJob([op])
1061

    
1062

    
1063
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
1064
  """/2/instances/[instance_name]/deactivate-disks resource.
1065

1066
  """
1067
  def PUT(self):
1068
    """Deactivate disks for an instance.
1069

1070
    """
1071
    instance_name = self.items[0]
1072

    
1073
    op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1074

    
1075
    return baserlib.SubmitJob([op])
1076

    
1077

    
1078
class R_2_instances_name_prepare_export(baserlib.R_Generic):
1079
  """/2/instances/[instance_name]/prepare-export resource.
1080

1081
  """
1082
  def PUT(self):
1083
    """Prepares an export for an instance.
1084

1085
    @return: a job id
1086

1087
    """
1088
    instance_name = self.items[0]
1089
    mode = self._checkStringVariable("mode")
1090

    
1091
    op = opcodes.OpBackupPrepare(instance_name=instance_name,
1092
                                 mode=mode)
1093

    
1094
    return baserlib.SubmitJob([op])
1095

    
1096

    
1097
def _ParseExportInstanceRequest(name, data):
1098
  """Parses a request for an instance export.
1099

1100
  @rtype: L{opcodes.OpBackupExport}
1101
  @return: Instance export opcode
1102

1103
  """
1104
  # Rename "destination" to "target_node"
1105
  try:
1106
    data["target_node"] = data.pop("destination")
1107
  except KeyError:
1108
    pass
1109

    
1110
  return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
1111
    "instance_name": name,
1112
    })
1113

    
1114

    
1115
class R_2_instances_name_export(baserlib.R_Generic):
1116
  """/2/instances/[instance_name]/export resource.
1117

1118
  """
1119
  def PUT(self):
1120
    """Exports an instance.
1121

1122
    @return: a job id
1123

1124
    """
1125
    if not isinstance(self.request_body, dict):
1126
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1127

    
1128
    op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1129

    
1130
    return baserlib.SubmitJob([op])
1131

    
1132

    
1133
def _ParseMigrateInstanceRequest(name, data):
1134
  """Parses a request for an instance migration.
1135

1136
  @rtype: L{opcodes.OpInstanceMigrate}
1137
  @return: Instance migration opcode
1138

1139
  """
1140
  return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
1141
    "instance_name": name,
1142
    })
1143

    
1144

    
1145
class R_2_instances_name_migrate(baserlib.R_Generic):
1146
  """/2/instances/[instance_name]/migrate resource.
1147

1148
  """
1149
  def PUT(self):
1150
    """Migrates an instance.
1151

1152
    @return: a job id
1153

1154
    """
1155
    baserlib.CheckType(self.request_body, dict, "Body contents")
1156

    
1157
    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1158

    
1159
    return baserlib.SubmitJob([op])
1160

    
1161

    
1162
def _ParseRenameInstanceRequest(name, data):
1163
  """Parses a request for renaming an instance.
1164

1165
  @rtype: L{opcodes.OpInstanceRename}
1166
  @return: Instance rename opcode
1167

1168
  """
1169
  return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
1170
    "instance_name": name,
1171
    })
1172

    
1173

    
1174
class R_2_instances_name_rename(baserlib.R_Generic):
1175
  """/2/instances/[instance_name]/rename resource.
1176

1177
  """
1178
  def PUT(self):
1179
    """Changes the name of an instance.
1180

1181
    @return: a job id
1182

1183
    """
1184
    baserlib.CheckType(self.request_body, dict, "Body contents")
1185

    
1186
    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1187

    
1188
    return baserlib.SubmitJob([op])
1189

    
1190

    
1191
def _ParseModifyInstanceRequest(name, data):
1192
  """Parses a request for modifying an instance.
1193

1194
  @rtype: L{opcodes.OpInstanceSetParams}
1195
  @return: Instance modify opcode
1196

1197
  """
1198
  return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
1199
    "instance_name": name,
1200
    })
1201

    
1202

    
1203
class R_2_instances_name_modify(baserlib.R_Generic):
1204
  """/2/instances/[instance_name]/modify resource.
1205

1206
  """
1207
  def PUT(self):
1208
    """Changes some parameters of an instance.
1209

1210
    @return: a job id
1211

1212
    """
1213
    baserlib.CheckType(self.request_body, dict, "Body contents")
1214

    
1215
    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1216

    
1217
    return baserlib.SubmitJob([op])
1218

    
1219

    
1220
class R_2_instances_name_disk_grow(baserlib.R_Generic):
1221
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1222

1223
  """
1224
  def POST(self):
1225
    """Increases the size of an instance disk.
1226

1227
    @return: a job id
1228

1229
    """
1230
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1231
      "instance_name": self.items[0],
1232
      "disk": int(self.items[1]),
1233
      })
1234

    
1235
    return baserlib.SubmitJob([op])
1236

    
1237

    
1238
class _R_Tags(baserlib.R_Generic):
1239
  """ Quasiclass for tagging resources
1240

1241
  Manages tags. When inheriting this class you must define the
1242
  TAG_LEVEL for it.
1243

1244
  """
1245
  TAG_LEVEL = None
1246

    
1247
  def __init__(self, items, queryargs, req):
1248
    """A tag resource constructor.
1249

1250
    We have to override the default to sort out cluster naming case.
1251

1252
    """
1253
    baserlib.R_Generic.__init__(self, items, queryargs, req)
1254

    
1255
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
1256
      self.name = None
1257
    else:
1258
      self.name = items[0]
1259

    
1260
  def GET(self):
1261
    """Returns a list of tags.
1262

1263
    Example: ["tag1", "tag2", "tag3"]
1264

1265
    """
1266
    # pylint: disable-msg=W0212
1267
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
1268

    
1269
  def PUT(self):
1270
    """Add a set of tags.
1271

1272
    The request as a list of strings should be PUT to this URI. And
1273
    you'll have back a job id.
1274

1275
    """
1276
    # pylint: disable-msg=W0212
1277
    if 'tag' not in self.queryargs:
1278
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
1279
                                " the 'tag' parameter")
1280
    return baserlib._Tags_PUT(self.TAG_LEVEL,
1281
                              self.queryargs['tag'], name=self.name,
1282
                              dry_run=bool(self.dryRun()))
1283

    
1284
  def DELETE(self):
1285
    """Delete a tag.
1286

1287
    In order to delete a set of tags, the DELETE
1288
    request should be addressed to URI like:
1289
    /tags?tag=[tag]&tag=[tag]
1290

1291
    """
1292
    # pylint: disable-msg=W0212
1293
    if 'tag' not in self.queryargs:
1294
      # no we not gonna delete all tags
1295
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
1296
                                " tag(s) using the 'tag' parameter")
1297
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
1298
                                 self.queryargs['tag'],
1299
                                 name=self.name,
1300
                                 dry_run=bool(self.dryRun()))
1301

    
1302

    
1303
class R_2_instances_name_tags(_R_Tags):
1304
  """ /2/instances/[instance_name]/tags resource.
1305

1306
  Manages per-instance tags.
1307

1308
  """
1309
  TAG_LEVEL = constants.TAG_INSTANCE
1310

    
1311

    
1312
class R_2_nodes_name_tags(_R_Tags):
1313
  """ /2/nodes/[node_name]/tags resource.
1314

1315
  Manages per-node tags.
1316

1317
  """
1318
  TAG_LEVEL = constants.TAG_NODE
1319

    
1320

    
1321
class R_2_tags(_R_Tags):
1322
  """ /2/tags resource.
1323

1324
  Manages cluster tags.
1325

1326
  """
1327
  TAG_LEVEL = constants.TAG_CLUSTER