#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions used by the node daemon

@var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
     the L{UploadFile} function
@var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
     in the L{_CleanDirectory} function

"""

# pylint: disable=E1103

# E1103: %s %r has no %r member (but some types could not be
# inferred), because the _TryOSFromDisk returns either (True, os_obj)
# or (False, "string") which confuses pylint


import os
import os.path
import shutil
import time
import stat
import errno
import re
import random
import logging
import tempfile
import zlib
import base64
import signal

from ganeti import errors
from ganeti import utils
from ganeti import ssh
from ganeti import hypervisor
from ganeti import constants
from ganeti.storage import bdev
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import netutils
from ganeti import runtime
from ganeti import compat
from ganeti import pathutils
from ganeti import vcluster
from ganeti import ht
from ganeti.storage.base import BlockDev
from ganeti.storage.drbd import DRBD8
from ganeti import hooksmaster


_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
  pathutils.DATA_DIR,
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
  pathutils.QUEUE_DIR,
  pathutils.CRYPTO_KEYS_DIR,
  ])
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
_X509_KEY_FILE = "key"
_X509_CERT_FILE = "cert"
_IES_STATUS_FILE = "status"
_IES_PID_FILE = "pid"
_IES_CA_FILE = "ca"

#: Valid LVS output line regex
_LVSLINE_REGEX = re.compile("^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")
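# An illustrative line this regex is meant to match, as produced by
# "lvs --separator=|" (see GetVolumeList below); the attribute field is
# at least six characters long:
#   "  xenvg|test1|20.06|-wi-ao----"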

# Actions for the master setup script
_MASTER_START = "start"
_MASTER_STOP = "stop"

#: Maximum file permissions for restricted command directory and executables
_RCMD_MAX_MODE = (stat.S_IRWXU |
                  stat.S_IRGRP | stat.S_IXGRP |
                  stat.S_IROTH | stat.S_IXOTH)

#: Delay before returning an error for restricted commands
_RCMD_INVALID_DELAY = 10

#: How long to wait to acquire lock for restricted commands (shorter than
#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
#: command requests arrive
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
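# (with _RCMD_INVALID_DELAY at its default of 10, this is 8 seconds)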


class RPCFail(Exception):
  """Class denoting RPC failure.

  Its argument is the error message.

  """


def _GetInstReasonFilename(instance_name):
  """Path of the file containing the reason of the instance status change.

  @type instance_name: string
  @param instance_name: The name of the instance
  @rtype: string
  @return: The path of the file

  """
  return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)


def _StoreInstReasonTrail(instance_name, trail):
  """Serialize a reason trail related to an instance change of state to file.

  The exact location of the file depends on the name of the instance and on
  the configuration of the Ganeti cluster defined at deploy time.

  @type instance_name: string
  @param instance_name: The name of the instance
  @rtype: None

  """
  json = serializer.DumpJson(trail)
  filename = _GetInstReasonFilename(instance_name)
  utils.WriteFile(filename, data=json)


def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.

  This exception is then handled specially in the ganeti daemon and
  turned into a 'failed' return type. As such, this function is a
  useful shortcut for logging the error and returning it to the master
  daemon.

  @type msg: string
  @param msg: the text of the exception
  @raise RPCFail

  """
  if args:
    msg = msg % args
  if "log" not in kwargs or kwargs["log"]: # if we should log this error
    if "exc" in kwargs and kwargs["exc"]:
      logging.exception(msg)
    else:
      logging.error(msg)
  raise RPCFail(msg)
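
# Typical _Fail usage (illustrative): positional arguments are
# interpolated into the message, and exc=True also logs the current
# traceback, e.g.:
#
#   _Fail("Cluster configuration incomplete: %s", err, exc=True)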


def _GetConfig():
  """Simple wrapper to return a SimpleStore.

  @rtype: L{ssconf.SimpleStore}
  @return: a SimpleStore instance

  """
  return ssconf.SimpleStore()


def _GetSshRunner(cluster_name):
  """Simple wrapper to return an SshRunner.

  @type cluster_name: str
  @param cluster_name: the cluster name, which is needed
      by the SshRunner constructor
  @rtype: L{ssh.SshRunner}
  @return: an SshRunner instance

  """
  return ssh.SshRunner(cluster_name)


def _Decompress(data):
  """Unpacks data compressed by the RPC client.

  @type data: list or tuple
  @param data: Data sent by RPC client
  @rtype: str
  @return: Decompressed data

  """
  assert isinstance(data, (list, tuple))
  assert len(data) == 2
  (encoding, content) = data
  if encoding == constants.RPC_ENCODING_NONE:
    return content
  elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
    return zlib.decompress(base64.b64decode(content))
  else:
    raise AssertionError("Unknown data encoding")
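
# Illustrative only: _Decompress accepts the (encoding, payload) pairs
# built by the RPC client, e.g.:
#
#   _Decompress((constants.RPC_ENCODING_NONE, "plain data"))
#   _Decompress((constants.RPC_ENCODING_ZLIB_BASE64,
#                base64.b64encode(zlib.compress("compressed data"))))
#
# both of which return the original string.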


def _CleanDirectory(path, exclude=None):
  """Removes all regular files in a directory.

  @type path: str
  @param path: the directory to clean
  @type exclude: list
  @param exclude: list of files to be excluded, defaults
      to the empty list

  """
  if path not in _ALLOWED_CLEAN_DIRS:
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
          path)

  if not os.path.isdir(path):
    return
  if exclude is None:
    exclude = []
  else:
    # Normalize excluded paths
    exclude = [os.path.normpath(i) for i in exclude]

  for rel_name in utils.ListVisibleFiles(path):
    full_name = utils.PathJoin(path, rel_name)
    if full_name in exclude:
      continue
    if os.path.isfile(full_name) and not os.path.islink(full_name):
      utils.RemoveFile(full_name)


def _BuildUploadFileList():
  """Build the list of allowed upload files.

  This is abstracted so that it's built only once at module import time.

  """
  allowed_files = set([
    pathutils.CLUSTER_CONF_FILE,
    pathutils.ETC_HOSTS,
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.VNC_PASSWORD_FILE,
    pathutils.RAPI_CERT_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  for hv_name in constants.HYPER_TYPES:
    hv_class = hypervisor.GetHypervisorClass(hv_name)
    allowed_files.update(hv_class.GetAncillaryFiles()[0])

  assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
    "Allowed file storage paths should never be uploaded via RPC"

  return frozenset(allowed_files)


_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()


def JobQueuePurge():
  """Removes job queue files and archived jobs.

  @rtype: tuple
  @return: True, None

  """
  _CleanDirectory(pathutils.QUEUE_DIR, exclude=[pathutils.JOB_QUEUE_LOCK_FILE])
  _CleanDirectory(pathutils.JOB_QUEUE_ARCHIVE_DIR)


def GetMasterInfo():
  """Returns master information.

  This is a utility function to compute master information, either
  for consumption here or from the node daemon.

  @rtype: tuple
  @return: master_netdev, master_ip, master_name, primary_ip_family,
    master_netmask
  @raise RPCFail: in case of errors

  """
  try:
    cfg = _GetConfig()
    master_netdev = cfg.GetMasterNetdev()
    master_ip = cfg.GetMasterIP()
    master_netmask = cfg.GetMasterNetmask()
    master_node = cfg.GetMasterNode()
    primary_ip_family = cfg.GetPrimaryIPFamily()
  except errors.ConfigurationError, err:
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
  return (master_netdev, master_ip, master_node, primary_ip_family,
          master_netmask)


def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
  """Decorator that runs hooks before and after the decorated function.

  @type hook_opcode: string
  @param hook_opcode: opcode of the hook
  @type hooks_path: string
  @param hooks_path: path of the hooks
  @type env_builder_fn: function
  @param env_builder_fn: function that returns a dictionary containing the
    environment variables for the hooks. Will get all the parameters of the
    decorated function.
  @raise RPCFail: in case of pre-hook failure

  """
  def decorator(fn):
    def wrapper(*args, **kwargs):
      _, myself = ssconf.GetMasterAndMyself()
      nodes = ([myself], [myself])  # these hooks run locally

      env_fn = compat.partial(env_builder_fn, *args, **kwargs)

      cfg = _GetConfig()
      hr = HooksRunner()
      hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
                                   hr.RunLocalHooks, None, env_fn,
                                   logging.warning, cfg.GetClusterName(),
                                   cfg.GetMasterNode())
      hm.RunPhase(constants.HOOKS_PHASE_PRE)
      result = fn(*args, **kwargs)
      hm.RunPhase(constants.HOOKS_PHASE_POST)

      return result
    return wrapper
  return decorator


def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
  """Builds environment variables for master IP hooks.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script (unused, but necessary per the implementation of the
    _RunLocalHooks decorator)

  """
  # pylint: disable=W0613
  ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
  env = {
    "MASTER_NETDEV": master_params.netdev,
    "MASTER_IP": master_params.ip,
    "MASTER_NETMASK": str(master_params.netmask),
    "CLUSTER_IP_VERSION": str(ver),
  }

  return env


def _RunMasterSetupScript(master_params, action, use_external_mip_script):
  """Execute the master IP address setup script.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type action: string
  @param action: action to pass to the script. Must be one of
    L{backend._MASTER_START} or L{backend._MASTER_STOP}
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise backend.RPCFail: if there are errors during the execution of the
    script

  """
  env = _BuildMasterIpEnv(master_params)

  if use_external_mip_script:
    setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
  else:
    setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT

  result = utils.RunCmd([setup_script, action], env=env, reset_env=True)

  if result.failed:
    _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
          (action, result.exit_code, result.output), log=True)


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
               _BuildMasterIpEnv)
def ActivateMasterIp(master_params, use_external_mip_script):
  """Activate the IP address of the master daemon.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP startup

  """
  _RunMasterSetupScript(master_params, _MASTER_START,
                        use_external_mip_script)


def StartMasterDaemons(no_voting):
  """Activate local node as master node.

  The function will start the master daemons (ganeti-masterd and ganeti-rapi).

  @type no_voting: boolean
  @param no_voting: whether to start ganeti-masterd without a node vote
      but still non-interactively
  @rtype: None

  """

  if no_voting:
    masterd_args = "--no-voting --yes-do-it"
  else:
    masterd_args = ""

  env = {
    "EXTRA_MASTERD_ARGS": masterd_args,
    }

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
  if result.failed:
    msg = "Can't start Ganeti master: %s" % result.output
    logging.error(msg)
    _Fail(msg)


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
               _BuildMasterIpEnv)
def DeactivateMasterIp(master_params, use_external_mip_script):
  """Deactivate the master IP on this node.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP turndown

  """
  _RunMasterSetupScript(master_params, _MASTER_STOP,
                        use_external_mip_script)


def StopMasterDaemons():
  """Stop the master daemons on this node.

  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.

  @rtype: None

  """
  # TODO: log and report back to the caller the error failures; we
  # need to decide in which case we fail the RPC for this

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
  if result.failed:
    logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                  " and error %s",
                  result.cmd, result.exit_code, result.output)


def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
  """Change the netmask of the master IP.

  @param old_netmask: the old value of the netmask
  @param netmask: the new value of the netmask
  @param master_ip: the master IP
  @param master_netdev: the master network device

  """
  if old_netmask == netmask:
    return

  if not netutils.IPAddress.Own(master_ip):
    _Fail("The master IP address is not up, not attempting to change its"
          " netmask")

  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
                         "%s/%s" % (master_ip, netmask),
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])
  if result.failed:
    _Fail("Could not set the new netmask on the master IP address")

  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
                         "%s/%s" % (master_ip, old_netmask),
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])
  if result.failed:
    _Fail("Could not bring down the master IP address with the old netmask")


def EtcHostsModify(mode, host, ip):
  """Modify a host entry in /etc/hosts.

  @param mode: The mode to operate. Either add or remove entry
  @param host: The host to operate on
  @param ip: The ip associated with the entry

  """
  if mode == constants.ETC_HOSTS_ADD:
    if not ip:
      _Fail("Mode 'add' needs 'ip' parameter, but parameter not"
            " present")
    utils.AddHostToEtcHosts(host, ip)
  elif mode == constants.ETC_HOSTS_REMOVE:
    if ip:
      _Fail("Mode 'remove' does not allow 'ip' parameter, but"
            " parameter is present")
    utils.RemoveHostFromEtcHosts(host)
  else:
    _Fail("Mode not supported")


def LeaveCluster(modify_ssh_setup):
  """Cleans up and removes the current node.

  This function cleans up and prepares the current node to be removed
  from the cluster.

  If processing is successful, then it raises an
  L{errors.QuitGanetiException} which is used as a special case to
  shut down the node daemon.

  @param modify_ssh_setup: boolean

  """
  _CleanDirectory(pathutils.DATA_DIR)
  _CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
  JobQueuePurge()

  if modify_ssh_setup:
    try:
      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))

      utils.RemoveFile(priv_key)
      utils.RemoveFile(pub_key)
    except errors.OpExecError:
      logging.exception("Error while processing ssh files")

  try:
    utils.RemoveFile(pathutils.CONFD_HMAC_KEY)
    utils.RemoveFile(pathutils.RAPI_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CACERT_FILE)
    utils.RemoveFile(pathutils.NODED_CERT_FILE)
  except: # pylint: disable=W0702
    logging.exception("Error while removing cluster secrets")

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.CONFD])
  if result.failed:
    logging.error("Command %s failed with exitcode %s and error %s",
                  result.cmd, result.exit_code, result.output)

  # Raise a custom exception (handled in ganeti-noded)
  raise errors.QuitGanetiException(True, "Shutdown scheduled")


def _CheckStorageParams(params, num_params):
  """Performs sanity checks for storage parameters.

  @type params: list
  @param params: list of storage parameters
  @type num_params: int
  @param num_params: expected number of parameters

  """
  if params is None:
    raise errors.ProgrammerError("No storage parameters for storage"
                                 " reporting are provided.")
  if not isinstance(params, list):
    raise errors.ProgrammerError("The storage parameters are not of type"
                                 " list: '%s'" % params)
  if not len(params) == num_params:
    raise errors.ProgrammerError("Did not receive the expected number of"
                                 " storage parameters: expected %s,"
                                 " received '%s'" % (num_params, len(params)))


def _CheckLvmStorageParams(params):
  """Performs sanity check for the 'exclusive storage' flag.

  @see: C{_CheckStorageParams}

  """
  _CheckStorageParams(params, 1)
  excl_stor = params[0]
  if not isinstance(params[0], bool):
    raise errors.ProgrammerError("Exclusive storage parameter is not"
                                 " boolean: '%s'." % excl_stor)
  return excl_stor


def _GetLvmVgSpaceInfo(name, params):
  """Wrapper around C{_GetVgInfo} which checks the storage parameters.

  @type name: string
  @param name: name of the volume group
  @type params: list
  @param params: list of storage parameters, which in this case should
    contain only one item: the exclusive storage flag

  """
  excl_stor = _CheckLvmStorageParams(params)
  return _GetVgInfo(name, excl_stor)


def _GetVgInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
  """Retrieves information about an LVM volume group.

  """
  # TODO: GetVGInfo supports returning information for multiple VGs at once
  vginfo = info_fn([name], excl_stor)
  if vginfo:
    vg_free = int(round(vginfo[0][0], 0))
    vg_size = int(round(vginfo[0][1], 0))
  else:
    vg_free = None
    vg_size = None

  return {
    "type": constants.ST_LVM_VG,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }


def _GetLvmPvSpaceInfo(name, params):
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.

  @see: C{_GetLvmVgSpaceInfo}

  """
  excl_stor = _CheckLvmStorageParams(params)
  return _GetVgSpindlesInfo(name, excl_stor)


def _GetVgSpindlesInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
  """Retrieves information about spindles in an LVM volume group.

  @type name: string
  @param name: VG name
  @type excl_stor: bool
  @param excl_stor: exclusive storage
  @rtype: dict
  @return: dictionary with keys "type", "name", "storage_free" and
      "storage_size", holding the storage type, VG name, free spindles
      and total spindles respectively

  """
  if excl_stor:
    (vg_free, vg_size) = info_fn(name)
  else:
    vg_free = 0
    vg_size = 0
  return {
    "type": constants.ST_LVM_PV,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }


def _GetHvInfo(name, hvparams, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information from a hypervisor.

  The information returned depends on the hypervisor. Common items:

    - vg_size is the size of the configured volume group in MiB
    - vg_free is the free size of the volume group in MiB
    - memory_dom0 is the memory allocated for domain0 in MiB
    - memory_free is the currently available (free) RAM in MiB
    - memory_total is the total amount of RAM in MiB
    - hv_version: the hypervisor version, if available

  @type hvparams: dict of string
  @param hvparams: the hypervisor's hvparams

  """
  return get_hv_fn(name).GetNodeInfo(hvparams=hvparams)


def _GetHvInfoAll(hv_specs, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information for all hypervisors.

  See C{_GetHvInfo} for information on the output.

  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams

  """
  if hv_specs is None:
    return None

  result = []
  for hvname, hvparams in hv_specs:
    result.append(_GetHvInfo(hvname, hvparams, get_hv_fn))
  return result


def _GetNamedNodeInfo(names, fn):
  """Calls C{fn} for all names in C{names} and returns a list of results.

  @rtype: None or list

  """
  if names is None:
    return None
  else:
    return map(fn, names)


def GetNodeInfo(storage_units, hv_specs):
  """Gives back a hash with different information about the node.

  @type storage_units: list of tuples (string, string, list)
  @param storage_units: List of tuples (storage unit, identifier, parameters) to
    ask for disk space information. In case of lvm-vg, the identifier is
    the VG name. The parameters can contain additional, storage-type-specific
    parameters, for example exclusive storage for lvm storage.
  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
  @rtype: tuple; (string, None/dict, None/dict)
  @return: Tuple containing boot ID, volume group information and hypervisor
    information

  """
  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
  storage_info = _GetNamedNodeInfo(
    storage_units,
    (lambda (storage_type, storage_key, storage_params):
        _ApplyStorageInfoFunction(storage_type, storage_key, storage_params)))
  hv_info = _GetHvInfoAll(hv_specs)
  return (bootid, storage_info, hv_info)
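
# Illustrative call (the VG name is an example only): report disk space
# for one LVM volume group without exclusive storage, skipping
# hypervisor information:
#
#   GetNodeInfo([(constants.ST_LVM_VG, "xenvg", [False])], None)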


def _GetFileStorageSpaceInfo(path, params):
  """Wrapper around filestorage.GetSpaceInfo.

  The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
  and ignore the *args parameter to not leak it into the filestorage
  module's code.

  @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
    parameters.

  """
  _CheckStorageParams(params, 0)
  return filestorage.GetFileStorageSpaceInfo(path)


# FIXME: implement storage reporting for all missing storage types.
_STORAGE_TYPE_INFO_FN = {
  constants.ST_BLOCK: None,
  constants.ST_DISKLESS: None,
  constants.ST_EXT: None,
  constants.ST_FILE: _GetFileStorageSpaceInfo,
  constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
  constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
  constants.ST_RADOS: None,
}


def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
  """Looks up and applies the correct function to calculate free and total
  storage for the given storage type.

  @type storage_type: string
  @param storage_type: the storage type for which the storage shall be reported.
  @type storage_key: string
  @param storage_key: identifier of a storage unit, e.g. the volume group name
    of an LVM storage unit
  @type args: any
  @param args: various parameters that can be used for storage reporting. These
    parameters and their semantics vary from storage type to storage type and
    are just propagated in this function.
  @return: the results of the application of the storage space function (see
    _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
    storage type
  @raises NotImplementedError: for storage types that don't support space
    reporting yet
  """
  fn = _STORAGE_TYPE_INFO_FN[storage_type]
  if fn is not None:
    return fn(storage_key, *args)
  else:
    raise NotImplementedError


def _CheckExclusivePvs(pvi_list):
  """Check that PVs are not shared among LVs

  @type pvi_list: list of L{objects.LvmPvInfo} objects
  @param pvi_list: information about the PVs

  @rtype: list of tuples (string, list of strings)
  @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])

  """
  res = []
  for pvi in pvi_list:
    if len(pvi.lv_list) > 1:
      res.append((pvi.name, pvi.lv_list))
  return res


def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
                       get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hypervisor. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  if not vm_capable:
    return

  if constants.NV_HYPERVISOR in what:
    result[constants.NV_HYPERVISOR] = {}
    for hv_name in what[constants.NV_HYPERVISOR]:
      hvparams = all_hvparams[hv_name]
      try:
        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
      except errors.HypervisorError, err:
        val = "Error while checking hypervisor: %s" % str(err)
      result[constants.NV_HYPERVISOR][hv_name] = val


def _VerifyHvparams(what, vm_capable, result,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hvparams. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  if not vm_capable:
    return

  if constants.NV_HVPARAMS in what:
    result[constants.NV_HVPARAMS] = []
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
      try:
        logging.info("Validating hv %s, %s", hv_name, hvparms)
        get_hv_fn(hv_name).ValidateParameters(hvparms)
      except errors.HypervisorError, err:
        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))


def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
  """Verifies the instance list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_INSTANCELIST in what and vm_capable:
    # GetInstanceList can fail
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
                            all_hvparams=all_hvparams)
    except RPCFail, err:
      val = str(err)
    result[constants.NV_INSTANCELIST] = val


def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
  """Verifies the node info.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_HVINFO in what and vm_capable:
    hvname = what[constants.NV_HVINFO]
    hyper = hypervisor.GetHypervisor(hvname)
    hvparams = all_hvparams[hvname]
    result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)


def VerifyNode(what, cluster_name, all_hvparams):
  """Verify the status of the local node.

  Based on the input L{what} parameter, various checks are done on the
  local node.

  If the I{filelist} key is present, this list of
  files is checksummed and the file/checksum pairs are returned.

  If the I{nodelist} key is present, we check that we have
  connectivity via ssh with the target nodes (and check the hostname
  report).

  If the I{node-net-test} key is present, we check that we have
  connectivity to the given nodes via both primary IP and, if
  applicable, secondary IPs.

  @type what: C{dict}
  @param what: a dictionary of things to check:
      - filelist: list of files for which to compute checksums
      - nodelist: list of nodes we should check ssh communication with
      - node-net-test: list of nodes we should check node daemon port
        connectivity with
      - hypervisor: list with hypervisors to run the verify for
  @type cluster_name: string
  @param cluster_name: the cluster's name
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: a dictionary mapping hypervisor names to hvparams
  @rtype: dict
  @return: a dictionary with the same keys as the input dict, and
      values representing the result of the checks

  """
  result = {}
  my_name = netutils.Hostname.GetSysName()
  port = netutils.GetDaemonPort(constants.NODED)
  vm_capable = my_name not in what.get(constants.NV_VMNODES, [])

  _VerifyHypervisors(what, vm_capable, result, all_hvparams)
  _VerifyHvparams(what, vm_capable, result)

  if constants.NV_FILELIST in what:
    fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
                                              what[constants.NV_FILELIST]))
    result[constants.NV_FILELIST] = \
      dict((vcluster.MakeVirtualPath(key), value)
           for (key, value) in fingerprints.items())

  if constants.NV_NODELIST in what:
    (nodes, bynode) = what[constants.NV_NODELIST]

    # Add nodes from other groups (different for each node)
    try:
      nodes.extend(bynode[my_name])
    except KeyError:
      pass

    # Use a random order
    random.shuffle(nodes)

    # Try to contact all nodes
    val = {}
    for node in nodes:
      success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
      if not success:
        val[node] = message

    result[constants.NV_NODELIST] = val

  if constants.NV_NODENETTEST in what:
    result[constants.NV_NODENETTEST] = tmp = {}
    my_pip = my_sip = None
    for name, pip, sip in what[constants.NV_NODENETTEST]:
      if name == my_name:
        my_pip = pip
        my_sip = sip
        break
    if not my_pip:
      tmp[my_name] = ("Can't find my own primary/secondary IP"
                      " in the node list")
    else:
      for name, pip, sip in what[constants.NV_NODENETTEST]:
        fail = []
        if not netutils.TcpPing(pip, port, source=my_pip):
          fail.append("primary")
        if sip != pip:
          if not netutils.TcpPing(sip, port, source=my_sip):
            fail.append("secondary")
        if fail:
          tmp[name] = ("failure using the %s interface(s)" %
                       " and ".join(fail))

  if constants.NV_MASTERIP in what:
    # FIXME: add checks on incoming data structures (here and in the
    # rest of the function)
    master_name, master_ip = what[constants.NV_MASTERIP]
    if master_name == my_name:
      source = constants.IP4_ADDRESS_LOCALHOST
    else:
      source = None
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
                                                     source=source)

  if constants.NV_USERSCRIPTS in what:
    result[constants.NV_USERSCRIPTS] = \
      [script for script in what[constants.NV_USERSCRIPTS]
       if not utils.IsExecutable(script)]

  if constants.NV_OOB_PATHS in what:
    result[constants.NV_OOB_PATHS] = tmp = []
    for path in what[constants.NV_OOB_PATHS]:
      try:
        st = os.stat(path)
      except OSError, err:
        tmp.append("error stat()'ing out of band helper: %s" % err)
      else:
        if stat.S_ISREG(st.st_mode):
          if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
            tmp.append(None)
          else:
            tmp.append("out of band helper %s is not executable" % path)
        else:
          tmp.append("out of band helper %s is not a file" % path)

  if constants.NV_LVLIST in what and vm_capable:
    try:
      val = GetVolumeList(utils.ListVolumeGroups().keys())
    except RPCFail, err:
      val = str(err)
    result[constants.NV_LVLIST] = val

  _VerifyInstanceList(what, vm_capable, result, all_hvparams)

  if constants.NV_VGLIST in what and vm_capable:
    result[constants.NV_VGLIST] = utils.ListVolumeGroups()

  if constants.NV_PVLIST in what and vm_capable:
    check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
    val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
                                       filter_allocatable=False,
                                       include_lvs=check_exclusive_pvs)
    if check_exclusive_pvs:
      result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
      for pvi in val:
        # Avoid sending useless data on the wire
        pvi.lv_list = []
    result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)

  if constants.NV_VERSION in what:
    result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
                                    constants.RELEASE_VERSION)

  _VerifyNodeInfo(what, vm_capable, result, all_hvparams)

  if constants.NV_DRBDVERSION in what and vm_capable:
    try:
      drbd_version = DRBD8.GetProcInfo().GetVersionString()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get DRBD version", exc_info=True)
      drbd_version = str(err)
    result[constants.NV_DRBDVERSION] = drbd_version

  if constants.NV_DRBDLIST in what and vm_capable:
    try:
      used_minors = drbd.DRBD8.GetUsedDevs()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get used minors list", exc_info=True)
      used_minors = str(err)
    result[constants.NV_DRBDLIST] = used_minors

  if constants.NV_DRBDHELPER in what and vm_capable:
    status = True
    try:
      payload = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      logging.error("Can't get DRBD usermode helper: %s", str(err))
      status = False
      payload = str(err)
    result[constants.NV_DRBDHELPER] = (status, payload)

  if constants.NV_NODESETUP in what:
    result[constants.NV_NODESETUP] = tmpr = []
    if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
      tmpr.append("The sysfs filesystem doesn't seem to be mounted"
                  " under /sys, missing required directories /sys/block"
                  " and /sys/class/net")
    if (not os.path.isdir("/proc/sys") or
        not os.path.isfile("/proc/sysrq-trigger")):
      tmpr.append("The procfs filesystem doesn't seem to be mounted"
                  " under /proc, missing required directory /proc/sys and"
                  " the file /proc/sysrq-trigger")

  if constants.NV_TIME in what:
    result[constants.NV_TIME] = utils.SplitTime(time.time())

  if constants.NV_OSLIST in what and vm_capable:
    result[constants.NV_OSLIST] = DiagnoseOS()

  if constants.NV_BRIDGES in what and vm_capable:
    result[constants.NV_BRIDGES] = [bridge
                                    for bridge in what[constants.NV_BRIDGES]
                                    if not utils.BridgeExists(bridge)]

  if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
    result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
        filestorage.ComputeWrongFileStoragePaths()

  if what.get(constants.NV_FILE_STORAGE_PATH):
    pathresult = filestorage.CheckFileStoragePath(
        what[constants.NV_FILE_STORAGE_PATH])
    if pathresult:
      result[constants.NV_FILE_STORAGE_PATH] = pathresult

  return result


def GetBlockDevSizes(devices):
  """Return the size of the given block devices

  @type devices: list
  @param devices: list of block device nodes to query
  @rtype: dict
  @return:
    dictionary of all block devices under /dev (key). The value is their
    size in MiB.

    {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}

  """
  DEV_PREFIX = "/dev/"
  blockdevs = {}

  for devpath in devices:
    if not utils.IsBelowDir(DEV_PREFIX, devpath):
      continue

    try:
      st = os.stat(devpath)
    except EnvironmentError, err:
      logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
      continue

    if stat.S_ISBLK(st.st_mode):
      result = utils.RunCmd(["blockdev", "--getsize64", devpath])
      if result.failed:
        # We don't want to fail, just do not list this device as available
        logging.warning("Cannot get size for block device %s", devpath)
        continue

      size = int(result.stdout) / (1024 * 1024)
      blockdevs[devpath] = size
  return blockdevs


def GetVolumeList(vg_names):
  """Compute list of logical volumes and their size.

  @type vg_names: list
  @param vg_names: the volume groups whose LVs we should list, or
      empty for all volume groups
  @rtype: dict
  @return:
      dictionary of all partitions (key) with value being a tuple of
      their size (in MiB), inactive and online status::

        {'xenvg/test1': ('20.06', True, True)}

      in case of errors, a string is returned with the error
      details.

  """
  lvs = {}
  sep = "|"
  if not vg_names:
    vg_names = []
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=%s" % sep,
                         "-ovg_name,lv_name,lv_size,lv_attr"] + vg_names)
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)

  for line in result.stdout.splitlines():
    line = line.strip()
    match = _LVSLINE_REGEX.match(line)
    if not match:
      logging.error("Invalid line returned from lvs output: '%s'", line)
      continue
    vg_name, name, size, attr = match.groups()
    inactive = attr[4] == "-"
    online = attr[5] == "o"
    virtual = attr[0] == "v"
    if virtual:
      # we don't want to report such volumes as existing, since they
      # don't really hold data
      continue
    lvs[vg_name + "/" + name] = (size, inactive, online)

  return lvs


def ListVolumeGroups():
  """List the volume groups and their size.

  @rtype: dict
  @return: dictionary with keys volume name and values the
      size of the volume

  """
  return utils.ListVolumeGroups()


def NodeVolumes():
  """List all volumes on this node.

  @rtype: list
  @return:
    A list of dictionaries, each having four keys:
      - name: the logical volume name,
      - size: the size of the logical volume
      - dev: the physical device on which the LV lives
      - vg: the volume group to which it belongs

    In case of errors, we return an empty list and log the
    error.

    Note that since a logical volume can live on multiple physical
    volumes, the resulting list might include a logical volume
    multiple times.

  """
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=|",
                         "--options=lv_name,lv_size,devices,vg_name"])
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s",
          result.output)

  def parse_dev(dev):
    return dev.split("(")[0]

  def handle_dev(dev):
    return [parse_dev(x) for x in dev.split(",")]

  def map_line(line):
    line = [v.strip() for v in line]
    return [{"name": line[0], "size": line[1],
             "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])]

  all_devs = []
  for line in result.stdout.splitlines():
    if line.count("|") >= 3:
      all_devs.extend(map_line(line.split("|")))
    else:
      logging.warning("Strange line in the output from lvs: '%s'", line)
  return all_devs


def BridgesExist(bridges_list):
  """Check if a list of bridges exist on the current node.

  @rtype: boolean
  @return: C{True} if all of them exist, C{False} otherwise

  """
  missing = []
  for bridge in bridges_list:
    if not utils.BridgeExists(bridge):
      missing.append(bridge)

  if missing:
    _Fail("Missing bridges %s", utils.CommaJoin(missing))


def GetInstanceListForHypervisor(hname, hvparams=None,
                                 get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances of the given hypervisor.

  @type hname: string
  @param hname: name of the hypervisor
  @type hvparams: dict of strings
  @param hvparams: hypervisor parameters for the given hypervisor
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
    name; optional parameter to increase testability

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  try:
    hv = get_hv_fn(hname)
    names = hv.ListInstances(hvparams=hvparams)
    results.extend(names)
  except errors.HypervisorError, err:
    _Fail("Error enumerating instances (hypervisor %s): %s",
          hname, err, exc=True)
  return results


def GetInstanceList(hypervisor_list, all_hvparams=None,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances.

  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query information
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: a dictionary mapping hypervisor types to respective
    cluster-wide hypervisor parameters
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
    name; optional parameter to increase testability

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  for hname in hypervisor_list:
    hvparams = all_hvparams[hname]
    results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
                                                get_hv_fn=get_hv_fn))
  return results


def GetInstanceInfo(instance, hname, hvparams=None):
  """Gives back the information about an instance as a dictionary.

  @type instance: string
  @param instance: the instance name
  @type hname: string
  @param hname: the hypervisor type of the instance
  @type hvparams: dict of strings
  @param hvparams: the instance's hvparams

  @rtype: dict
  @return: dictionary with the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus (int)

  """
  output = {}

  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance,
                                                          hvparams=hvparams)
  if iinfo is not None:
    output["memory"] = iinfo[2]
    output["vcpus"] = iinfo[3]
    output["state"] = iinfo[4]
    output["time"] = iinfo[5]

  return output


def GetInstanceMigratable(instance):
  """Computes whether an instance can be migrated.

  @type instance: L{objects.Instance}
  @param instance: object representing the instance to be checked.

  @rtype: tuple
  @return: tuple of (result, description) where:
      - result: whether the instance can be migrated or not
      - description: a description of the issue, if relevant

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  iname = instance.name
  if iname not in hyper.ListInstances(instance.hvparams):
    _Fail("Instance %s is not running", iname)

  for idx in range(len(instance.disks)):
    link_name = _GetBlockDevSymlinkPath(iname, idx)
    if not os.path.islink(link_name):
      logging.warning("Instance %s is missing symlink %s for disk %d",
                      iname, link_name, idx)


def GetAllInstancesInfo(hypervisor_list, all_hvparams):
  """Gather data about all instances.

  This is the equivalent of L{GetInstanceInfo}, except that it
  computes data for all instances at once, thus being faster if one
  needs data about more than one instance.

  @type hypervisor_list: list
  @param hypervisor_list: list of hypervisors to query for instance data
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: mapping of hypervisor names to hvparams

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus

  """
  output = {}

  for hname in hypervisor_list:
    hvparams = all_hvparams[hname]
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(hvparams)
    if iinfo:
      for name, _, memory, vcpus, state, times in iinfo:
        value = {
          "memory": memory,
          "vcpus": vcpus,
          "state": state,
          "time": times,
          }
        if name in output:
          # we only check static parameters, like memory and vcpus,
          # and not state and time which can change between the
          # invocations of the different hypervisors
          for key in "memory", "vcpus":
            if value[key] != output[name][key]:
              _Fail("Instance %s is running twice"
                    " with different parameters", name)
        output[name] = value

  return output


def _InstanceLogName(kind, os_name, instance, component):
  """Compute the OS log filename for a given instance and operation.

  The instance name and os name are passed in as strings since not all
  operations have these as part of an instance object.

  @type kind: string
  @param kind: the operation type (e.g. add, import, etc.)
  @type os_name: string
  @param os_name: the os name
  @type instance: string
  @param instance: the name of the instance being imported/added/etc.
  @type component: string or None
  @param component: the name of the component of the instance being
      transferred

  """
  # TODO: Use tempfile.mkstemp to create unique filename
  if component:
    assert "/" not in component
    c_msg = "-%s" % component
  else:
    c_msg = ""
  base = ("%s-%s-%s%s-%s.log" %
          (kind, os_name, instance, c_msg, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_OS_DIR, base)


def InstanceOsAdd(instance, reinstall, debug):
  """Add an OS to an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type reinstall: boolean
  @param reinstall: whether this is an instance reinstall
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None

  """
  inst_os = OSFromDisk(instance.os)

  create_env = OSEnvironment(instance, inst_os, debug)
  if reinstall:
    create_env["INSTANCE_REINSTALL"] = "1"

  logfile = _InstanceLogName("add", instance.os, instance.name, None)

  result = utils.RunCmd([inst_os.create_script], env=create_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)
  if result.failed:
    logging.error("os create command '%s' returned error: %s, logfile: %s,"
                  " output: %s", result.cmd, result.fail_reason, logfile,
                  result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS create script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)


def RunRenameInstance(instance, old_name, debug):
  """Run the OS rename script for an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be renamed
  @type old_name: string
  @param old_name: previous instance name
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: boolean
  @return: the success of the operation

  """
  inst_os = OSFromDisk(instance.os)

  rename_env = OSEnvironment(instance, inst_os, debug)
  rename_env["OLD_INSTANCE_NAME"] = old_name

  logfile = _InstanceLogName("rename", instance.os,
                             "%s-%s" % (old_name, instance.name), None)

  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)

  if result.failed:
    logging.error("os rename command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS rename script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1551

    
1552

    
1553
def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
1554
  """Returns symlink path for block device.
1555

1556
  """
1557
  if _dir is None:
1558
    _dir = pathutils.DISK_LINKS_DIR
1559

    
1560
  return utils.PathJoin(_dir,
1561
                        ("%s%s%s" %
1562
                         (instance_name, constants.DISK_SEPARATOR, idx)))
1563

    
1564

    
1565
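

# Illustrative example for _GetBlockDevSymlinkPath (assumes the default
# build-time settings, i.e. pathutils.DISK_LINKS_DIR being
# /var/run/ganeti/instance-disks and constants.DISK_SEPARATOR being ":"):
#   _GetBlockDevSymlinkPath("inst1.example.com", 0)
# would return
#   /var/run/ganeti/instance-disks/inst1.example.com:0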


def _SymlinkBlockDev(instance_name, device_path, idx):
  """Set up symlinks to an instance's block device.

  This is an auxiliary function run when an instance is started (on the
  primary node) or when an instance is migrated (on the target node).

  @param instance_name: the name of the target instance
  @param device_path: path of the physical block device, on the node
  @param idx: the disk index
  @return: absolute path to the disk's symlink

  """
  link_name = _GetBlockDevSymlinkPath(instance_name, idx)
  try:
    os.symlink(device_path, link_name)
  except OSError, err:
    if err.errno == errno.EEXIST:
      if (not os.path.islink(link_name) or
          os.readlink(link_name) != device_path):
        os.remove(link_name)
        os.symlink(device_path, link_name)
    else:
      raise

  return link_name


def _RemoveBlockDevLinks(instance_name, disks):
  """Remove the block device symlinks belonging to the given instance.

  """
  for idx, _ in enumerate(disks):
    link_name = _GetBlockDevSymlinkPath(instance_name, idx)
    if os.path.islink(link_name):
      try:
        os.remove(link_name)
      except OSError:
        logging.exception("Can't remove symlink '%s'", link_name)


def _GatherAndLinkBlockDevs(instance):
  """Set up an instance's block device(s).

  This is run on the primary node at instance startup. The block
  devices must be already assembled.

  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should assemble
  @rtype: list
  @return: list of (disk_object, device_path)

  """
  block_devices = []
  for idx, disk in enumerate(instance.disks):
    device = _RecursiveFindBD(disk)
    if device is None:
      raise errors.BlockDeviceError("Block device '%s' is not set up." %
                                    str(disk))
    device.Open()
    try:
      link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
    except OSError, e:
      raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
                                    e.strerror)

    block_devices.append((disk, link_name))

  return block_devices


def StartInstance(instance, startup_paused, reason, store_reason=True):
  """Start an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type startup_paused: bool
  @param startup_paused: pause instance at startup?
  @type reason: list of reasons
  @param reason: the reason trail for this startup
  @type store_reason: boolean
  @param store_reason: whether to store the startup reason trail on file
  @rtype: None

  """
  running_instances = GetInstanceListForHypervisor(instance.hypervisor,
                                                   instance.hvparams)

  if instance.name in running_instances:
    logging.info("Instance %s already running, not starting", instance.name)
    return

  try:
    block_devices = _GatherAndLinkBlockDevs(instance)
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    hyper.StartInstance(instance, block_devices, startup_paused)
    if store_reason:
      _StoreInstReasonTrail(instance.name, reason)
  except errors.BlockDeviceError, err:
    _Fail("Block device error: %s", err, exc=True)
  except errors.HypervisorError, err:
    _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Hypervisor error: %s", err, exc=True)


def InstanceShutdown(instance, timeout, reason, store_reason=True):
  """Shut an instance down.

  @note: this function uses polling with a hardcoded timeout.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type timeout: integer
  @param timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this shutdown
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  hv_name = instance.hypervisor
  hyper = hypervisor.GetHypervisor(hv_name)
  iname = instance.name

  if instance.name not in hyper.ListInstances(instance.hvparams):
    logging.info("Instance %s not running, doing nothing", iname)
    return

  class _TryShutdown:
    def __init__(self):
      self.tried_once = False

    def __call__(self):
      if iname not in hyper.ListInstances(instance.hvparams):
        return

      try:
        hyper.StopInstance(instance, retry=self.tried_once)
        if store_reason:
          _StoreInstReasonTrail(instance.name, reason)
      except errors.HypervisorError, err:
        if iname not in hyper.ListInstances(instance.hvparams):
          # if the instance no longer exists, consider this a
          # success and go to cleanup
          return

        _Fail("Failed to stop instance %s: %s", iname, err)

      self.tried_once = True

      raise utils.RetryAgain()

  try:
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
    # the shutdown did not succeed
    logging.error("Shutdown of '%s' unsuccessful, forcing", iname)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
      if iname in hyper.ListInstances(instance.hvparams):
        # only raise an error if the instance still exists, otherwise
        # the error could simply be "instance ... unknown"!
        _Fail("Failed to force stop instance %s: %s", iname, err)

    time.sleep(1)

    if iname in hyper.ListInstances(instance.hvparams):
      _Fail("Could not shut down instance %s even by destroy", iname)

  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(iname, instance.disks)
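

# A minimal sketch of the retry pattern used by InstanceShutdown above
# (illustrative only; _StillRunning is a hypothetical helper):
# utils.Retry invokes the callable every 5 seconds until it returns
# normally (success), raises something other than utils.RetryAgain
# (hard failure), or the overall timeout expires, in which case
# utils.RetryTimeout is raised and the caller falls back to forcing.
#
#   def _Poll():
#     if _StillRunning():        # hypothetical check
#       raise utils.RetryAgain() # ask utils.Retry to call us again
#
#   utils.Retry(_Poll, 5, timeout)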


def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
  """Reboot an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object to reboot
  @type reboot_type: str
  @param reboot_type: the type of reboot, one of the following
    constants:
      - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
        instance OS, do not recreate the VM
      - L{constants.INSTANCE_REBOOT_HARD}: tear down and
        restart the VM (at the hypervisor level)
      - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
        not accepted here, since that mode is handled differently, in
        cmdlib, and translates into full stop and start of the
        instance (instead of a call_instance_reboot RPC)
  @type shutdown_timeout: integer
  @param shutdown_timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this reboot
  @rtype: None

  """
  running_instances = GetInstanceListForHypervisor(instance.hypervisor,
                                                   instance.hvparams)

  if instance.name not in running_instances:
    _Fail("Cannot reboot instance %s that is not running", instance.name)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
    try:
      hyper.RebootInstance(instance)
    except errors.HypervisorError, err:
      _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
    try:
      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
      result = StartInstance(instance, False, reason, store_reason=False)
      _StoreInstReasonTrail(instance.name, reason)
      return result
    except errors.HypervisorError, err:
      _Fail("Failed to hard reboot instance %s: %s", instance.name, err)
  else:
    _Fail("Invalid reboot_type received: %s", reboot_type)


def InstanceBalloonMemory(instance, memory):
  """Resize an instance's memory.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type memory: int
  @param memory: new memory amount in MB
  @rtype: None

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  running = hyper.ListInstances(instance.hvparams)
  if instance.name not in running:
    logging.info("Instance %s is not running, cannot balloon", instance.name)
    return
  try:
    hyper.BalloonInstanceMemory(instance, memory)
  except errors.HypervisorError, err:
    _Fail("Failed to balloon instance memory: %s", err, exc=True)


def MigrationInfo(instance):
  """Gather information about an instance to be migrated.

  @type instance: L{objects.Instance}
  @param instance: the instance definition

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    info = hyper.MigrationInfo(instance)
  except errors.HypervisorError, err:
    _Fail("Failed to fetch migration information: %s", err, exc=True)
  return info


def AcceptInstance(instance, info, target):
  """Prepare the node to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type target: string
  @param target: target host (usually an IP), on this node

  """
  # TODO: why is this required only for DTS_EXT_MIRROR?
  if instance.disk_template in constants.DTS_EXT_MIRROR:
    # Create the symlinks, as the disks are not active
    # in any way
    try:
      _GatherAndLinkBlockDevs(instance)
    except errors.BlockDeviceError, err:
      _Fail("Block device error: %s", err, exc=True)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.AcceptInstance(instance, info, target)
  except errors.HypervisorError, err:
    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Failed to accept instance: %s", err, exc=True)


def FinalizeMigrationDst(instance, info, success):
  """Finalize any preparation to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type success: boolean
  @param success: whether the migration was a success or a failure

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.FinalizeMigrationDst(instance, info, success)
  except errors.HypervisorError, err:
    _Fail("Failed to finalize migration on the target node: %s", err, exc=True)


def MigrateInstance(cluster_name, instance, target, live):
  """Migrates an instance to another node.

  @type cluster_name: string
  @param cluster_name: name of the cluster
  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type target: string
  @param target: the target node name
  @type live: boolean
  @param live: whether the migration should be done live or not (the
      interpretation of this parameter is left to the hypervisor)
  @raise RPCFail: if migration fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.MigrateInstance(cluster_name, instance, target, live)
  except errors.HypervisorError, err:
    _Fail("Failed to migrate instance: %s", err, exc=True)


def FinalizeMigrationSource(instance, success, live):
  """Finalize the instance migration on the source node.

  @type instance: L{objects.Instance}
  @param instance: the instance definition of the migrated instance
  @type success: bool
  @param success: whether the migration succeeded or not
  @type live: bool
  @param live: whether the user requested a live migration or not
  @raise RPCFail: If the execution fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.FinalizeMigrationSource(instance, success, live)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to finalize the migration on the source node: %s", err,
          exc=True)


def GetMigrationStatus(instance):
  """Get the migration status.

  @type instance: L{objects.Instance}
  @param instance: the instance that is being migrated
  @rtype: L{objects.MigrationStatus}
  @return: the status of the current migration (one of
           L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
           progress info that can be retrieved from the hypervisor
  @raise RPCFail: If the migration status cannot be retrieved

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    return hyper.GetMigrationStatus(instance)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to get migration status: %s", err, exc=True)


def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
  """Creates a block device for an instance.

  @type disk: L{objects.Disk}
  @param disk: the object describing the disk we should create
  @type size: int
  @param size: the size of the physical underlying device, in MiB
  @type owner: str
  @param owner: the name of the instance for which the disk is created,
      used for device cache data
  @type on_primary: boolean
  @param on_primary: indicates if it is the primary node or not
  @type info: string
  @param info: string that will be sent to the physical device
      creation, used for example to set (LVM) tags on LVs
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  @return: the new unique_id of the device (this can sometimes be
      computed only after creation), or None. On secondary nodes,
      it's not required to return anything.

  """
  # TODO: remove the obsolete "size" argument
  # pylint: disable=W0613
  clist = []
  if disk.children:
    for child in disk.children:
      try:
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
      except errors.BlockDeviceError, err:
        _Fail("Can't assemble device %s: %s", child, err)
      if on_primary or disk.AssembleOnSecondary():
        # we need the children open in case the device itself has to
        # be assembled
        try:
          # pylint: disable=E1103
          crdev.Open()
        except errors.BlockDeviceError, err:
          _Fail("Can't make child '%s' read-write: %s", child, err)
      clist.append(crdev)

  try:
    device = bdev.Create(disk, clist, excl_stor)
  except errors.BlockDeviceError, err:
    _Fail("Can't create block device: %s", err)

  if on_primary or disk.AssembleOnSecondary():
    try:
      device.Assemble()
    except errors.BlockDeviceError, err:
      _Fail("Can't assemble device after creation, unusual event: %s", err)
    if on_primary or disk.OpenOnSecondary():
      try:
        device.Open(force=True)
      except errors.BlockDeviceError, err:
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
    DevCacheManager.UpdateCache(device.dev_path, owner,
                                on_primary, disk.iv_name)

  device.SetInfo(info)

  return device.unique_id


def _WipeDevice(path, offset, size):
  """This function actually wipes the device.

  @param path: The path to the device to wipe
  @param offset: The offset in MiB in the file
  @param size: The size in MiB to write

  """
  # Internal sizes are always in Mebibytes; if the following "dd" command
  # should use a different block size the offset and size given to this
  # function must be adjusted accordingly before being passed to "dd".
  block_size = 1024 * 1024

  cmd = [constants.DD_CMD, "if=/dev/zero", "seek=%d" % offset,
         "bs=%s" % block_size, "oflag=direct", "of=%s" % path,
         "count=%d" % size]
  result = utils.RunCmd(cmd)

  if result.failed:
    _Fail("Wipe command '%s' exited with error: %s; output: %s", result.cmd,
          result.fail_reason, result.output)
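

# Illustrative expansion of the command built by _WipeDevice (device path
# made up): since "dd" runs with a 1 MiB block size, offset and size map
# one-to-one to mebibytes, so
#   _WipeDevice("/dev/xenvg/inst1-disk0", 512, 1024)
# runs roughly
#   dd if=/dev/zero seek=512 bs=1048576 oflag=direct of=/dev/xenvg/inst1-disk0 count=1024
# i.e. it zeroes 1 GiB starting 512 MiB into the device.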


def BlockdevWipe(disk, offset, size):
  """Wipes a block device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to wipe
  @type offset: int
  @param offset: The offset in MiB in the file
  @type size: int
  @param size: The size in MiB to write

  """
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError:
    rdev = None

  if not rdev:
    _Fail("Cannot execute wipe for device %s: device not found", disk.iv_name)

  # Cross-verify some of the parameters
  if offset < 0:
    _Fail("Negative offset")
  if size < 0:
    _Fail("Negative size")
  if offset > rdev.size:
    _Fail("Offset is bigger than device size")
  if (offset + size) > rdev.size:
    _Fail("The provided offset and size to wipe are bigger than device size")

  _WipeDevice(rdev.dev_path, offset, size)


def BlockdevPauseResumeSync(disks, pause):
  """Pause or resume the sync of the block device.

  @type disks: list of L{objects.Disk}
  @param disks: the disk objects whose sync we want to pause/resume
  @type pause: bool
  @param pause: Whether to pause or resume

  """
  success = []
  for disk in disks:
    try:
      rdev = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      rdev = None

    if not rdev:
      success.append((False, ("Cannot change sync for device %s:"
                              " device not found" % disk.iv_name)))
      continue

    result = rdev.PauseResumeSync(pause)

    if result:
      success.append((result, None))
    else:
      if pause:
        msg = "Pause"
      else:
        msg = "Resume"
      success.append((result, "%s for device %s failed" % (msg, disk.iv_name)))

  return success
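

# Illustrative return value of BlockdevPauseResumeSync (device names made
# up): pausing the sync of two disks where the second one is not
# currently assembled would produce
#   [(True, None),
#    (False, "Cannot change sync for device disk/1: device not found")]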


def BlockdevRemove(disk):
  """Remove a block device.

  @note: This is intended to be called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk object we should remove
  @rtype: boolean
  @return: the success of the operation

  """
  msgs = []
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError, err:
    # probably can't attach
    logging.info("Can't attach to device %s in remove", disk)
    rdev = None
  if rdev is not None:
    r_path = rdev.dev_path
    try:
      rdev.Remove()
    except errors.BlockDeviceError, err:
      msgs.append(str(err))
    if not msgs:
      DevCacheManager.RemoveCache(r_path)

  if disk.children:
    for child in disk.children:
      try:
        BlockdevRemove(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))


def _RecursiveAssembleBD(disk, owner, as_primary):
  """Activate a block device for an instance.

  This is run on the primary and secondary nodes for an instance.

  @note: this function is called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk we try to assemble
  @type owner: str
  @param owner: the name of the instance which owns the disk
  @type as_primary: boolean
  @param as_primary: if we should make the block device
      read/write

  @return: the assembled device or None (in case no device
      was assembled)
  @raise errors.BlockDeviceError: in case there is an error
      during the activation of the children or the device
      itself

  """
  children = []
  if disk.children:
    mcn = disk.ChildrenNeeded()
    if mcn == -1:
      mcn = 0 # max number of Nones allowed
    else:
      mcn = len(disk.children) - mcn # max number of Nones
    for chld_disk in disk.children:
      try:
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
      except errors.BlockDeviceError, err:
        if children.count(None) >= mcn:
          raise
        cdev = None
        logging.error("Error in child activation (but continuing): %s",
                      str(err))
      children.append(cdev)

  if as_primary or disk.AssembleOnSecondary():
    r_dev = bdev.Assemble(disk, children)
    result = r_dev
    if as_primary or disk.OpenOnSecondary():
      r_dev.Open()
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
                                as_primary, disk.iv_name)
  else:
    result = True
  return result
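

# Illustrative walk-through of the "mcn" bookkeeping above, for a
# hypothetical DRBD8 disk with two LV children whose ChildrenNeeded()
# returns 1: at most len(children) - 1 == 1 child may fail to assemble
# (i.e. appear as None) before the error is re-raised, while a disk whose
# ChildrenNeeded() is -1 tolerates no failed children at all.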


def BlockdevAssemble(disk, owner, as_primary, idx):
  """Activate a block device for an instance.

  This is a wrapper over _RecursiveAssembleBD.

  @rtype: str or boolean
  @return: a C{/dev/...} path for primary nodes, and
      C{True} for secondary nodes

  """
  try:
    result = _RecursiveAssembleBD(disk, owner, as_primary)
    if isinstance(result, BlockDev):
      # pylint: disable=E1103
      result = result.dev_path
      if as_primary:
        _SymlinkBlockDev(owner, result, idx)
  except errors.BlockDeviceError, err:
    _Fail("Error while assembling disk: %s", err, exc=True)
  except OSError, err:
    _Fail("Error while symlinking disk: %s", err, exc=True)

  return result


def BlockdevShutdown(disk):
  """Shut down a block device.

  First, if the device is assembled (Attach() is successful), then
  the device is shut down. Then the children of the device are
  shut down.

  This function is called recursively. Note that we don't cache the
  children, as opposed to assemble: shutting down different
  devices doesn't require that the upper device was active.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk we should
      shut down
  @rtype: None

  """
  msgs = []
  r_dev = _RecursiveFindBD(disk)
  if r_dev is not None:
    r_path = r_dev.dev_path
    try:
      r_dev.Shutdown()
      DevCacheManager.RemoveCache(r_path)
    except errors.BlockDeviceError, err:
      msgs.append(str(err))

  if disk.children:
    for child in disk.children:
      try:
        BlockdevShutdown(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))


def BlockdevAddchildren(parent_cdev, new_cdevs):
  """Extend a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk to which we should add children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should add
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
  new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
  if new_bdevs.count(None) > 0:
    _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
  parent_bdev.AddChildren(new_bdevs)


def BlockdevRemovechildren(parent_cdev, new_cdevs):
  """Shrink a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk from which we should remove children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should remove
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)
  devs = []
  for disk in new_cdevs:
    rpath = disk.StaticDevPath()
    if rpath is None:
      bd = _RecursiveFindBD(disk)
      if bd is None:
        _Fail("Can't find device %s while removing children", disk)
      else:
        devs.append(bd.dev_path)
    else:
      if not utils.IsNormAbsPath(rpath):
        _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
      devs.append(rpath)
  parent_bdev.RemoveChildren(devs)


def BlockdevGetmirrorstatus(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list
  @return: list of L{objects.BlockDevStatus}, one for each disk
  @raise errors.BlockDeviceError: if any of the disks cannot be
      found

  """
  stats = []
  for dsk in disks:
    rbd = _RecursiveFindBD(dsk)
    if rbd is None:
      _Fail("Can't find device %s", dsk)

    stats.append(rbd.CombinedSyncStatus())

  return stats


def BlockdevGetmirrorstatusMulti(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list
  @return: list of tuples (bool, status), one for each disk; bool denotes
    success/failure, status is L{objects.BlockDevStatus} on success, string
    otherwise

  """
  result = []
  for disk in disks:
    try:
      rbd = _RecursiveFindBD(disk)
      if rbd is None:
        result.append((False, "Can't find device %s" % disk))
        continue

      status = rbd.CombinedSyncStatus()
    except errors.BlockDeviceError, err:
      logging.exception("Error while getting disk status")
      result.append((False, str(err)))
    else:
      result.append((True, status))

  assert len(disks) == len(result)

  return result
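

# Illustrative return value of BlockdevGetmirrorstatusMulti (contents made
# up): querying two disks where the second one cannot be attached to
# would produce something like
#   [(True, <objects.BlockDevStatus instance>),
#    (False, "Can't find device <repr of the disk object>")]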


def _RecursiveFindBD(disk):
  """Check if a device is activated.

  If so, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we need to find

  @return: None if the device can't be found,
      otherwise the device instance

  """
  children = []
  if disk.children:
    for chdisk in disk.children:
      children.append(_RecursiveFindBD(chdisk))

  return bdev.FindDevice(disk, children)


def _OpenRealBD(disk):
  """Opens the underlying block device of a disk.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to open

  """
  real_disk = _RecursiveFindBD(disk)
  if real_disk is None:
    _Fail("Block device '%s' is not set up", disk)

  real_disk.Open()

  return real_disk


def BlockdevFind(disk):
  """Check if a device is activated.

  If it is, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk to find
  @rtype: None or objects.BlockDevStatus
  @return: None if the disk cannot be found, otherwise the current
           sync status information

  """
  try:
    rbd = _RecursiveFindBD(disk)
  except errors.BlockDeviceError, err:
    _Fail("Failed to find device: %s", err, exc=True)

  if rbd is None:
    return None

  return rbd.GetSyncStatus()


def BlockdevGetdimensions(disks):
  """Computes the size of the given disks.

  If a disk is not found, returns None instead.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks to compute the size for
  @rtype: list
  @return: list with elements None if the disk cannot be found,
      otherwise the pair (size, spindles), where spindles is None if the
      device doesn't support that

  """
  result = []
  for cf in disks:
    try:
      rbd = _RecursiveFindBD(cf)
    except errors.BlockDeviceError:
      result.append(None)
      continue
    if rbd is None:
      result.append(None)
    else:
      result.append(rbd.GetActualDimensions())
  return result


def BlockdevExport(disk, dest_node, dest_path, cluster_name):
  """Export a block device to a remote node.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk to export
  @type dest_node: str
  @param dest_node: the destination node to export to
  @type dest_path: str
  @param dest_path: the destination path on the target node
  @type cluster_name: str
  @param cluster_name: the cluster name, needed for SSH hostalias
  @rtype: None

  """
  real_disk = _OpenRealBD(disk)

  # the block size on the read dd is 1MiB to match our units
  expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
                               "dd if=%s bs=1048576 count=%s",
                               real_disk.dev_path, str(disk.size))

  # we set a smaller block size here because, due to ssh buffering, more
  # than 64-128k will be mostly ignored; we use nocreat to fail if the
  # device is not already there or we pass a wrong path; we use
  # notrunc to not attempt truncation on an LV device; we use oflag=dsync
  # to not buffer too much memory; this means that at best, we flush
  # every 64k, which will not be very fast
  destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
                                " oflag=dsync", dest_path)

  remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
                                                   constants.SSH_LOGIN_USER,
                                                   destcmd)

  # all commands have been checked, so we're safe to combine them
  command = "|".join([expcmd, utils.ShellQuoteArgs(remotecmd)])

  result = utils.RunCmd(["bash", "-c", command])

  if result.failed:
    _Fail("Disk copy command '%s' returned error: %s;"
          " output: %s", command, result.fail_reason, result.output)
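

# Illustrative shape of the pipeline assembled by BlockdevExport (paths
# and host made up):
#   set -e; set -o pipefail; dd if=/dev/xenvg/inst1-disk0 bs=1048576 count=1024 \
#     | ssh ... dest.example.com 'dd of=/tmp/disk0.img conv=nocreat,notrunc bs=65536 oflag=dsync'
# i.e. the local dd reads whole mebibytes while the remote dd re-blocks
# the stream into 64k synchronous writes.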


def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
  """Write a file to the filesystem.

  This allows the master to overwrite(!) a file. It will only perform
  the operation if the file belongs to a list of configuration files.

  @type file_name: str
  @param file_name: the target file name
  @type data: str
  @param data: the new contents of the file
  @type mode: int
  @param mode: the mode to give the file (can be None)
  @type uid: string
  @param uid: the owner of the file
  @type gid: string
  @param gid: the group of the file
  @type atime: float
  @param atime: the atime to set on the file (can be None)
  @type mtime: float
  @param mtime: the mtime to set on the file (can be None)
  @rtype: None

  """
  file_name = vcluster.LocalizeVirtualPath(file_name)

  if not os.path.isabs(file_name):
    _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)

  if file_name not in _ALLOWED_UPLOAD_FILES:
    _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
          file_name)

  raw_data = _Decompress(data)

  if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
    _Fail("Invalid username/groupname type")

  getents = runtime.GetEnts()
  uid = getents.LookupUser(uid)
  gid = getents.LookupGroup(gid)

  utils.SafeWriteFile(file_name, None,
                      data=raw_data, mode=mode, uid=uid, gid=gid,
                      atime=atime, mtime=mtime)


def RunOob(oob_program, command, node, timeout):
  """Executes oob_program with the given command on the given node.

  @param oob_program: The path to the executable oob_program
  @param command: The command to invoke on oob_program
  @param node: The node given as an argument to the program
  @param timeout: Timeout after which we kill the oob program

  @return: stdout
  @raise RPCFail: If execution fails for some reason

  """
  result = utils.RunCmd([oob_program, command, node], timeout=timeout)

  if result.failed:
    _Fail("'%s' failed with reason '%s'; output: %s", result.cmd,
          result.fail_reason, result.output)

  return result.stdout


def _OSOndiskAPIVersion(os_dir):
  """Compute and return the API version of a given OS.

  This function will try to read the API version of the OS residing in
  the 'os_dir' directory.

  @type os_dir: str
  @param os_dir: the directory in which we should look for the OS
  @rtype: tuple
  @return: tuple (status, data) with status denoting the validity and
      data holding either the valid versions or an error message

  """
  api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)

  try:
    st = os.stat(api_file)
  except EnvironmentError, err:
    return False, ("Required file '%s' not found under path %s: %s" %
                   (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))

  if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
    return False, ("File '%s' in %s is not a regular file" %
                   (constants.OS_API_FILE, os_dir))

  try:
    api_versions = utils.ReadFile(api_file).splitlines()
  except EnvironmentError, err:
    return False, ("Error while reading the API version file at %s: %s" %
                   (api_file, utils.ErrnoOrStr(err)))

  try:
    api_versions = [int(version.strip()) for version in api_versions]
  except (TypeError, ValueError), err:
    return False, ("API version(s) can't be converted to integer: %s" %
                   str(err))

  return True, api_versions
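

# Illustrative behaviour of _OSOndiskAPIVersion: for an OS directory whose
# API file (constants.OS_API_FILE, normally "ganeti_api_version") contains
# the two lines "20" and "15", it returns (True, [20, 15]); a missing or
# non-numeric file yields (False, "<error message>") instead.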


def DiagnoseOS(top_dirs=None):
  """Compute the validity for all OSes.

  @type top_dirs: list
  @param top_dirs: the list of directories in which to
      search (if not given defaults to
      L{pathutils.OS_SEARCH_PATH})
  @rtype: list of L{objects.OS}
  @return: a list of tuples (name, path, status, diagnose, variants,
      parameters, api_version) for all (potential) OSes under all
      search paths, where:
          - name is the (potential) OS name
          - path is the full path to the OS
          - status True/False is the validity of the OS
          - diagnose is the error message for an invalid OS, otherwise empty
          - variants is a list of supported OS variants, if any
          - parameters is a list of (name, help) parameters, if any
          - api_version is a list of supported OS API versions

  """
  if top_dirs is None:
    top_dirs = pathutils.OS_SEARCH_PATH

  result = []
  for dir_name in top_dirs:
    if os.path.isdir(dir_name):
      try:
        f_names = utils.ListVisibleFiles(dir_name)
      except EnvironmentError, err:
        logging.exception("Can't list the OS directory %s: %s", dir_name, err)
        break
      for name in f_names:
        os_path = utils.PathJoin(dir_name, name)
        status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
        if status:
          diagnose = ""
          variants = os_inst.supported_variants
          parameters = os_inst.supported_parameters
          api_versions = os_inst.api_versions
        else:
          diagnose = os_inst
          variants = parameters = api_versions = []
        result.append((name, os_path, status, diagnose, variants,
                       parameters, api_versions))

  return result


def _TryOSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name.

  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: tuple
  @return: success and either the OS instance if we find a valid one,
      or error message

  """
  if base_dir is None:
    os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
  else:
    os_dir = utils.FindFile(name, [base_dir], os.path.isdir)

  if os_dir is None:
    return False, "Directory for OS %s not found in search path" % name

  status, api_versions = _OSOndiskAPIVersion(os_dir)
  if not status:
    # push the error up
    return status, api_versions

  if not constants.OS_API_VERSIONS.intersection(api_versions):
    return False, ("API version mismatch for path '%s': found %s, want %s." %
                   (os_dir, api_versions, constants.OS_API_VERSIONS))

  # OS files dictionary; we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  os_files = dict.fromkeys(constants.OS_SCRIPTS, True)

  if max(api_versions) >= constants.OS_API_V15:
    os_files[constants.OS_VARIANTS_FILE] = False

  if max(api_versions) >= constants.OS_API_V20:
    os_files[constants.OS_PARAMETERS_FILE] = True
  else:
    del os_files[constants.OS_SCRIPT_VERIFY]

  for (filename, required) in os_files.items():
    os_files[filename] = utils.PathJoin(os_dir, filename)

    try:
      st = os.stat(os_files[filename])
    except EnvironmentError, err:
      if err.errno == errno.ENOENT and not required:
        del os_files[filename]
        continue
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, os_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, os_dir))

    if filename in constants.OS_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, os_dir))

  variants = []
  if constants.OS_VARIANTS_FILE in os_files:
    variants_file = os_files[constants.OS_VARIANTS_FILE]
    try:
      variants = \
        utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
    except EnvironmentError, err:
      # we accept missing files, but not other errors
      if err.errno != errno.ENOENT:
        return False, ("Error while reading the OS variants file at %s: %s" %
                       (variants_file, utils.ErrnoOrStr(err)))

  parameters = []
  if constants.OS_PARAMETERS_FILE in os_files:
    parameters_file = os_files[constants.OS_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the OS parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  os_obj = objects.OS(name=name, path=os_dir,
                      create_script=os_files[constants.OS_SCRIPT_CREATE],
                      export_script=os_files[constants.OS_SCRIPT_EXPORT],
                      import_script=os_files[constants.OS_SCRIPT_IMPORT],
                      rename_script=os_files[constants.OS_SCRIPT_RENAME],
                      verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
                                                 None),
                      supported_variants=variants,
                      supported_parameters=parameters,
                      api_versions=api_versions)
  return True, os_obj
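

# Illustrative on-disk layout that _TryOSFromDisk checks for (the file
# names shown are the usual values of the constants.OS_* settings and the
# directory is just an example from the default search path):
#   /srv/ganeti/os/debootstrap/
#     ganeti_api_version   # one API version per line, e.g. "20"
#     create, import, export, rename, verify   # executable scripts
#     variants.list        # optional from API version 15 on
#     parameters.list      # required from API version 20 on
# (for API versions below 20 the "verify" script is not required).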


def OSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name. Otherwise, it will raise an appropriate
  L{RPCFail} exception, detailing why this is not a valid OS.

  This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
  an exception but returns true/false status data.

  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
                     Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: L{objects.OS}
  @return: the OS instance if we find a valid one
  @raise RPCFail: if we don't find a valid OS

  """
  name_only = objects.OS.GetName(name)
  status, payload = _TryOSFromDisk(name_only, base_dir)

  if not status:
    _Fail(payload)

  return payload


def OSCoreEnv(os_name, inst_os, os_params, debug=0):
  """Calculate the basic environment for an os script.

  @type os_name: str
  @param os_name: full operating system name (including variant)
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type os_params: dict
  @param os_params: the OS parameters
  @type debug: integer
  @param debug: debug level (0 or 1, for OS API 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  result = {}
  api_version = \
    max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
  result["OS_API_VERSION"] = "%d" % api_version
  result["OS_NAME"] = inst_os.name
  result["DEBUG_LEVEL"] = "%d" % debug

  # OS variants
  if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
    variant = objects.OS.GetVariant(os_name)
    if not variant:
      variant = inst_os.supported_variants[0]
  else:
    variant = ""
  result["OS_VARIANT"] = variant

  # OS params
  for pname, pvalue in os_params.items():
    result["OSP_%s" % pname.upper()] = pvalue

  # Set a default path, otherwise programs called by OS scripts (or
  # even hooks called from OS scripts) might break, and we don't want
  # to have each script require setting a PATH variable
  result["PATH"] = constants.HOOKS_PATH

  return result
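

# Illustrative result of OSCoreEnv (OS and parameters made up): for
# os_name "debootstrap+testing" with os_params {"dhcp": "yes"}, the
# returned environment would contain roughly
#   OS_API_VERSION=20, OS_NAME=debootstrap, OS_VARIANT=testing,
#   DEBUG_LEVEL=0, OSP_DHCP=yes, PATH=<constants.HOOKS_PATH>
# (the "+variant" suffix of the full OS name is what objects.OS.GetVariant
# extracts).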


def OSEnvironment(instance, inst_os, debug=0):
  """Calculate the environment for an os script.

  @type instance: L{objects.Instance}
  @param instance: target instance for the os script run
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type debug: integer
  @param debug: debug level (0 or 1, for OS API 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)

  for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
    result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))

  result["HYPERVISOR"] = instance.hypervisor
  result["DISK_COUNT"] = "%d" % len(instance.disks)
  result["NIC_COUNT"] = "%d" % len(instance.nics)
  result["INSTANCE_SECONDARY_NODES"] = \
      ("%s" % " ".join(instance.secondary_nodes))

  # Disks
  for idx, disk in enumerate(instance.disks):
    real_disk = _OpenRealBD(disk)
    result["DISK_%d_PATH" % idx] = real_disk.dev_path
    result["DISK_%d_ACCESS" % idx] = disk.mode
    result["DISK_%d_UUID" % idx] = disk.uuid
    if disk.name:
      result["DISK_%d_NAME" % idx] = disk.name
    if constants.HV_DISK_TYPE in instance.hvparams:
      result["DISK_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_DISK_TYPE]
    if disk.dev_type in constants.LDS_BLOCK:
      result["DISK_%d_BACKEND_TYPE" % idx] = "block"
    elif disk.dev_type == constants.LD_FILE:
      result["DISK_%d_BACKEND_TYPE" % idx] = \
        "file:%s" % disk.physical_id[0]

  # NICs
  for idx, nic in enumerate(instance.nics):
    result["NIC_%d_MAC" % idx] = nic.mac
    result["NIC_%d_UUID" % idx] = nic.uuid
    if nic.name:
      result["NIC_%d_NAME" % idx] = nic.name
    if nic.ip:
      result["NIC_%d_IP" % idx] = nic.ip
    result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
    if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.nicparams[constants.NIC_LINK]:
      result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.netinfo:
      nobj = objects.Network.FromDict(nic.netinfo)
      result.update(nobj.HooksDict("NIC_%d_" % idx))
    if constants.HV_NIC_TYPE in instance.hvparams:
      result["NIC_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_NIC_TYPE]

  # HV/BE params
  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
    for key, value in source.items():
      result["INSTANCE_%s_%s" % (kind, key)] = str(value)

  return result


def DiagnoseExtStorage(top_dirs=None):
  """Compute the validity for all ExtStorage Providers.

  @type top_dirs: list
  @param top_dirs: the list of directories in which to
      search (if not given defaults to
      L{pathutils.ES_SEARCH_PATH})
  @rtype: list of L{objects.ExtStorage}
  @return: a list of tuples (name, path, status, diagnose, parameters)
      for all (potential) ExtStorage Providers under all
      search paths, where:
          - name is the (potential) ExtStorage Provider
          - path is the full path to the ExtStorage Provider
          - status True/False is the validity of the ExtStorage Provider
          - diagnose is the error message for an invalid ExtStorage Provider,
            otherwise empty
          - parameters is a list of (name, help) parameters, if any

  """
  if top_dirs is None:
    top_dirs = pathutils.ES_SEARCH_PATH

  result = []
  for dir_name in top_dirs:
    if os.path.isdir(dir_name):
      try:
        f_names = utils.ListVisibleFiles(dir_name)
      except EnvironmentError, err:
        logging.exception("Can't list the ExtStorage directory %s: %s",
                          dir_name, err)
        break
      for name in f_names:
        es_path = utils.PathJoin(dir_name, name)
        status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
        if status:
          diagnose = ""
          parameters = es_inst.supported_parameters
        else:
          diagnose = es_inst
          parameters = []
        result.append((name, es_path, status, diagnose, parameters))

  return result


def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
  """Grow a stack of block devices.

  This function is called recursively, with the children being the
  first ones to resize.

  @type disk: L{objects.Disk}
  @param disk: the disk to be grown
  @type amount: integer
  @param amount: the amount (in mebibytes) to grow with
  @type dryrun: boolean
  @param dryrun: whether to execute the operation in simulation mode
      only, without actually increasing the size
  @param backingstore: whether to execute the operation on backing storage
      only, or on "logical" storage only; e.g. DRBD is logical storage,
      whereas LVM, file, RBD are backing storage
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @rtype: (status, result)
  @return: a tuple with the status of the operation (True/False), and
      the error message if status is False

  """
  r_dev = _RecursiveFindBD(disk)
  if r_dev is None:
    _Fail("Cannot find block device %s", disk)

  try:
    r_dev.Grow(amount, dryrun, backingstore, excl_stor)
  except errors.BlockDeviceError, err:
    _Fail("Failed to grow block device: %s", err, exc=True)


def BlockdevSnapshot(disk):
  """Create a snapshot copy of a block device.

  This function is called recursively, and the snapshot is actually created
  just for the leaf lvm backend device.

  @type disk: L{objects.Disk}
  @param disk: the disk to be snapshotted
  @rtype: string
  @return: snapshot disk ID as (vg, lv)

  """
  if disk.dev_type == constants.LD_DRBD8:
    if not disk.children:
      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
            disk.unique_id)
    return BlockdevSnapshot(disk.children[0])
  elif disk.dev_type == constants.LD_LV:
    r_dev = _RecursiveFindBD(disk)
    if r_dev is not None:
      # FIXME: choose a saner value for the snapshot size
      # let's stay on the safe side and ask for the full size, for now
      return r_dev.Snapshot(disk.size)
    else:
      _Fail("Cannot find block device %s", disk)
  else:
    _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
          disk.unique_id, disk.dev_type)
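

# Illustrative flow of BlockdevSnapshot (hypothetical disk stack): for a
# DRBD8 disk whose first child is an LV, the recursion bottoms out at the
# LV leaf and returns the (vg, lv) pair of the newly created snapshot
# volume, e.g. ("xenvg", "inst1-disk0.snap"); any other leaf device type
# is rejected.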
def BlockdevSetInfo(disk, info):
2985
  """Sets 'metadata' information on block devices.
2986

2987
  This function sets 'info' metadata on block devices. Initial
2988
  information is set at device creation; this function should be used
2989
  for example after renames.
2990

2991
  @type disk: L{objects.Disk}
2992
  @param disk: the disk to be grown
2993
  @type info: string
2994
  @param info: new 'info' metadata
2995
  @rtype: (status, result)
2996
  @return: a tuple with the status of the operation (True/False), and
2997
      the errors message if status is False
2998

2999
  """
3000
  r_dev = _RecursiveFindBD(disk)
3001
  if r_dev is None:
3002
    _Fail("Cannot find block device %s", disk)
3003

    
3004
  try:
3005
    r_dev.SetInfo(info)
3006
  except errors.BlockDeviceError, err:
3007
    _Fail("Failed to set information on block device: %s", err, exc=True)
3008

    
3009

    
3010
def FinalizeExport(instance, snap_disks):
  """Write out the export configuration information.

  @type instance: L{objects.Instance}
  @param instance: the instance which we export, used for
      saving configuration
  @type snap_disks: list of L{objects.Disk}
  @param snap_disks: list of snapshot block devices, which
      will be used to get the actual name of the dump file

  @rtype: None

  """
  destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
  finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)

  config = objects.SerializableConfigParser()

  config.add_section(constants.INISECT_EXP)
  config.set(constants.INISECT_EXP, "version", "0")
  config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
  config.set(constants.INISECT_EXP, "source", instance.primary_node)
  config.set(constants.INISECT_EXP, "os", instance.os)
  config.set(constants.INISECT_EXP, "compression", "none")

  config.add_section(constants.INISECT_INS)
  config.set(constants.INISECT_INS, "name", instance.name)
  config.set(constants.INISECT_INS, "maxmem", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "minmem", "%d" %
             instance.beparams[constants.BE_MINMEM])
  # "memory" is deprecated, but useful for exporting to old ganeti versions
  config.set(constants.INISECT_INS, "memory", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "vcpus", "%d" %
             instance.beparams[constants.BE_VCPUS])
  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
  config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
  config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))

  nic_total = 0
  for nic_count, nic in enumerate(instance.nics):
    nic_total += 1
    config.set(constants.INISECT_INS, "nic%d_mac" %
               nic_count, "%s" % nic.mac)
    config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
    config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
               "%s" % nic.network)
    for param in constants.NICS_PARAMETER_TYPES:
      config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
                 "%s" % nic.nicparams.get(param, None))
  # TODO: redundant: on load we can read nics until one doesn't exist
  config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)

  disk_total = 0
  for disk_count, disk in enumerate(snap_disks):
    if disk:
      disk_total += 1
      config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                 ("%s" % disk.iv_name))
      config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
                 ("%s" % disk.physical_id[1]))
      config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                 ("%d" % disk.size))

  config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)

  # New-style hypervisor/backend parameters

  config.add_section(constants.INISECT_HYP)
  for name, value in instance.hvparams.items():
    if name not in constants.HVC_GLOBALS:
      config.set(constants.INISECT_HYP, name, str(value))

  config.add_section(constants.INISECT_BEP)
  for name, value in instance.beparams.items():
    config.set(constants.INISECT_BEP, name, str(value))

  config.add_section(constants.INISECT_OSP)
  for name, value in instance.osparams.items():
    config.set(constants.INISECT_OSP, name, str(value))

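  # Write the config into the temporary ".new" directory, then publish it by
  # removing any previous export of this instance and renaming the directory.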
  utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
                  data=config.Dumps())
  shutil.rmtree(finaldestdir, ignore_errors=True)
  shutil.move(destdir, finaldestdir)


def ExportInfo(dest):
  """Get export configuration information.

  @type dest: str
  @param dest: directory containing the export

  @rtype: str
  @return: a serialized form of the config file containing the
      export info

  """
  cff = utils.PathJoin(dest, constants.EXPORT_CONF_FILE)

  config = objects.SerializableConfigParser()
  config.read(cff)

  if (not config.has_section(constants.INISECT_EXP) or
      not config.has_section(constants.INISECT_INS)):
    _Fail("Export info file doesn't have the required fields")

  return config.Dumps()


def ListExports():
  """Return a list of exports currently available on this machine.

  @rtype: list
  @return: list of the exports

  """
  if os.path.isdir(pathutils.EXPORT_DIR):
    return sorted(utils.ListVisibleFiles(pathutils.EXPORT_DIR))
  else:
    _Fail("No exports directory")


def RemoveExport(export):
  """Remove an existing export from the node.

  @type export: str
  @param export: the name of the export to remove
  @rtype: None

  """
  target = utils.PathJoin(pathutils.EXPORT_DIR, export)

  try:
    shutil.rmtree(target)
  except EnvironmentError, err:
    _Fail("Error while removing the export: %s", err, exc=True)


def BlockdevRename(devlist):
  """Rename a list of block devices.

  @type devlist: list of tuples
  @param devlist: list of tuples of the form (disk, new_unique_id);
      disk is an L{objects.Disk} object describing the current disk,
      and new_unique_id is the logical_id we rename it to
  @rtype: None
  @raise RPCFail: if any of the renames fails

  """
  msgs = []
  result = True
  for disk, unique_id in devlist:
    dev = _RecursiveFindBD(disk)
    if dev is None:
      msgs.append("Can't find device %s in rename" % str(disk))
      result = False
      continue
    try:
      old_rpath = dev.dev_path
      dev.Rename(unique_id)
      new_rpath = dev.dev_path
      if old_rpath != new_rpath:
        DevCacheManager.RemoveCache(old_rpath)
        # FIXME: we should add the new cache information here, like:
        # DevCacheManager.UpdateCache(new_rpath, owner, ...)
        # but we don't have the owner here - maybe parse from existing
        # cache? for now, we only lose lvm data when we rename, which
        # is less critical than DRBD or MD
    except errors.BlockDeviceError, err:
      msgs.append("Can't rename device '%s' to '%s': %s" %
                  (dev, unique_id, err))
      logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
      result = False
  if not result:
    _Fail("; ".join(msgs))


def _TransformFileStorageDir(fs_dir):
  """Checks whether the given file_storage_dir is valid.

  Checks whether the given fs_dir is within the cluster-wide default
  file_storage_dir or the shared_file_storage_dir, which are stored in
  SimpleStore. Only paths under those directories are allowed.

  @type fs_dir: str
  @param fs_dir: the path to check

  @return: the normalized path if valid, None otherwise

  """
  filestorage.CheckFileStoragePath(fs_dir)

  return os.path.normpath(fs_dir)


def CreateFileStorageDir(file_storage_dir):
  """Create file storage directory.

  @type file_storage_dir: str
  @param file_storage_dir: directory to create

  @rtype: None

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if os.path.exists(file_storage_dir):
    if not os.path.isdir(file_storage_dir):
      _Fail("Specified storage dir '%s' is not a directory",
            file_storage_dir)
  else:
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      _Fail("Cannot create file storage directory '%s': %s",
            file_storage_dir, err, exc=True)


def RemoveFileStorageDir(file_storage_dir):
  """Remove file storage directory.

  Remove it only if it's empty. If not, the RPC call fails.

  @type file_storage_dir: str
  @param file_storage_dir: the directory we should clean up
  @rtype: None

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if os.path.exists(file_storage_dir):
    if not os.path.isdir(file_storage_dir):
      _Fail("Specified storage directory '%s' is not a directory",
            file_storage_dir)
    # deletes dir only if empty, otherwise we want to fail the rpc call
    try:
      os.rmdir(file_storage_dir)
    except OSError, err:
      _Fail("Cannot remove file storage directory '%s': %s",
            file_storage_dir, err)


def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
  """Rename the file storage directory.

  @type old_file_storage_dir: str
  @param old_file_storage_dir: the current path
  @type new_file_storage_dir: str
  @param new_file_storage_dir: the name we should rename to
  @rtype: None

  """
  old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
  new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
  if not os.path.exists(new_file_storage_dir):
    if os.path.isdir(old_file_storage_dir):
      try:
        os.rename(old_file_storage_dir, new_file_storage_dir)
      except OSError, err:
        _Fail("Cannot rename '%s' to '%s': %s",
              old_file_storage_dir, new_file_storage_dir, err)
    else:
      _Fail("Specified storage dir '%s' is not a directory",
            old_file_storage_dir)
  else:
    if os.path.exists(old_file_storage_dir):
      _Fail("Cannot rename '%s' to '%s': both locations exist",
            old_file_storage_dir, new_file_storage_dir)


def _EnsureJobQueueFile(file_name):
  """Checks whether the given filename is in the queue directory.

  @type file_name: str
  @param file_name: the file name we should check
  @rtype: None
  @raises RPCFail: if the file is not valid

  """
  if not utils.IsBelowDir(pathutils.QUEUE_DIR, file_name):
    _Fail("Passed job queue file '%s' does not belong to"
          " the queue directory '%s'", file_name, pathutils.QUEUE_DIR)


def JobQueueUpdate(file_name, content):
  """Updates a file in the queue directory.

  This is just a wrapper over L{utils.io.WriteFile}, with proper
  checking.

  @type file_name: str
  @param file_name: the job file name
  @type content: str
  @param content: the new job contents
  @rtype: None

  """
  file_name = vcluster.LocalizeVirtualPath(file_name)

  _EnsureJobQueueFile(file_name)
  getents = runtime.GetEnts()

  # Write and replace the file atomically
  utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
                  gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)


def JobQueueRename(old, new):
  """Renames a job queue file.

  This is just a wrapper over os.rename with proper checking.

  @type old: str
  @param old: the old (actual) file name
  @type new: str
  @param new: the desired file name
  @rtype: None

  """
  old = vcluster.LocalizeVirtualPath(old)
  new = vcluster.LocalizeVirtualPath(new)

  _EnsureJobQueueFile(old)
  _EnsureJobQueueFile(new)

  getents = runtime.GetEnts()

  utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
                   dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)


def BlockdevClose(instance_name, disks):
  """Closes the given block devices.

  This means they will be switched to secondary mode (in case of
  DRBD).

  @param instance_name: if the argument is not empty, the symlinks
      of this instance will be removed
  @type disks: list of L{objects.Disk}
  @param disks: the list of disks to be closed
  @rtype: None
  @raise RPCFail: if any device cannot be found or cannot be
      switched to secondary mode

  """
  bdevs = []
  for cf in disks:
    rd = _RecursiveFindBD(cf)
    if rd is None:
      _Fail("Can't find device %s", cf)
    bdevs.append(rd)

  msg = []
  for rd in bdevs:
    try:
      rd.Close()
    except errors.BlockDeviceError, err:
      msg.append(str(err))
  if msg:
    _Fail("Can't make devices secondary: %s", ",".join(msg))
  else:
    if instance_name:
      _RemoveBlockDevLinks(instance_name, disks)


def ValidateHVParams(hvname, hvparams):
  """Validates the given hypervisor parameters.

  @type hvname: string
  @param hvname: the hypervisor name
  @type hvparams: dict
  @param hvparams: the hypervisor parameters to be validated
  @rtype: None

  """
  try:
    hv_type = hypervisor.GetHypervisor(hvname)
    hv_type.ValidateParameters(hvparams)
  except errors.HypervisorError, err:
    _Fail(str(err), log=False)


def _CheckOSPList(os_obj, parameters):
  """Check whether a list of parameters is supported by the OS.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type parameters: list
  @param parameters: the list of parameters to check

  """
  supported = [v[0] for v in os_obj.supported_parameters]
  delta = frozenset(parameters).difference(supported)
  if delta:
    _Fail("The following parameters are not supported"
          " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))


def ValidateOS(required, osname, checks, osparams):
  """Validate the given OS' parameters.

  @type required: boolean
  @param required: whether absence of the OS should translate into
      failure or not
  @type osname: string
  @param osname: the OS to be validated
  @type checks: list
  @param checks: list of the checks to run (currently only 'parameters')
  @type osparams: dict
  @param osparams: dictionary with OS parameters
  @rtype: boolean
  @return: True if the validation passed, or False if the OS was not
      found and L{required} was false

  """
  if not constants.OS_VALIDATE_CALLS.issuperset(checks):
    _Fail("Unknown checks required for OS %s: %s", osname,
          set(checks).difference(constants.OS_VALIDATE_CALLS))

  name_only = objects.OS.GetName(osname)
  status, tbv = _TryOSFromDisk(name_only, None)

  if not status:
    if required:
      _Fail(tbv)
    else:
      return False

  if max(tbv.api_versions) < constants.OS_API_V20:
    return True

  if constants.OS_VALIDATE_PARAMETERS in checks:
    _CheckOSPList(tbv, osparams.keys())

  validate_env = OSCoreEnv(osname, tbv, osparams)
  result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
                        cwd=tbv.path, reset_env=True)
  if result.failed:
    logging.error("os validate command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    _Fail("OS validation script failed (%s), output: %s",
          result.fail_reason, result.output, log=False)

  return True


def DemoteFromMC():
  """Demotes the current node from master candidate role.

  """
  # try to ensure we're not the master by mistake
  master, myself = ssconf.GetMasterAndMyself()
  if master == myself:
    _Fail("ssconf status shows I'm the master node, will not demote")

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "check", constants.MASTERD])
  if not result.failed:
    _Fail("The master daemon is running, will not demote")

  try:
    if os.path.isfile(pathutils.CLUSTER_CONF_FILE):
      utils.CreateBackup(pathutils.CLUSTER_CONF_FILE)
  except EnvironmentError, err:
    if err.errno != errno.ENOENT:
      _Fail("Error while backing up cluster file: %s", err, exc=True)

  utils.RemoveFile(pathutils.CLUSTER_CONF_FILE)


def _GetX509Filenames(cryptodir, name):
  """Returns the full paths for the private key and certificate.

  """
  return (utils.PathJoin(cryptodir, name),
          utils.PathJoin(cryptodir, name, _X509_KEY_FILE),
          utils.PathJoin(cryptodir, name, _X509_CERT_FILE))


def CreateX509Certificate(validity, cryptodir=pathutils.CRYPTO_KEYS_DIR):
  """Creates a new X509 certificate for SSL/TLS.

  @type validity: int
  @param validity: Validity in seconds
  @rtype: tuple; (string, string)
  @return: Certificate name and public part

  """
  (key_pem, cert_pem) = \
    utils.GenerateSelfSignedX509Cert(netutils.Hostname.GetSysName(),
                                     min(validity, _MAX_SSL_CERT_VALIDITY))

  cert_dir = tempfile.mkdtemp(dir=cryptodir,
                              prefix="x509-%s-" % utils.TimestampForFilename())
  try:
    name = os.path.basename(cert_dir)
    assert len(name) > 5

    (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)

    utils.WriteFile(key_file, mode=0400, data=key_pem)
    utils.WriteFile(cert_file, mode=0400, data=cert_pem)

    # Never return private key as it shouldn't leave the node
    return (name, cert_pem)
  except Exception:
    shutil.rmtree(cert_dir, ignore_errors=True)
    raise


def RemoveX509Certificate(name, cryptodir=pathutils.CRYPTO_KEYS_DIR):
  """Removes an X509 certificate.

  @type name: string
  @param name: Certificate name

  """
  (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)

  utils.RemoveFile(key_file)
  utils.RemoveFile(cert_file)

  try:
    os.rmdir(cert_dir)
  except EnvironmentError, err:
    _Fail("Cannot remove certificate directory '%s': %s",
          cert_dir, err)


def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
  """Returns the command for the requested input/output.

  @type instance: L{objects.Instance}
  @param instance: The instance object
  @param mode: Import/export mode
  @param ieio: Input/output type
  @param ieargs: Input/output arguments

  """
  assert mode in (constants.IEM_IMPORT, constants.IEM_EXPORT)

  env = None
  prefix = None
  suffix = None
  exp_size = None

  if ieio == constants.IEIO_FILE:
    (filename, ) = ieargs

    if not utils.IsNormAbsPath(filename):
      _Fail("Path '%s' is not normalized or absolute", filename)

    real_filename = os.path.realpath(filename)
    directory = os.path.dirname(real_filename)

    if not utils.IsBelowDir(pathutils.EXPORT_DIR, real_filename):
      _Fail("File '%s' is not under exports directory '%s': %s",
            filename, pathutils.EXPORT_DIR, real_filename)

    # Create directory
    utils.Makedirs(directory, mode=0750)

    quoted_filename = utils.ShellQuote(filename)

    if mode == constants.IEM_IMPORT:
      suffix = "> %s" % quoted_filename
    elif mode == constants.IEM_EXPORT:
      suffix = "< %s" % quoted_filename

      # Retrieve file size
      try:
        st = os.stat(filename)
      except EnvironmentError, err:
        logging.error("Can't stat(2) %s: %s", filename, err)
      else:
        exp_size = utils.BytesToMebibyte(st.st_size)

  elif ieio == constants.IEIO_RAW_DISK:
    (disk, ) = ieargs

    real_disk = _OpenRealBD(disk)

    if mode == constants.IEM_IMPORT:
      # we set a smaller block size here because, due to transport buffering,
      # more than 64-128k will mostly be ignored; we use nocreat to fail if
      # the device is not already there or we pass a wrong path; we use
      # notrunc to not attempt to truncate an LV device; we use oflag=dsync
      # to avoid buffering too much memory; this means that at best we flush
      # every 64k, which will not be very fast
      suffix = utils.BuildShellCmd(("| dd of=%s conv=nocreat,notrunc"
                                    " bs=%s oflag=dsync"),
                                    real_disk.dev_path,
                                    str(64 * 1024))
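      # e.g. with a hypothetical LV path, the resulting suffix becomes
      # "| dd of=/dev/xenvg/inst1-disk0 conv=nocreat,notrunc bs=65536
      # oflag=dsync"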

    elif mode == constants.IEM_EXPORT:
      # the block size on the read dd is 1MiB to match our units
      prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
                                   real_disk.dev_path,
                                   str(1024 * 1024), # 1 MB
                                   str(disk.size))
      exp_size = disk.size

  elif ieio == constants.IEIO_SCRIPT:
    (disk, disk_index, ) = ieargs

    assert isinstance(disk_index, (int, long))

    real_disk = _OpenRealBD(disk)

    inst_os = OSFromDisk(instance.os)
    env = OSEnvironment(instance, inst_os)

    if mode == constants.IEM_IMPORT:
      env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
      env["IMPORT_INDEX"] = str(disk_index)
      script = inst_os.import_script

    elif mode == constants.IEM_EXPORT:
      env["EXPORT_DEVICE"] = real_disk.dev_path
      env["EXPORT_INDEX"] = str(disk_index)
      script = inst_os.export_script

    # TODO: Pass special environment only to script
    script_cmd = utils.BuildShellCmd("( cd %s && %s; )", inst_os.path, script)

    if mode == constants.IEM_IMPORT:
      suffix = "| %s" % script_cmd

    elif mode == constants.IEM_EXPORT:
      prefix = "%s |" % script_cmd

    # Let script predict size
    exp_size = constants.IE_CUSTOM_SIZE

  else:
    _Fail("Invalid %s I/O mode %r", mode, ieio)

  return (env, prefix, suffix, exp_size)


def _CreateImportExportStatusDir(prefix):
  """Creates status directory for import/export.

  """
  return tempfile.mkdtemp(dir=pathutils.IMPORT_EXPORT_DIR,
                          prefix=("%s-%s-" %
                                  (prefix, utils.TimestampForFilename())))


def StartImportExportDaemon(mode, opts, host, port, instance, component,
                            ieio, ieioargs):
  """Starts an import or export daemon.

  @param mode: Import/export mode
  @type opts: L{objects.ImportExportOptions}
  @param opts: Daemon options
  @type host: string
  @param host: Remote host for export (None for import)
  @type port: int
  @param port: Remote port for export (None for import)
  @type instance: L{objects.Instance}
  @param instance: Instance object
  @type component: string
  @param component: which part of the instance is transferred now,
      e.g. 'disk/0'
  @param ieio: Input/output type
  @param ieioargs: Input/output arguments

  """
  if mode == constants.IEM_IMPORT:
    prefix = "import"

    if not (host is None and port is None):
      _Fail("Cannot specify host or port on import")

  elif mode == constants.IEM_EXPORT:
    prefix = "export"

    if host is None or port is None:
      _Fail("Host and port must be specified for an export")

  else:
    _Fail("Invalid mode %r", mode)

  if (opts.key_name is None) ^ (opts.ca_pem is None):
    _Fail("Cluster certificate can only be used for both key and CA")

  (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
    _GetImportExportIoCommand(instance, mode, ieio, ieioargs)

  if opts.key_name is None:
    # Use server.pem
    key_path = pathutils.NODED_CERT_FILE
    cert_path = pathutils.NODED_CERT_FILE
    assert opts.ca_pem is None
  else:
    (_, key_path, cert_path) = _GetX509Filenames(pathutils.CRYPTO_KEYS_DIR,
                                                 opts.key_name)
    assert opts.ca_pem is not None

  for i in [key_path, cert_path]:
    if not os.path.exists(i):
      _Fail("File '%s' does not exist" % i)

  status_dir = _CreateImportExportStatusDir("%s-%s" % (prefix, component))
  try:
    status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
    pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
    ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)

    if opts.ca_pem is None:
      # Use server.pem
      ca = utils.ReadFile(pathutils.NODED_CERT_FILE)
    else:
      ca = opts.ca_pem

    # Write CA file
    utils.WriteFile(ca_file, data=ca, mode=0400)

    cmd = [
      pathutils.IMPORT_EXPORT_DAEMON,
      status_file, mode,
      "--key=%s" % key_path,
      "--cert=%s" % cert_path,
      "--ca=%s" % ca_file,
      ]

    if host:
      cmd.append("--host=%s" % host)

    if port:
      cmd.append("--port=%s" % port)

    if opts.ipv6:
      cmd.append("--ipv6")
    else:
      cmd.append("--ipv4")

    if opts.compress:
      cmd.append("--compress=%s" % opts.compress)

    if opts.magic:
      cmd.append("--magic=%s" % opts.magic)

    if exp_size is not None:
      cmd.append("--expected-size=%s" % exp_size)

    if cmd_prefix:
      cmd.append("--cmd-prefix=%s" % cmd_prefix)

    if cmd_suffix:
      cmd.append("--cmd-suffix=%s" % cmd_suffix)

    if mode == constants.IEM_EXPORT:
      # Retry connection a few times when connecting to remote peer
      cmd.append("--connect-retries=%s" % constants.RIE_CONNECT_RETRIES)
      cmd.append("--connect-timeout=%s" % constants.RIE_CONNECT_ATTEMPT_TIMEOUT)
    elif opts.connect_timeout is not None:
      assert mode == constants.IEM_IMPORT
      # Overall timeout for establishing connection while listening
      cmd.append("--connect-timeout=%s" % opts.connect_timeout)

    logfile = _InstanceLogName(prefix, instance.os, instance.name, component)

    # TODO: Once _InstanceLogName uses tempfile.mkstemp, StartDaemon has
    # support for receiving a file descriptor for output
    utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file,
                      output=logfile)

    # The import/export name is simply the status directory name
    return os.path.basename(status_dir)

  except Exception:
    shutil.rmtree(status_dir, ignore_errors=True)
    raise


def GetImportExportStatus(names):
  """Returns import/export daemon status.

  @type names: sequence
  @param names: List of names
  @rtype: List of dicts
  @return: a list with the state of each named import/export, with None
           for entries whose status couldn't be read

  """
  result = []

  for name in names:
    status_file = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name,
                                 _IES_STATUS_FILE)

    try:
      data = utils.ReadFile(status_file)
    except EnvironmentError, err:
      if err.errno != errno.ENOENT:
        raise
      data = None

    if not data:
      result.append(None)
      continue

    result.append(serializer.LoadJson(data))

  return result


def AbortImportExport(name):
  """Sends SIGTERM to a running import/export daemon.

  """
  logging.info("Abort import/export %s", name)

  status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)
  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))

  if pid:
    logging.info("Import/export %s is running with PID %s, sending SIGTERM",
                 name, pid)
    utils.IgnoreProcessNotFound(os.kill, pid, signal.SIGTERM)


def CleanupImportExport(name):
  """Cleanup after an import or export.

  If the import/export daemon is still running it's killed. Afterwards the
  whole status directory is removed.

  """
  logging.info("Finalizing import/export %s", name)

  status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)

  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))

  if pid:
    logging.info("Import/export %s is still running with PID %s",
                 name, pid)
    utils.KillProcess(pid, waitpid=False)

  shutil.rmtree(status_dir, ignore_errors=True)


def _SetPhysicalId(target_node_uuid, nodes_ip, disks):
  """Sets the correct physical ID on all passed disks.

  """
  for cf in disks:
    cf.SetPhysicalID(target_node_uuid, nodes_ip)


def _FindDisks(target_node_uuid, nodes_ip, disks):
  """Sets the physical ID on disks and returns the block devices.

  """
  _SetPhysicalId(target_node_uuid, nodes_ip, disks)

  bdevs = []

  for cf in disks:
    rd = _RecursiveFindBD(cf)
    if rd is None:
      _Fail("Can't find device %s", cf)
    bdevs.append(rd)
  return bdevs


def DrbdDisconnectNet(target_node_uuid, nodes_ip, disks):
  """Disconnects the network on a list of drbd devices.

  """
  bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)

  # disconnect disks
  for rd in bdevs:
    try:
      rd.DisconnectNet()
    except errors.BlockDeviceError, err:
      _Fail("Can't change network configuration to standalone mode: %s",
            err, exc=True)


def DrbdAttachNet(target_node_uuid, nodes_ip, disks, instance_name,
                  multimaster):
  """Attaches the network on a list of drbd devices.

  """
  bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)

  if multimaster:
    for idx, rd in enumerate(bdevs):
      try:
        _SymlinkBlockDev(instance_name, rd.dev_path, idx)
      except EnvironmentError, err:
        _Fail("Can't create symlink: %s", err)
  # reconnect disks, switch to new master configuration and if
  # needed primary mode
  for rd in bdevs:
    try:
      rd.AttachNet(multimaster)
    except errors.BlockDeviceError, err:
      _Fail("Can't change network configuration: %s", err)

  # wait until the disks are connected; we need to retry the re-attach
  # if the device becomes standalone, as this might happen if one node
  # disconnects and reconnects in a different mode before the other
  # node reconnects; in this case, one or both of the nodes will
  # decide it has the wrong configuration and switch to standalone

  def _Attach():
    all_connected = True

    for rd in bdevs:
      stats = rd.GetProcStatus()

      all_connected = (all_connected and
                       (stats.is_connected or stats.is_in_resync))

      if stats.is_standalone:
        # peer had different config info and this node became
        # standalone, even though this should not happen with the
        # new staged way of changing disk configs
        try:
          rd.AttachNet(multimaster)
        except errors.BlockDeviceError, err:
          _Fail("Can't change network configuration: %s", err)

    if not all_connected:
      raise utils.RetryAgain()

  try:
    # Start with a delay of 100 milliseconds and go up to 5 seconds
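    # (the tuple form of the delay argument means (start, factor, limit) for
    # the exponential backoff, with 2 * 60 seconds as the overall timeout)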
    utils.Retry(_Attach, (0.1, 1.5, 5.0), 2 * 60)
  except utils.RetryTimeout:
    _Fail("Timeout in disk reconnecting")

  if multimaster:
    # change to primary mode
    for rd in bdevs:
      try:
        rd.Open()
      except errors.BlockDeviceError, err:
        _Fail("Can't change to primary mode: %s", err)


def DrbdWaitSync(target_node_uuid, nodes_ip, disks):
  """Wait until DRBDs have synchronized.

  """
  def _helper(rd):
    stats = rd.GetProcStatus()
    if not (stats.is_connected or stats.is_in_resync):
      raise utils.RetryAgain()
    return stats

  bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)

  min_resync = 100
  alldone = True
  for rd in bdevs:
    try:
      # poll each second for 15 seconds
      stats = utils.Retry(_helper, 1, 15, args=[rd])
    except utils.RetryTimeout:
      stats = rd.GetProcStatus()
      # last check
      if not (stats.is_connected or stats.is_in_resync):
        _Fail("DRBD device %s is not in sync: stats=%s", rd, stats)
    alldone = alldone and (not stats.is_in_resync)
    if stats.sync_percent is not None:
      min_resync = min(min_resync, stats.sync_percent)

  return (alldone, min_resync)


def DrbdNeedsActivation(target_node_uuid, nodes_ip, disks):
  """Checks which of the passed disks need activation and returns their UUIDs.

  """
  _SetPhysicalId(target_node_uuid, nodes_ip, disks)
  faulty_disks = []

  for disk in disks:
    rd = _RecursiveFindBD(disk)
    if rd is None:
      faulty_disks.append(disk)
      continue

    stats = rd.GetProcStatus()
    if stats.is_standalone or stats.is_diskless:
      faulty_disks.append(disk)

  return [disk.uuid for disk in faulty_disks]


def GetDrbdUsermodeHelper():
  """Returns the DRBD usermode helper currently configured.

  """
  try:
    return drbd.DRBD8.GetUsermodeHelper()
  except errors.BlockDeviceError, err:
    _Fail(str(err))


def PowercycleNode(hypervisor_type, hvparams=None):
  """Hard-powercycle the node.

  Because we need to return first, and schedule the powercycle in the
  background, we won't be able to report failures nicely.

  """
  hyper = hypervisor.GetHypervisor(hypervisor_type)
  try:
    pid = os.fork()
  except OSError:
    # if we can't fork, we'll pretend that we're in the child process
    pid = 0
  if pid > 0:
    return "Reboot scheduled in 5 seconds"
  # ensure the child is running in RAM
  try:
    utils.Mlockall()
  except Exception: # pylint: disable=W0703
    pass
  time.sleep(5)
  hyper.PowercycleNode(hvparams=hvparams)


def _VerifyRestrictedCmdName(cmd):
  """Verifies a restricted command name.

  @type cmd: string
  @param cmd: Command name
  @rtype: tuple; (boolean, string or None)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise it's C{None}

  """
  if not cmd.strip():
    return (False, "Missing command name")

  if os.path.basename(cmd) != cmd:
    return (False, "Invalid command name")

  if not constants.EXT_PLUGIN_MASK.match(cmd):
    return (False, "Command name contains forbidden characters")

  return (True, None)


def _CommonRestrictedCmdCheck(path, owner):
  """Common checks for restricted command file system directories and files.

  @type path: string
  @param path: Path to check
  @param owner: C{None} or tuple containing UID and GID
  @rtype: tuple; (boolean, string or C{os.stat} result)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise it's the result of C{os.stat}

  """
  if owner is None:
    # Default to root as owner
    owner = (0, 0)

  try:
    st = os.stat(path)
  except EnvironmentError, err:
    return (False, "Can't stat(2) '%s': %s" % (path, err))

  if stat.S_IMODE(st.st_mode) & (~_RCMD_MAX_MODE):
    return (False, "Permissions on '%s' are too permissive" % path)

  if (st.st_uid, st.st_gid) != owner:
    (owner_uid, owner_gid) = owner
    return (False, "'%s' is not owned by %s:%s" % (path, owner_uid, owner_gid))

  return (True, st)


def _VerifyRestrictedCmdDirectory(path, _owner=None):
  """Verifies restricted command directory.

  @type path: string
  @param path: Path to check
  @rtype: tuple; (boolean, string or None)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise it's C{None}

  """
  (status, value) = _CommonRestrictedCmdCheck(path, _owner)

  if not status:
    return (False, value)

  if not stat.S_ISDIR(value.st_mode):
    return (False, "Path '%s' is not a directory" % path)

  return (True, None)


def _VerifyRestrictedCmd(path, cmd, _owner=None):
  """Verifies a whole restricted command and returns its executable filename.

  @type path: string
  @param path: Directory containing restricted commands
  @type cmd: string
  @param cmd: Command name
  @rtype: tuple; (boolean, string)
  @return: The tuple's first element is the status; if C{False}, the second
    element is an error message string, otherwise the second element is the
    absolute path to the executable

  """
  executable = utils.PathJoin(path, cmd)

  (status, msg) = _CommonRestrictedCmdCheck(executable, _owner)

  if not status:
    return (False, msg)

  if not utils.IsExecutable(executable):
    return (False, "access(2) thinks '%s' can't be executed" % executable)

  return (True, executable)


def _PrepareRestrictedCmd(path, cmd,
                          _verify_dir=_VerifyRestrictedCmdDirectory,
                          _verify_name=_VerifyRestrictedCmdName,
                          _verify_cmd=_VerifyRestrictedCmd):
  """Performs a number of tests on a restricted command.

  @type path: string
  @param path: Directory containing restricted commands
  @type cmd: string
  @param cmd: Command name
  @return: Same as L{_VerifyRestrictedCmd}

  """
  # Verify the directory first
  (status, msg) = _verify_dir(path)
  if status:
    # Check command if everything was alright
    (status, msg) = _verify_name(cmd)

  if not status:
    return (False, msg)

  # Check actual executable
  return _verify_cmd(path, cmd)


def RunRestrictedCmd(cmd,
                     _lock_timeout=_RCMD_LOCK_TIMEOUT,
                     _lock_file=pathutils.RESTRICTED_COMMANDS_LOCK_FILE,
                     _path=pathutils.RESTRICTED_COMMANDS_DIR,
                     _sleep_fn=time.sleep,
                     _prepare_fn=_PrepareRestrictedCmd,
                     _runcmd_fn=utils.RunCmd,
                     _enabled=constants.ENABLE_RESTRICTED_COMMANDS):
  """Executes a restricted command after performing strict tests.

  @type cmd: string
  @param cmd: Command name
  @rtype: string
  @return: Command output
  @raise RPCFail: In case of an error

  """
  logging.info("Preparing to run restricted command '%s'", cmd)

  if not _enabled:
    _Fail("Restricted commands disabled at configure time")

  lock = None
  try:
    cmdresult = None
    try:
      lock = utils.FileLock.Open(_lock_file)
      lock.Exclusive(blocking=True, timeout=_lock_timeout)

      (status, value) = _prepare_fn(_path, cmd)

      if status:
        cmdresult = _runcmd_fn([value], env={}, reset_env=True,
                               postfork_fn=lambda _: lock.Unlock())
      else:
        logging.error(value)
    except Exception: # pylint: disable=W0703
      # Keep original error in log
      logging.exception("Caught exception")

    if cmdresult is None:
      logging.info("Sleeping for %0.1f seconds before returning",
                   _RCMD_INVALID_DELAY)
      _sleep_fn(_RCMD_INVALID_DELAY)

      # Do not include original error message in returned error
      _Fail("Executing command '%s' failed" % cmd)
    elif cmdresult.failed or cmdresult.fail_reason:
      _Fail("Restricted command '%s' failed: %s; output: %s",
            cmd, cmdresult.fail_reason, cmdresult.output)
    else:
      return cmdresult.output
  finally:
    if lock is not None:
      # Finally release the lock
      lock.Close()
      lock = None


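# Illustrative use (timestamp hypothetical):
# SetWatcherPause(time.time() + 3600) pauses the watcher for about an hour,
# while SetWatcherPause(None) removes the pause file again.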
def SetWatcherPause(until, _filename=pathutils.WATCHER_PAUSEFILE):
  """Creates or removes the watcher pause file.

  @type until: None or number
  @param until: Unix timestamp saying until when the watcher shouldn't run

  """
  if until is None:
    logging.info("Received request to no longer pause watcher")
    utils.RemoveFile(_filename)
  else:
    logging.info("Received request to pause watcher until %s", until)

    if not ht.TNumber(until):
      _Fail("Duration must be numeric")

    utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)


class HooksRunner(object):
  """Hook runner.

  This class is instantiated on the node side (ganeti-noded) and not
  on the master side.

  """
  def __init__(self, hooks_base_dir=None):
    """Constructor for hooks runner.

    @type hooks_base_dir: str or None
    @param hooks_base_dir: if not None, this overrides the
        L{pathutils.HOOKS_BASE_DIR} (useful for unittests)

    """
    if hooks_base_dir is None:
      hooks_base_dir = pathutils.HOOKS_BASE_DIR
    # yeah, _BASE_DIR is not valid for attributes, we use it like a
    # constant
    self._BASE_DIR = hooks_base_dir # pylint: disable=C0103

  def RunLocalHooks(self, node_list, hpath, phase, env):
    """Check that the hooks will be run only locally and then run them.

    """
    assert len(node_list) == 1
    node = node_list[0]
    _, myself = ssconf.GetMasterAndMyself()
    assert node == myself

    results = self.RunHooks(hpath, phase, env)

    # Return values in the form expected by HooksMaster
    return {node: (None, False, results)}

  def RunHooks(self, hpath, phase, env):
    """Run the scripts in the hooks directory.

    @type hpath: str
    @param hpath: the path to the hooks directory which
        holds the scripts
    @type phase: str
    @param phase: either L{constants.HOOKS_PHASE_PRE} or
        L{constants.HOOKS_PHASE_POST}
    @type env: dict
    @param env: dictionary with the environment for the hook
    @rtype: list
    @return: list of 3-element tuples:
      - script path
      - script result, either L{constants.HKR_SUCCESS} or
        L{constants.HKR_FAIL}
      - output of the script

    @raise errors.ProgrammerError: for invalid input
        parameters

    """
    if phase == constants.HOOKS_PHASE_PRE:
      suffix = "pre"
    elif phase == constants.HOOKS_PHASE_POST:
      suffix = "post"
    else:
      _Fail("Unknown hooks phase '%s'", phase)

    subdir = "%s-%s.d" % (hpath, suffix)
4321
    dir_name = utils.PathJoin(self._BASE_DIR, subdir)
4322
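    # e.g. for hpath "instance-start" and the "pre" phase this evaluates to
    # <hooks_base_dir>/instance-start-pre.d (path shown for illustration)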

    results = []

    if not os.path.isdir(dir_name):
      # for non-existing/non-dirs, we simply exit instead of logging a
      # warning at every operation
      return results

    runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)

    for (relname, relstatus, runresult) in runparts_results:
      if relstatus == constants.RUNPARTS_SKIP:
        rrval = constants.HKR_SKIP
        output = ""
      elif relstatus == constants.RUNPARTS_ERR:
        rrval = constants.HKR_FAIL
        output = "Hook script execution error: %s" % runresult
      elif relstatus == constants.RUNPARTS_RUN:
        if runresult.failed:
          rrval = constants.HKR_FAIL
        else:
          rrval = constants.HKR_SUCCESS
        output = utils.SafeEncode(runresult.output.strip())
      results.append(("%s/%s" % (subdir, relname), rrval, output))

    return results


class IAllocatorRunner(object):
  """IAllocator runner.

  This class is instantiated on the node side (ganeti-noded) and not on
  the master side.

  """
  @staticmethod
  def Run(name, idata):
    """Run an iallocator script.

    @type name: str
    @param name: the iallocator script name
    @type idata: str
    @param idata: the allocator input data

    @rtype: str
    @return: the stdout of the allocator script on success
    @raise RPCFail: if the allocator module is not found or fails

    """
    alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      _Fail("iallocator module '%s' not found in the search path", name)

    fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
    try:
      os.write(fd, idata)
      os.close(fd)
      result = utils.RunCmd([alloc_script, fin_name])
      if result.failed:
        _Fail("iallocator module '%s' failed: %s, output '%s'",
              name, result.fail_reason, result.output)
    finally:
      os.unlink(fin_name)

    return result.stdout


class DevCacheManager(object):
  """Simple class for managing a cache of block device information.

  """
  _DEV_PREFIX = "/dev/"
  _ROOT_DIR = pathutils.BDEV_CACHE_DIR

  @classmethod
  def _ConvertPath(cls, dev_path):
    """Converts a /dev/name path to the cache file name.

    This replaces slashes with underscores and strips the /dev
    prefix. It then returns the full path to the cache file.

    @type dev_path: str
    @param dev_path: the C{/dev/} path name
    @rtype: str
    @return: the converted path name

    """
    if dev_path.startswith(cls._DEV_PREFIX):
      dev_path = dev_path[len(cls._DEV_PREFIX):]
    dev_path = dev_path.replace("/", "_")
    fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
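    # e.g. "/dev/xenvg/inst1-disk0" maps to <_ROOT_DIR>/bdev_xenvg_inst1-disk0
    # (device name purely illustrative)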
    return fpath

  @classmethod
  def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
    """Updates the cache information for a given device.

    @type dev_path: str
    @param dev_path: the pathname of the device
    @type owner: str
    @param owner: the owner (instance name) of the device
    @type on_primary: bool
    @param on_primary: whether this is the primary
        node or not
    @type iv_name: str
    @param iv_name: the instance-visible name of the
        device, as in objects.Disk.iv_name

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.UpdateCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    if on_primary:
      state = "primary"
    else:
      state = "secondary"
    if iv_name is None:
      iv_name = "not_visible"
    fdata = "%s %s %s\n" % (str(owner), state, iv_name)
    try:
      utils.WriteFile(fpath, data=fdata)
    except EnvironmentError, err:
      logging.exception("Can't update bdev cache for %s: %s", dev_path, err)

  @classmethod
  def RemoveCache(cls, dev_path):
    """Remove data for a dev_path.

    This is just a wrapper over L{utils.io.RemoveFile} with a converted
    path name and logging.

    @type dev_path: str
    @param dev_path: the pathname of the device

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.RemoveCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    try:
      utils.RemoveFile(fpath)
    except EnvironmentError, err:
      logging.exception("Can't remove bdev cache for %s: %s", dev_path, err)