Revision 5bbd3f7f

b/doc/hooks.rst
@@ -104,7 +104,7 @@
   be left
 
 
-All informations about the cluster is passed using environment
+All information about the cluster is passed using environment
 variables. Different operations will have sligthly different
 environments, but most of the variables are common.
 
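The hunk above is documentation prose: hooks receive their view of the cluster through environment variables. As a rough illustration only, a hook written in Python could read that environment as sketched below; the variable names GANETI_OP_CODE, GANETI_HOOKS_PHASE and GANETI_MASTER are assumptions taken from the Ganeti hooks documentation, not from this diff.

    #!/usr/bin/env python
    # Hypothetical hook sketch: print a few of the environment variables
    # that Ganeti exports to hook scripts (names assumed, see above).
    import os
    import sys

    op_code = os.environ.get("GANETI_OP_CODE", "unknown")
    phase = os.environ.get("GANETI_HOOKS_PHASE", "unknown")
    master = os.environ.get("GANETI_MASTER", "")
    sys.stderr.write("hook invoked for %s (%s phase), master is %s\n" %
                     (op_code, phase, master))
    sys.exit(0)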

  
b/doc/iallocator.rst
@@ -233,7 +233,7 @@
 also a dict having three keys:
 
 success
-  a boolean value denoting if the allocation was successfull or not
+  a boolean value denoting if the allocation was successful or not
 
 info
   a string with information from the scripts; if the allocation fails,
b/lib/backend.py
@@ -308,7 +308,7 @@
 
 
 def GetNodeInfo(vgname, hypervisor_type):
-  """Gives back a hash with different informations about the node.
+  """Gives back a hash with different information about the node.
 
   @type vgname: C{string}
   @param vgname: the name of the volume group to ask for disk space information
@@ -581,7 +581,7 @@
 
 
 def GetInstanceInfo(instance, hname):
-  """Gives back the informations about an instance as a dictionary.
+  """Gives back the information about an instance as a dictionary.
 
   @type instance: string
   @param instance: the instance name
@@ -746,7 +746,7 @@
 
 
 def _GetVGInfo(vg_name):
-  """Get informations about the volume group.
+  """Get information about the volume group.
 
   @type vg_name: str
   @param vg_name: the volume group which we query
@@ -1032,7 +1032,7 @@
     msg = "Failed to accept instance"
     logging.exception(msg)
     return (False, '%s: %s' % (msg, err))
-  return (True, "Accept successfull")
+  return (True, "Accept successful")
 
 
 def FinalizeMigration(instance, info, success):
@@ -1080,7 +1080,7 @@
     msg = "Failed to migrate instance"
     logging.exception(msg)
     return (False, "%s: %s" % (msg, err))
-  return (True, "Migration successfull")
+  return (True, "Migration successful")
 
 
 def BlockdevCreate(disk, size, owner, on_primary, info):
@@ -1273,7 +1273,7 @@
 def BlockdevShutdown(disk):
   """Shut down a block device.
 
-  First, if the device is assembled (Attach() is successfull), then
+  First, if the device is assembled (Attach() is successful), then
   the device is shutdown. Then the children of the device are
   shutdown.
 
@@ -1391,7 +1391,7 @@
 def _RecursiveFindBD(disk):
   """Check if a device is activated.
 
-  If so, return informations about the real device.
+  If so, return information about the real device.
 
   @type disk: L{objects.Disk}
   @param disk: the disk object we need to find
@@ -1411,7 +1411,7 @@
 def BlockdevFind(disk):
   """Check if a device is activated.
 
-  If it is, return informations about the real device.
+  If it is, return information about the real device.
 
   @type disk: L{objects.Disk}
   @param disk: the disk to find
@@ -2094,7 +2094,7 @@
   @param file_storage_dir: the directory we should cleanup
   @rtype: tuple (success,)
   @return: tuple of one element, C{success}, denoting
-      whether the operation was successfull
+      whether the operation was successful
 
   """
   file_storage_dir = _TransformFileStorageDir(file_storage_dir)
b/lib/bdev.py
@@ -161,7 +161,7 @@
     """Remove this device.
 
     This makes sense only for some of the device types: LV and file
-    storeage. Also note that if the device can't attach, the removal
+    storage. Also note that if the device can't attach, the removal
     can't be completed.
 
     """
@@ -444,7 +444,7 @@
   def Assemble(self):
     """Assemble the device.
 
-    We alway run `lvchange -ay` on the LV to ensure it's active before
+    We always run `lvchange -ay` on the LV to ensure it's active before
     use, as there were cases when xenvg was not active after boot
     (also possibly after disk issues).
 
@@ -1258,14 +1258,14 @@
 
 
     If sync_percent is None, it means all is ok
-    If estimated_time is None, it means we can't esimate
+    If estimated_time is None, it means we can't estimate
     the time needed, otherwise it's the time left in seconds.
 
 
     We set the is_degraded parameter to True on two conditions:
     network not connected or local disk missing.
 
-    We compute the ldisk parameter based on wheter we have a local
+    We compute the ldisk parameter based on whether we have a local
     disk or not.
 
     @rtype: tuple
@@ -1335,14 +1335,14 @@
 
     ever_disconnected = _IgnoreError(self._ShutdownNet, self.minor)
     timeout_limit = time.time() + self._NET_RECONFIG_TIMEOUT
-    sleep_time = 0.100 # we start the retry time at 100 miliseconds
+    sleep_time = 0.100 # we start the retry time at 100 milliseconds
     while time.time() < timeout_limit:
       status = self.GetProcStatus()
       if status.is_standalone:
         break
       # retry the disconnect, it seems possible that due to a
       # well-time disconnect on the peer, my disconnect command might
-      # be ingored and forgotten
+      # be ignored and forgotten
       ever_disconnected = _IgnoreError(self._ShutdownNet, self.minor) or \
                           ever_disconnected
       time.sleep(sleep_time)
@@ -1647,7 +1647,7 @@
   def Shutdown(self):
     """Shutdown the device.
 
-    This is a no-op for the file type, as we don't deacivate
+    This is a no-op for the file type, as we don't deactivate
     the file on shutdown.
 
     """
b/lib/bootstrap.py
@@ -477,7 +477,7 @@
 
   @type node_list: list
   @param node_list: the list of nodes to query for master info; the current
-      node wil be removed if it is in the list
+      node will be removed if it is in the list
   @rtype: list
   @return: list of (node, votes)
 
b/lib/cli.py
@@ -320,7 +320,7 @@
 def _ParseArgs(argv, commands, aliases):
   """Parser for the command line arguments.
 
-  This function parses the arguements and returns the function which
+  This function parses the arguments and returns the function which
   must be executed together with its (modified) arguments.
 
   @param argv: the command line
@@ -438,10 +438,10 @@
     choices = [('y', True, 'Perform the operation'),
                ('n', False, 'Do not perform the operation')]
   if not choices or not isinstance(choices, list):
-    raise errors.ProgrammerError("Invalid choiches argument to AskUser")
+    raise errors.ProgrammerError("Invalid choices argument to AskUser")
   for entry in choices:
     if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
-      raise errors.ProgrammerError("Invalid choiches element to AskUser")
+      raise errors.ProgrammerError("Invalid choices element to AskUser")
 
   answer = choices[-1][1]
   new_text = []
@@ -747,7 +747,7 @@
   except (errors.GenericError, luxi.ProtocolError,
           JobSubmittedException), err:
     result, err_msg = FormatError(err)
-    logging.exception("Error durring command processing")
+    logging.exception("Error during command processing")
     ToStderr(err_msg)
 
   return result
b/lib/cmdlib.py
@@ -68,7 +68,7 @@
   def __init__(self, processor, op, context, rpc):
     """Constructor for LogicalUnit.
 
-    This needs to be overriden in derived classes in order to check op
+    This needs to be overridden in derived classes in order to check op
     validity.
 
     """
@@ -116,7 +116,7 @@
     CheckPrereq, doing these separate is better because:
 
       - ExpandNames is left as as purely a lock-related function
-      - CheckPrereq is run after we have aquired locks (and possible
+      - CheckPrereq is run after we have acquired locks (and possible
         waited for them)
 
     The function is allowed to change the self.op attribute so that
@@ -477,7 +477,7 @@
   @param nics: list of tuples (ip, bridge, mac) representing
       the NICs the instance  has
   @type disk_template: string
-  @param disk_template: the distk template of the instance
+  @param disk_template: the disk template of the instance
   @type disks: list
   @param disks: the list of (size, mode) pairs
   @type bep: dict
@@ -592,10 +592,10 @@
 
 
 def _CheckInstanceBridgesExist(lu, instance):
-  """Check that the brigdes needed by an instance exist.
+  """Check that the bridges needed by an instance exist.
 
   """
-  # check bridges existance
+  # check bridges existence
   brlist = [nic.bridge for nic in instance.nics]
   result = lu.rpc.call_bridges_exist(instance.primary_node, brlist)
   result.Raise()
@@ -616,7 +616,7 @@
 
     This checks whether the cluster is empty.
 
-    Any errors are signalled by raising errors.OpPrereqError.
+    Any errors are signaled by raising errors.OpPrereqError.
 
     """
     master = self.cfg.GetMasterNode()
@@ -669,7 +669,7 @@
     Test list:
 
       - compares ganeti version
-      - checks vg existance and size > 20G
+      - checks vg existence and size > 20G
       - checks config file checksum
       - checks ssh to other nodes
 
@@ -908,7 +908,7 @@
           if bep[constants.BE_AUTO_BALANCE]:
             needed_mem += bep[constants.BE_MEMORY]
         if nodeinfo['mfree'] < needed_mem:
-          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
+          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                       " failovers should node %s fail" % (node, prinode))
           bad = True
     return bad
@@ -927,7 +927,7 @@
   def BuildHooksEnv(self):
     """Build hooks env.
 
-    Cluster-Verify hooks just rone in the post phase and their failure makes
+    Cluster-Verify hooks just ran in the post phase and their failure makes
     the output be logged in the verify output and the verification to fail.
 
     """
@@ -1194,7 +1194,7 @@
     return not bad
 
   def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
-    """Analize the post-hooks' result
+    """Analyze the post-hooks' result
 
     This method analyses the hook result, handles it, and sends some
     nicely-formatted feedback back to the user.
@@ -1414,7 +1414,7 @@
 
   @type disk: L{objects.Disk}
   @param disk: the disk to check
-  @rtype: booleean
+  @rtype: boolean
   @return: boolean indicating whether a LD_LV dev_type was found or not
 
   """
@@ -1815,7 +1815,7 @@
      - it does not have primary or secondary instances
      - it's not the master
 
-    Any errors are signalled by raising errors.OpPrereqError.
+    Any errors are signaled by raising errors.OpPrereqError.
 
    """
     node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
@@ -2136,7 +2136,7 @@
      - it is resolvable
      - its parameters (single/dual homed) matches the cluster
 
-    Any errors are signalled by raising errors.OpPrereqError.
+    Any errors are signaled by raising errors.OpPrereqError.
 
     """
     node_name = self.op.node_name
@@ -2190,7 +2190,7 @@
         raise errors.OpPrereqError("The master has a private ip but the"
                                    " new node doesn't have one")
 
-    # checks reachablity
+    # checks reachability
     if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
       raise errors.OpPrereqError("Node not reachable by ping")
 
@@ -2866,7 +2866,7 @@
     _CheckNodeOnline(self, instance.primary_node)
 
     bep = self.cfg.GetClusterInfo().FillBE(instance)
-    # check bridges existance
+    # check bridges existence
     _CheckInstanceBridgesExist(self, instance)
 
     remote_info = self.rpc.call_instance_info(instance.primary_node,
@@ -2944,7 +2944,7 @@
 
     _CheckNodeOnline(self, instance.primary_node)
 
-    # check bridges existance
+    # check bridges existence
     _CheckInstanceBridgesExist(self, instance)
 
   def Exec(self, feedback_fn):
@@ -3615,7 +3615,7 @@
       self.LogInfo("Not checking memory on the secondary node as"
                    " instance will not be started")
 
-    # check bridge existance
+    # check bridge existence
     brlist = [nic.bridge for nic in instance.nics]
     result = self.rpc.call_bridges_exist(target_node, brlist)
     result.Raise()
@@ -3753,7 +3753,7 @@
                          instance.name, i_be[constants.BE_MEMORY],
                          instance.hypervisor)
 
-    # check bridge existance
+    # check bridge existence
     brlist = [nic.bridge for nic in instance.nics]
     result = self.rpc.call_bridges_exist(target_node, brlist)
     if result.failed or not result.data:
@@ -6289,7 +6289,7 @@
     # remove it from its current node. In the future we could fix this by:
     #  - making a tasklet to search (share-lock all), then create the new one,
     #    then one to remove, after
-    #  - removing the removal operation altoghether
+    #  - removing the removal operation altogether
     self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 
   def DeclareLocks(self, level):
b/lib/config.py
@@ -796,7 +796,7 @@
                                     self._config_data.instances.keys())
 
   def _UnlockedGetInstanceInfo(self, instance_name):
-    """Returns informations about an instance.
+    """Returns information about an instance.
 
     This function is for internal use, when the config lock is already held.
 
@@ -808,9 +808,9 @@
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetInstanceInfo(self, instance_name):
-    """Returns informations about an instance.
+    """Returns information about an instance.
 
-    It takes the information from the configuration file. Other informations of
+    It takes the information from the configuration file. Other information of
     an instance are taken from the live systems.
 
     @param instance_name: name of the instance, e.g.
@@ -1208,7 +1208,7 @@
 
   @locking.ssynchronized(_config_lock, shared=1)
   def GetClusterInfo(self):
-    """Returns informations about the cluster
+    """Returns information about the cluster
 
     @rtype: L{objects.Cluster}
     @return: the cluster object
b/lib/http/__init__.py
@@ -744,7 +744,7 @@
   def HasMessageBody(self):
     """Checks whether the HTTP message contains a body.
 
-    Can be overriden by subclasses.
+    Can be overridden by subclasses.
 
     """
     return bool(self._msg.body)
@@ -937,7 +937,7 @@
   def ParseStartLine(self, start_line):
     """Parses the start line of a message.
 
-    Must be overriden by subclass.
+    Must be overridden by subclass.
 
     @type start_line: string
     @param start_line: Start line string
b/lib/http/auth.py
@@ -80,7 +80,7 @@
   def GetAuthRealm(self, req):
     """Returns the authentication realm for a request.
 
-    MAY be overriden by a subclass, which then can return different realms for
+    MAY be overridden by a subclass, which then can return different realms for
     different paths. Returning "None" means no authentication is needed for a
     request.
 
@@ -195,7 +195,7 @@
   def Authenticate(self, req, user, password):
     """Checks the password for a user.
 
-    This function MUST be overriden by a subclass.
+    This function MUST be overridden by a subclass.
 
     """
     raise NotImplementedError()
b/lib/http/server.py
@@ -536,14 +536,14 @@
   def PreHandleRequest(self, req):
     """Called before handling a request.
 
-    Can be overriden by a subclass.
+    Can be overridden by a subclass.
 
     """
 
   def HandleRequest(self, req):
     """Handles a request.
 
-    Must be overriden by subclass.
+    Must be overridden by subclass.
 
     """
     raise NotImplementedError()
b/lib/jqueue.py
@@ -69,7 +69,7 @@
 
 
 class _QueuedOpCode(object):
-  """Encasulates an opcode object.
+  """Encapsulates an opcode object.
 
   @ivar log: holds the execution log and consists of tuples
   of the form C{(log_serial, timestamp, level, message)}
@@ -286,7 +286,7 @@
     """Selectively returns the log entries.
 
     @type newer_than: None or int
-    @param newer_than: if this is None, return all log enties,
+    @param newer_than: if this is None, return all log entries,
         otherwise return only the log entries with serial higher
         than this value
     @rtype: list
@@ -469,7 +469,7 @@
 
 
 class JobQueue(object):
-  """Quue used to manaage the jobs.
+  """Queue used to manage the jobs.
 
   @cvar _RE_JOB_FILE: regex matching the valid job file names
 
@@ -651,7 +651,7 @@
 
     Since we aim to keep consistency should this node (the current
     master) fail, we will log errors if our rpc fail, and especially
-    log the case when more than half of the nodes failes.
+    log the case when more than half of the nodes fails.
 
     @param result: the data as returned from the rpc call
     @type nodes: list
@@ -934,7 +934,7 @@
     and in the future we might merge them.
 
     @type drain_flag: boolean
-    @param drain_flag: wheter to set or unset the drain flag
+    @param drain_flag: Whether to set or unset the drain flag
 
     """
     if drain_flag:
b/lib/locking.py
@@ -297,7 +297,7 @@
 
 
 # Whenever we want to acquire a full LockSet we pass None as the value
-# to acquire.  Hide this behing this nicely named constant.
+# to acquire.  Hide this behind this nicely named constant.
 ALL_SET = None
 
 
@@ -689,7 +689,7 @@
 class GanetiLockManager:
   """The Ganeti Locking Library
 
-  The purpouse of this small library is to manage locking for ganeti clusters
+  The purpose of this small library is to manage locking for ganeti clusters
   in a central place, while at the same time doing dynamic checks against
   possible deadlocks. It will also make it easier to transition to a different
   lock type should we migrate away from python threads.
@@ -774,7 +774,7 @@
     """Acquire a set of resource locks, at the same level.
 
     @param level: the level at which the locks shall be acquired;
-        it must be a memmber of LEVELS.
+        it must be a member of LEVELS.
     @param names: the names of the locks which shall be acquired
         (special lock names, or instance/node names)
     @param shared: whether to acquire in shared mode; by default
@@ -809,7 +809,7 @@
     mode, before releasing them.
 
     @param level: the level at which the locks shall be released;
-        it must be a memmber of LEVELS
+        it must be a member of LEVELS
     @param names: the names of the locks which shall be released
         (defaults to all the locks acquired at that level)
 
@@ -827,7 +827,7 @@
     """Add locks at the specified level.
 
     @param level: the level at which the locks shall be added;
-        it must be a memmber of LEVELS_MOD.
+        it must be a member of LEVELS_MOD.
     @param names: names of the locks to acquire
     @param acquired: whether to acquire the newly added locks
     @param shared: whether the acquisition will be shared
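The acquire/release/add docstrings above all take a lock level plus a set of lock names. A rough usage sketch only, assuming an existing GanetiLockManager instance named glm (LEVEL_NODE and ALL_SET are names that appear elsewhere in this changeset; the exact call pattern here is illustrative):

    # Sketch: grab every node lock in shared mode, then release them all.
    glm.acquire(locking.LEVEL_NODE, locking.ALL_SET, shared=1)
    try:
      pass  # inspect the nodes while the locks are held
    finally:
      # with no names given, release() drops all locks held at this level
      glm.release(locking.LEVEL_NODE)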
b/lib/luxi.py
@@ -191,7 +191,7 @@
       raise TimeoutError("Sending timeout: %s" % str(err))
 
   def Recv(self):
-    """Try to receive a messae from the socket.
+    """Try to receive a message from the socket.
 
     In case we already have messages queued, we just return from the
     queue. Otherwise, we try to read data with a _rwtimeout network
b/lib/mcpu.py
@@ -158,7 +158,7 @@
           self.context.glm.add(level, add_locks, acquired=1, shared=share)
         except errors.LockError:
           raise errors.OpPrereqError(
-            "Coudn't add locks (%s), probably because of a race condition"
+            "Couldn't add locks (%s), probably because of a race condition"
             " with another job, who added them first" % add_locks)
       try:
         try:
@@ -187,7 +187,7 @@
     @type run_notifier: callable (no arguments) or None
     @param run_notifier:  this function (if callable) will be called when
                           we are about to call the lu's Exec() method, that
-                          is, after we have aquired all locks
+                          is, after we have acquired all locks
 
     """
     if not isinstance(op, opcodes.OpCode):
b/lib/rpc.py
@@ -83,7 +83,7 @@
   calls we can't raise an exception just because one one out of many
   failed, and therefore we use this class to encapsulate the result.
 
-  @ivar data: the data payload, for successfull results, or None
+  @ivar data: the data payload, for successful results, or None
   @type failed: boolean
   @ivar failed: whether the operation failed at RPC level (not
       application level on the remote node)
@@ -161,7 +161,7 @@
   list of nodes, will contact (in parallel) all nodes, and return a
   dict of results (key: node name, value: result).
 
-  One current bug is that generic failure is still signalled by
+  One current bug is that generic failure is still signaled by
   'False' result, which is not good. This overloading of values can
   cause bugs.
 
@@ -220,7 +220,7 @@
     @return: List of RPC results
 
     """
-    assert _http_manager, "RPC module not intialized"
+    assert _http_manager, "RPC module not initialized"
 
     _http_manager.ExecRequests(self.nc.values())
 
@@ -269,9 +269,9 @@
     @type instance: L{objects.Instance}
     @param instance: an Instance object
     @type hvp: dict or None
-    @param hvp: a dictionary with overriden hypervisor parameters
+    @param hvp: a dictionary with overridden hypervisor parameters
    @type bep: dict or None
-    @param bep: a dictionary with overriden backend parameters
+    @param bep: a dictionary with overridden backend parameters
     @rtype: dict
     @return: the instance dict, with the hvparams filled with the
         cluster defaults
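The RpcResult fields documented above (data carries the payload of a successful call, failed marks an RPC-level error) match the call sites visible in the cmdlib.py hunks of this changeset. A condensed sketch of that checking pattern, with lu, target_node and brlist as placeholders:

    # Sketch of the usual RpcResult handling pattern seen in this changeset.
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    if result.failed or not result.data:
      raise errors.OpPrereqError("Bridge check failed on node %s" % target_node)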
b/lib/ssh.py
@@ -201,7 +201,7 @@
     connected to).
 
     This is used to detect problems in ssh known_hosts files
-    (conflicting known hosts) and incosistencies between dns/hosts
+    (conflicting known hosts) and inconsistencies between dns/hosts
     entries and local machine names
 
     @param node: nodename of a host to check; can be short or
b/lib/utils.py
@@ -136,7 +136,7 @@
       directory for the command; the default will be /
   @rtype: L{RunResult}
   @return: RunResult instance
-  @raise erors.ProgrammerError: if we call this when forks are disabled
+  @raise errors.ProgrammerError: if we call this when forks are disabled
 
   """
   if no_fork:
@@ -701,7 +701,7 @@
   @type ip: str
   @param ip: the address to be checked
   @rtype: a regular expression match object
-  @return: a regular epression match object, or None if the
+  @return: a regular expression match object, or None if the
       address is not valid
 
   """
@@ -734,7 +734,7 @@
 
   This function will check all arguments in the args list so that they
   are valid shell parameters (i.e. they don't contain shell
-  metacharaters). If everything is ok, it will return the result of
+  metacharacters). If everything is ok, it will return the result of
   template % args.
 
   @type template: str
@@ -1063,7 +1063,7 @@
   @type args: list
   @param args: list of arguments to be quoted
   @rtype: str
-  @return: the quoted arguments concatenaned with spaces
+  @return: the quoted arguments concatenated with spaces
 
   """
   return ' '.join([ShellQuote(i) for i in args])
@@ -1080,7 +1080,7 @@
   @type port: int
   @param port: the port to connect to
   @type timeout: int
-  @param timeout: the timeout on the connection attemp
+  @param timeout: the timeout on the connection attempt
   @type live_port_needed: boolean
   @param live_port_needed: whether a closed port will cause the
       function to return failure, as if there was a timeout
@@ -1122,7 +1122,7 @@
   address.
 
   @type address: string
-  @param address: the addres to check
+  @param address: the address to check
   @rtype: bool
   @return: True if we own the address
 
@@ -1218,7 +1218,7 @@
   @type size: None or int
   @param size: Read at most size bytes
   @rtype: str
-  @return: the (possibly partial) conent of the file
+  @return: the (possibly partial) content of the file
 
   """
   f = open(file_name, "r")
@@ -1378,7 +1378,7 @@
   Element order is preserved.
 
   @type seq: sequence
-  @param seq: the sequence with the source elementes
+  @param seq: the sequence with the source elements
   @rtype: list
   @return: list of unique elements from seq
 
@@ -1390,7 +1390,7 @@
 def IsValidMac(mac):
   """Predicate to check if a MAC address is valid.
 
-  Checks wether the supplied MAC address is formally correct, only
+  Checks whether the supplied MAC address is formally correct, only
   accepts colon separated format.
 
  @type mac: str
@@ -1831,7 +1831,7 @@
 
   """
   if isinstance(text, unicode):
-    # onli if unicode; if str already, we handle it below
+    # only if unicode; if str already, we handle it below
     text = text.encode('ascii', 'backslashreplace')
   resu = ""
   for char in text:
b/man/gnt-node.sgml
@@ -91,7 +91,7 @@
         discussion in <citerefentry>
         <refentrytitle>gnt-cluster</refentrytitle>
         <manvolnum>8</manvolnum> </citerefentry> for more
-        informations.
+        information.
       </para>
 
       <para>
b/tools/lvmstrap
@@ -267,7 +267,7 @@
    devnum: the device number, e.g. 0x803 (2051 in decimal) for sda3
 
   Returns:
-    None; failure of the check is signalled by raising a
+    None; failure of the check is signaled by raising a
       SysconfigError exception
   """
 
@@ -449,7 +449,7 @@
 
 
 def DevInfo(name, dev, mountinfo):
-  """Computes miscellaneous informations about a block device.
+  """Computes miscellaneous information about a block device.
 
   Args:
     name: the device name, e.g. sda
@@ -478,7 +478,7 @@
 def ShowDiskInfo(opts):
   """Shows a nicely formatted block device list for this system.
 
-  This function shows the user a table with the informations gathered
+  This function shows the user a table with the information gathered
   by the other functions defined, in order to help the user make a
   choice about which disks should be allocated to our volume group.
 
