Fix some pylint-detected issues
[ganeti-local] / man / gnt-instance.sgml
index 8bbd7d4..9332dfa 100644 (file)
@@ -2,7 +2,7 @@
 
   <!-- Fill in your name for FIRSTNAME and SURNAME. -->
   <!-- Please adjust the date whenever revising the manpage. -->
-  <!ENTITY dhdate      "<date>May 16, 2007</date>">
+  <!ENTITY dhdate      "<date>May 29, 2008</date>">
   <!-- SECTION should be 1-8, maybe w/ subsection other parameters are
        allowed: see man(7), man(1). -->
   <!ENTITY dhsection   "<manvolnum>8</manvolnum>">
@@ -20,6 +20,7 @@
     <copyright>
       <year>2006</year>
       <year>2007</year>
+      <year>2008</year>
       <holder>Google Inc.</holder>
     </copyright>
     &dhdate;
           <arg>--swap-size <replaceable>disksize</replaceable></arg>
           <arg>-m <replaceable>memsize</replaceable></arg>
           <sbr>
-          <arg>-o <replaceable>os-type</replaceable></arg>
+
           <arg>-b <replaceable>bridge</replaceable></arg>
           <arg>--mac <replaceable>MAC-address</replaceable></arg>
+          <sbr>
+
           <arg>--hvm-boot-order <replaceable>boot-order</replaceable></arg>
+          <arg>--hvm-acpi <replaceable>ACPI-support</replaceable></arg>
           <sbr>
-          <arg>--kernel <group choice="req">
+
+          <arg>--hvm-pae <replaceable>PAE-support</replaceable></arg>
+          <sbr>
+
+          <arg>--hvm-cdrom-image-path
+            <replaceable>cdrom-image-path</replaceable></arg>
+          <sbr>
+
+          <arg>--hvm-nic-type <replaceable>NICTYPE</replaceable></arg>
+          <sbr>
+
+          <arg>--hvm-disk-type
+          <replaceable>DISKTYPE</replaceable></arg>
+          <sbr>
+
+          <arg>--vnc-bind-address
+            <replaceable>vnc-bind-address</replaceable></arg>
+          <sbr>
+
+          <arg>--kernel<group choice="req">
               <arg>default</arg>
               <arg><replaceable>kernel_path</replaceable></arg>
             </group></arg>
           <sbr>
-          <arg>--initrd <group choice="req">
+
+          <arg>--initrd<group choice="req">
               <arg>default</arg>
               <arg>none</arg>
               <arg><replaceable>initrd_path</replaceable></arg>
-            </group> </arg>
+            </group></arg>
+          <sbr>
+
+          <arg>--file-storage-dir <replaceable>dir_path</replaceable></arg>
+          <arg>--file-driver<group choice="req">
+              <arg>loop</arg>
+              <arg>blktap</arg>
+            </group></arg>
           <sbr>
-          <arg choice="req">-t<group>
+
+          <arg choice="req">-t<group choice="req">
               <arg>diskless</arg>
+              <arg>file</arg>
               <arg>plain</arg>
-              <arg>local_raid1</arg>
-              <arg>remote_raid1</arg>
               <arg>drbd</arg>
-            </group>
-          </arg>
+            </group></arg>
+          <sbr>
+
+          <group choice="req">
+            <arg>-n <replaceable>node<optional>:secondary-node</optional></replaceable></arg>
+            <arg>--iallocator <replaceable>name</replaceable></arg>
+          </group>
           <sbr>
-          <arg choice="req">-n <replaceable>node<optional>:secondary-node</optional></replaceable></arg>
+
+          <arg choice="req">-o <replaceable>os-type</replaceable></arg>
+          <sbr>
+
           <arg choice="req"><replaceable>instance</replaceable></arg>
         </cmdsynopsis>
 
         </para>
 
         <para>
-          The option is only relevant for Xen HVM instances and
-          ignored by all other instances types.
+          The default is not to set an HVM boot order which is
+          interpreted as 'dc'. This option, like all options starting
+          with 'hvm', is only relevant for Xen HVM instances and
+          ignored by all other instance types.
+        </para>
+
+        <para>
+          The <option>--hvm-acpi</option> option specifies if Xen
+          should enable ACPI support for this HVM instance. Valid
+          values are true or false. The default value is false,
+          disabling ACPI support for this instance.
         </para>
 
         <para>
-          The <option>--kernel</option> options allows the instance to
+          The <option>--hvm-pae</option> option specifies if Xen
+          should enable PAE support for this HVM instance. Valid
+          values are true or false. The default is false, disabling
+          PAE support for this instance.
+        </para>
+
+        <para>
+          The <option>--hvm-cdrom-image-path</option> option specifies the
+          path to the file Xen uses to emulate a virtual CDROM drive
+          for this HVM instance. Valid values are either an
+          absolute path to an existing file or None, which disables
+          virtual CDROM support for this instance. The default is
+          None, disabling virtual CDROM support.
+        </para>
+
+        <para>
+          The <option>--hvm-nic-type</option> specifies the NIC type
+          Xen should use for this HVM instance. Valid choices are
+          rtl8139, ne2k_pci, ne2k_isa and paravirtual with rtl8139
+          as the default. The paravirtual setting is intended for use
+          with the GPL PV drivers inside HVM Windows instances.
+        </para>
+
+        <para>
+          The <option>--hvm-disk-type</option> specifies the disk type
+          Xen should use for the HVM instance. Valid choices are ioemu
+          and paravirtual with ioemu as the default. The paravirtual
+          setting is intended for use with the GPL PV drivers inside
+          HVM Windows instances.
+        </para>
+
+        <para>
+          The <option>--vnc-bind-address</option> option specifies the
+          address that the VNC listener for this instance should bind
+          to. Valid values are IPv4 addresses. Use the address 0.0.0.0
+          to bind to all available interfaces (this is the default)
+          or specify the address of one of the interfaces on the node
+          to restrict listening to that interface.
+        </para>
+
+        <para>
+          The <option>--iallocator</option> option specifies the instance
+          allocator plugin to use. If you pass in this option the allocator
+          will select nodes for this instance automatically, so you don't need
+          to pass them with the <option>-n</option> option. For more
+          information please refer to the instance allocator documentation.
+        </para>
+
+        <para>
+          The <option>--kernel</option> option allows the instance to
           use a custom kernel (if a filename is passed) or to use the
           default kernel (<filename>@CUSTOM_XEN_KERNEL@</filename>), if the
           string <constant>default</constant> is passed.
               </listitem>
             </varlistentry>
             <varlistentry>
-              <term>plain</term>
+              <term>file</term>
               <listitem>
-                <para>Disk devices will be logical volumes.</para>
+                <para>Disk devices will be regular files.</para>
               </listitem>
             </varlistentry>
             <varlistentry>
-              <term>local_raid1</term>
-              <listitem>
-                <para>
-                  Disk devices will be md raid1 arrays over two local
-                  logical volumes.
-                </para>
-              </listitem>
-            </varlistentry>
-            <varlistentry>
-              <term>remote_raid1</term>
+              <term>plain</term>
               <listitem>
-                <para>
-                  Disk devices will be md raid1 arrays with one
-                  component (so it's not actually raid1): a drbd
-                  (0.7.x) device between the instance's primary node
-                  and the node given by the second value of the
-                  <option>--node</option> option.
-                </para>
+                <para>Disk devices will be logical volumes.</para>
               </listitem>
             </varlistentry>
             <varlistentry>
               <listitem>
                 <para>
                   Disk devices will be drbd (version 8.x) on top of
-                  lvm volumes. They are equivalent in functionality to
-                  <replaceable>remote_raid1</replaceable>, but are
-                  recommended for new instances (if you have drbd 8.x
-                  installed).
+                  lvm volumes.
                 </para>
               </listitem>
             </varlistentry>
 
         <para>
           The optional second value of the <option>--node</option> is used for
-          the remote raid template type and specifies the remote node.
+          the drbd template type and specifies the remote node.
         </para>
 
         <para>
         </para>
 
         <para>
+          The <option>--file-storage-dir</option> specifies the relative path
+          under the cluster-wide file storage directory to store file-based
+          disks. It is useful for having different subdirectories for
+          different instances. The full path of the directory where the disk
+          files are stored will consist of cluster-wide file storage directory
+          + optional subdirectory + instance name. Example:
+          /srv/ganeti/file-storage/mysubdir/instance1.example.com. This option
+          is only relevant for instances using the file storage backend.
+        </para>
+
+        <para>
+          The <option>--file-driver</option> specifies the driver to use for
+          file-based disks. Note that currently these drivers work with the
+          xen hypervisor only. This option is only relevant for instances using
+          the file storage backend. The available choices are:
+          <variablelist>
+            <varlistentry>
+              <term>loop</term>
+              <listitem>
+                <para>Kernel loopback driver.</para>
+              </listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>blktap</term>
+              <listitem>
+                <para>blktap driver.</para>
+              </listitem>
+            </varlistentry>
+          </variablelist>
+        </para>
+
+        <para>
+          The loop driver uses loopback devices to access the filesystem
+          within the file. However, running I/O intensive applications
+          in your instance using the loop driver might result in slowdowns.
+          Furthermore, if you use the loopback driver consider increasing
+          the maximum amount of loopback devices (on most systems it's 8)
+          using the max_loop param.
+        </para>
+
+        <para>
+          In order to be able to use the blktap driver you should check
+          if the 'blktapctrl' user space disk agent is running (usually
+          automatically started via xend). This user-level disk I/O
+          interface has the advantage of better performance. Especially
+          if you use a network file system (e.g. NFS) to store your instances
+          this is the recommended choice.
+        </para>
+
+        <para>
           Example:
           <screen>
+# gnt-instance add -t file -s 30g -m 512 -o debian-etch \
+  -n node1.example.com --file-storage-dir=mysubdir instance1.example.com
 # gnt-instance add -t plain -s 30g -m 512 -o debian-etch \
   -n node1.example.com instance1.example.com
-# gnt-instance add -t remote_raid1 -s 30g -m 512 -o debian-etch \
+# gnt-instance add -t drbd -s 30g -m 512 -o debian-etch \
   -n node1.example.com:node2.example.com instance2.example.com
           </screen>
         </para>
           <command>list</command>
           <arg>--no-headers</arg>
           <arg>--separator=<replaceable>SEPARATOR</replaceable></arg>
-          <arg>-o <replaceable>FIELD,...</replaceable></arg>
+          <arg>-o <replaceable>[+]FIELD,...</replaceable></arg>
         </cmdsynopsis>
 
         <para>
                 instance</simpara>
               </listitem>
             </varlistentry>
+            <varlistentry>
+              <term>tags</term>
+              <listitem>
+                <simpara>comma-separated list of the instance's
+                tags</simpara>
+              </listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>serial_no</term>
+              <listitem>
+                <simpara>the so-called 'serial number' of the
+                instance; this is a numeric field that is incremented
+                each time the instance is modified, and it can be used
+                to detect modifications</simpara>
+              </listitem>
+            </varlistentry>
           </variablelist>
         </para>
 
         <para>
+          If the value of the option starts with the character
+          <constant>+</constant>, the new fields will be added to the
+          default list. This allows you to quickly see the default list
+          plus a few other fields, instead of retyping the entire list
+          of fields.
+        </para>
+
+        <para>
           There is a subtle grouping about the available output
-          fields: all fields except for <option>oper_state</option>
-          and <option>oper_ram</option> are configuration value and
-          not run-time values. So if you don't select any of the
-          <option>oper_*</option> fields, the query will be satisfied
+          fields: all fields except for <option>oper_state</option>,
+          <option>oper_ram</option> and <option>status</option> are
+          configuration values and not run-time values. So if you don't
+          select any of these fields, the query will be satisfied
           instantly from the cluster configuration, without having to
           ask the remote nodes for the data. This can be helpful for
           big clusters when you only want some data and it makes sense
 
         <cmdsynopsis>
           <command>info</command>
+          <group>
+            <arg>-s</arg>
+            <arg>--static</arg>
+          </group>
           <arg rep="repeat"><replaceable>instance</replaceable></arg>
         </cmdsynopsis>
 
           Show detailed information about the (given) instances. This
           is different from <command>list</command> as it shows
           detailed data about the instance's disks (especially useful
-          for remote raid templates).
+          for drbd disk template).
+        </para>
+
+        <para>
+          If the option <option>-s</option> is used, only information
+          available in the configuration file is returned, without
+          querying nodes, making the operation faster.
         </para>
       </refsect3>
 
           <arg choice="opt">-b <replaceable>bridge</replaceable></arg>
           <arg choice="opt">--mac <replaceable>MAC-address</replaceable></arg>
           <arg>--hvm-boot-order <replaceable>boot-order</replaceable></arg>
+          <arg>--hvm-acpi <replaceable>ACPI-support</replaceable></arg>
+          <arg>--hvm-pae <replaceable>PAE-support</replaceable></arg>
+          <arg>--hvm-cdrom-image-path
+            <replaceable>cdrom-image-path</replaceable></arg>
+          <arg>--hvm-nic-type <replaceable>NICTYPE</replaceable></arg>
+          <arg>--hvm-disk-type <replaceable>DISKTYPE</replaceable></arg>
+          <arg>--vnc-bind-address
+            <replaceable>vnc-bind-address</replaceable></arg>
+
           <sbr>
           <arg>--kernel <group choice="req">
               <arg>default</arg>
         </para>
 
         <para>
+          The <option>--hvm-acpi</option> option specifies if Xen
+          should enable ACPI support for this HVM instance. Valid
+          values are true or false.
+        </para>
+
+        <para>
+          The <option>--hvm-pae</option> option specifies if Xen
+          should enable PAE support for this HVM instance. Valid
+          values are true or false.
+        </para>
+
+        <para>
+          The <option>--hvm-cdrom-image-path</option> specifies the
+          path to the file xen uses to emulate a virtual CDROM drive
+          for this HVM instance. Valid values are either an
+          absolute path to an existing file or None, which disables
+          virtual CDROM support for this instance.
+        </para>
+
+        <para>
+          The <option>--hvm-nic-type</option> specifies the NIC type
+          Xen should use for this HVM instance. Valid choices are
+          rtl8139, ne2k_pci, ne2k_isa and paravirtual with rtl8139
+          as the default. The paravirtual setting is intended for use
+          with the GPL PV drivers inside HVM Windows instances.
+        </para>
+
+        <para>
+          The <option>--hvm-disk-type</option> specifies the disk type
+          Xen should use for the HVM instance. Valid choices are ioemu
+          and paravirtual with ioemu as the default. The paravirtual
+          setting is intended for use with the GPL PV drivers inside
+          HVM Windows instances.
+        </para>
+
+        <para>
+          The <option>--vnc-bind-address</option> specifies the
+          address that the VNC listener for this instance should bind
+          to. Valid values are IPv4 addresses. Use the address 0.0.0.0
+          to bind to all available interfaces.
+        </para>
+
+        <para>
           All the changes take effect at the next restart. If the
           instance is running, there is no effect on the instance.
         </para>
           <command>reinstall</command>
           <arg choice="opt">-o <replaceable>os-type</replaceable></arg>
           <arg choice="opt">-f <replaceable>force</replaceable></arg>
+          <arg>--select-os</arg>
           <arg choice="req"><replaceable>instance</replaceable></arg>
         </cmdsynopsis>
 
           <option>--os-type</option> is specified, the operating system is
           changed.
         </para>
+
+        <para>
+          The <option>--select-os</option> option switches to an
+          interactive OS reinstall. The user is prompted to select the OS
+          template from the list of available OS templates.
+        </para>
       </refsect3>
 
       <refsect3>
           recreates the hypervisor config for the instance and
           starts the instance. A full reboot does the equivalent
           of <command>gnt-instance shutdown &amp;&amp; gnt-instance
-          startup</command>. The default is soft reboot.
+          startup</command>. The default is hard reboot.
         </para>
 
         <para>
         <title>CONSOLE</title>
         <cmdsynopsis>
           <command>console</command>
+          <arg choice="opt">--show-cmd</arg>
           <arg choice="req"><replaceable>instance</replaceable></arg>
         </cmdsynopsis>
 
         <para>
           Connects to the console of the given instance. If the instance
-          is not up, an error is returned.
+          is not up, an error is returned. Use the <option>--show-cmd</option>
+          option to display the command instead of executing it.
+        </para>
+
+        <para>
+          For HVM instances, this will attempt to connect to the serial
+          console of the instance. To connect to the virtualized
+          "physical" console of an HVM instance, use a VNC client with
+          the connection info from gnt-instance info.
         </para>
 
         <para>
 
         <cmdsynopsis>
           <command>replace-disks</command>
-          <arg choice="opt">--new-secondary <replaceable>NODE</replaceable></arg>
-          <arg choice="req"><replaceable>instance</replaceable></arg>
-        </cmdsynopsis>
 
-        <cmdsynopsis>
-          <command>replace-disks</command>
+          <group choice="req">
+            <arg>--iallocator <replaceable>name</replaceable></arg>
+            <arg>--new-secondary <replaceable>NODE</replaceable></arg>
+          </group>
+          <sbr>
+
           <arg choice="opt">-s</arg>
-          <arg choice="req">--new-secondary <replaceable>NODE</replaceable></arg>
           <arg choice="req"><replaceable>instance</replaceable></arg>
         </cmdsynopsis>
 
         <cmdsynopsis>
           <command>replace-disks</command>
+
           <group>
           <arg choice="req">-s</arg>
           <arg choice="req">-p</arg>
 
         <para>
           This command is a generalized form for adding and replacing
-          disks.
+          disks. It is currently only valid for the mirrored (DRBD)
+          disk template.
         </para>
 
         <para>
-          The first form is usable with the
-          <literal>remote_raid1</literal> disk template. This will
-          replace the disks on both the primary and secondary node,
-          and optionally will change the secondary node to a new one
-          if you pass the <option>--new-secondary</option> option.
-        </para>
-
-        <para>
-          The second and third forms are usable with the
-          <literal>drbd</literal> disk template. The second form will
-          do a secondary replacement, but as opposed to the
-          <literal>remote_raid1</literal> will not replace the disks
-          on the primary, therefore it will execute faster. The third
-          form will replace the disks on either the primary
+          The first form will do a secondary node change, while the
+          second form will replace the disks on either the primary
           (<option>-p</option>) or the secondary (<option>-s</option>)
           node of the instance only, without changing the node.
         </para>
 
+        <para>
+          Specifying <option>--iallocator</option> enables secondary node
+          replacement and makes the new secondary be selected automatically
+          by the specified allocator plugin.
+        </para>
       </refsect3>
 
       <refsect3>
           successful, the command will show the location and name of
           the block devices:
           <screen>
-node1.example.com:sda:/dev/md0
-node1.example.com:sdb:/dev/md1
+node1.example.com:sda:/dev/drbd0
+node1.example.com:sdb:/dev/drbd1
           </screen>
 
           In this example, <emphasis>node1.example.com</emphasis> is
           the name of the node on which the devices have been
           activated. The <emphasis>sda</emphasis> and
           <emphasis>sdb</emphasis> are the names of the block devices
-          inside the instance. <emphasis>/dev/md0</emphasis> and
-          <emphasis>/dev/md1</emphasis> are the names of the block
+          inside the instance. <emphasis>/dev/drbd0</emphasis> and
+          <emphasis>/dev/drbd1</emphasis> are the names of the block
           devices as visible on the node.
         </para>
 
@@ -935,13 +1163,95 @@ node1.example.com:sdb:/dev/md1
         </cmdsynopsis>
         <para>
           De-activates the block devices of the given instance. Note
-          that if you run this command for a remote raid instance
-          type, while it is running, it will not be able to shutdown
-          the block devices on the primary node, but it will shutdown
-          the block devices on the secondary nodes, thus breaking the
-          replication.
+          that if you run this command for an instance with a drbd
+          disk template, while it is running, it will not be able to
+          shutdown the block devices on the primary node, but it will
+          shutdown the block devices on the secondary nodes, thus
+          breaking the replication.
+        </para>
+
+      </refsect3>
+
+      <refsect3>
+        <title>GROW-DISK</title>
+        <cmdsynopsis>
+          <command>grow-disk</command>
+          <arg>--no-wait-for-sync</arg>
+          <arg choice="req"><replaceable>instance</replaceable></arg>
+          <arg choice="req"><replaceable>disk</replaceable></arg>
+          <arg choice="req"><replaceable>amount</replaceable></arg>
+        </cmdsynopsis>
+
+        <para>
+          Grows an instance's disk. This is only possible for
+          instances having a <literal>plain</literal> or
+          <literal>drbd</literal> disk template.
+        </para>
+
+        <para>
+          Note that this command only changes the block device size; it
+          will not grow the actual filesystems, partitions, etc. that
+          live on that disk. Usually, you will need to:
+          <orderedlist>
+            <listitem>
+              <simpara>use <command>gnt-instance grow-disk</command></simpara>
+            </listitem>
+            <listitem>
+              <simpara>reboot the instance (later, at a convenient
+              time)</simpara>
+            </listitem>
+            <listitem>
+              <simpara>use a filesystem resizer, such as
+              <citerefentry> <refentrytitle>ext2online</refentrytitle>
+              <manvolnum>8</manvolnum> </citerefentry> or
+              <citerefentry> <refentrytitle>xfs_growfs</refentrytitle>
+              <manvolnum>8</manvolnum> </citerefentry> to resize the
+              filesystem, or use <citerefentry>
+              <refentrytitle>fdisk</refentrytitle>
+              <manvolnum>8</manvolnum> </citerefentry> to change the
+              partition table on the disk
+              </simpara>
+            </listitem>
+          </orderedlist>
+        </para>
+
+
+        <para>
+          The <replaceable>disk</replaceable> argument is either
+          <literal>sda</literal> or <literal>sdb</literal>. The
+          <replaceable>amount</replaceable> argument is given either
+          as a number (and it represents the amount to increase the
+          disk with in mebibytes) or can be given similar to the
+          arguments in the create instance operation, with a suffix
+          denoting the unit.
+        </para>
+
+        <para>
+          Note that the disk grow operation might complete on one node
+          but fail on the other; this will leave the instance with
+          different-sized LVs on the two nodes, but this will not
+          create problems (except for unused space).
         </para>
 
+        <para>
+          If you do not want gnt-instance to wait for the new disk
+          region to be synced, use the
+          <option>--no-wait-for-sync</option> option.
+        </para>
+
+
+        <para>Example (increase sda for instance1 by 16GiB):
+          <screen>
+# gnt-instance grow-disk instance1.example.com sda 16g
+          </screen>
+        </para>
+
+        <para>
+          Also note that disk shrinking will not be supported; use
+          <command>gnt-backup export</command> and then
+          <command>gnt-backup import</command> to reduce the disk size
+          of an instance.
+        </para>
       </refsect3>
 
     </refsect2>
@@ -961,8 +1271,8 @@ node1.example.com:sdb:/dev/md1
 
         <para>
           Failover will fail the instance over its secondary
-          node. This works only for instances having a remote raid
-          disk layout.
+          node. This works only for instances having a drbd disk
+          template.
         </para>
 
         <para>