
File: [Development] / failsafe / FailSafe-books / LnxFailSafe_AG / html.tar

Revision 1.1, Wed Nov 29 21:58:28 2000 UTC by vasa
Branch: MAIN
CVS Tags: HEAD

New documentation files for the Admin Guide.

html/configexample.html
<HTML
><HEAD
><TITLE
>Configuration Examples</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Configuration Example CLI Script"
HREF="le40790-parent.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Example with Three-Node Cluster"
HREF="threenode-example.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le40790-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="threenode-example.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="CONFIGEXAMPLE"
>Chapter 6. Configuration Examples</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>6.1. <A
HREF="threenode-example.html"
>Linux FailSafe Example with Three-Node Cluster</A
></DT
><DT
>6.2. <A
HREF="threenode-script.html"
>cmgr Script</A
></DT
><DT
>6.3. <A
HREF="localfailover-of-ip.html"
>Local Failover of an IP Address</A
></DT
></DL
></DIV
><P
>This chapter provides an example of a Linux FailSafe configuration that
uses a three-node cluster, and some variations of that configuration. It includes
the following sections:<P
></P
><UL
><LI
><P
><A
HREF="threenode-example.html"
>Section 6.1</A
></P
></LI
><LI
><P
><A
HREF="threenode-script.html"
>Section 6.2</A
></P
></LI
><LI
><P
><A
HREF="localfailover-of-ip.html"
>Section 6.3</A
></P
></LI
></UL
></P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le40790-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="threenode-example.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Configuration Example CLI Script</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Example with Three-Node Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/doc-index.html
<HTML
><HEAD
><TITLE
>Index</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Glossary"
HREF="g7155.html"></HEAD
><BODY
CLASS="INDEX"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="g7155.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
>&nbsp;</TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><H1
><A
NAME="DOC-INDEX"
>Index</A
></H1
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7435"
>Symbols</A
></H1
><DL
><DT
>/etc/failsafe/config/cad.options file,
    <A
HREF="le23103-parent.html#AEN2019"
>Configuring /etc/failsafe/config/cad.options for Linux FailSafe</A
>
  </DT
><DT
>/etc/failsafe/config/cdbd.options file,
    <A
HREF="le23103-parent.html#AEN2057"
>Configuring /etc/failsafe/config/cdbd.options for Linux FailSafe</A
>
  </DT
><DT
>/etc/failsafe/config/cmond.options,
    <A
HREF="le23103-parent.html#LE32812-PARENT"
>Configuring /etc/failsafe/config/cmond.options for Linux FailSafe</A
>
  </DT
><DT
>/etc/hosts file,
    <A
HREF="le84104-parent.html#LE93615-PARENT"
>Planning Network Interface and IP Address Configuration</A
>
  </DT
><DT
>/etc/services file,
    <A
HREF="le23103-parent.html"
>Configuring System Files</A
>
  </DT
><DT
> FailSafe Cluster Manager CLI
  </DT
><DD
><DL
><DT
>template files,
    <A
HREF="le15969-parent.html#LE10673-PARENT"
>CLI Template Scripts</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>using input files,
    <A
HREF="le15969-parent.html#AEN2798"
>Using Input Files of CLI Commands</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
> resource group
  </DT
><DD
><DL
><DT
>definition,
    <A
HREF="le89728-parent.html#AEN341"
>Resource Group</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7462"
>A</A
></H1
><DL
><DT
>action scripts,
    <A
HREF="le89728-parent.html#AEN477"
>Action Scripts</A
>
  </DT
><DT
>activating Linux FailSafe,
    <A
HREF="fs-activatehaservices.html"
>Activating (Starting) Linux FailSafe</A
>
  </DT
><DT
>ACTIVE cluster status,
    <A
HREF="le16877-parent.html#AEN5565"
>Cluster Status</A
>
  </DT
><DT
>additional configuration issues,
    <A
HREF="le13651-parent.html"
>Additional Configuration Issues</A
>
  </DT
><DT
>administration daemon,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>administrative commands,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>application failover domain,
    <A
HREF="le89728-parent.html#AEN440"
>Failover Domain</A
>
  </DT
><DT
>Automatic booting,
    <A
HREF="le13651-parent.html"
>Additional Configuration Issues</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7488"
>B</A
></H1
><DL
><DT
>backup and restore,
    <A
HREF="le37674-parent.html"
>Backing Up and Restoring Configuration With Cluster Manager CLI</A
>
  </DT
><DT
>backup, CDB,
    <A
HREF="le37674-parent.html"
>Backing Up and Restoring Configuration With Cluster Manager CLI</A
>
  </DT
><DT
>base,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7499"
>C</A
></H1
><DL
><DT
>CAD options file,
    <A
HREF="le23103-parent.html#AEN2019"
>Configuring /etc/failsafe/config/cad.options for Linux FailSafe</A
>
  </DT
><DT
>CDB
  </DT
><DD
><DL
><DT
>backup and restore,
    <A
HREF="le37674-parent.html"
>Backing Up and Restoring Configuration With Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>maintenance,
    <A
HREF="le26593-parent.html#LE32026-PARENT"
>CDB Maintenance and Recovery</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>recovery,
    <A
HREF="le26593-parent.html#LE32026-PARENT"
>CDB Maintenance and Recovery</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>cdbd options file,
    <A
HREF="le23103-parent.html#AEN2057"
>Configuring /etc/failsafe/config/cdbd.options for Linux FailSafe</A
>
  </DT
><DT
>CLI
  </DT
><DD
><DL
><DT
>See FailSafe Cluster Manager CLI,
    <A
HREF="le15969-parent.html#AEN2741"
>Invoking the Cluster Manager CLI in Prompt Mode</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>cli log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>cluster,
    <A
HREF="le89728-parent.html#AEN286"
>Cluster</A
>
  </DT
><DD
><DL
><DT
>error recovery,
    <A
HREF="le26593-parent.html#LE37488-PARENT"
>Cluster Error Recovery</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>membership,
    <A
HREF="le17012-parent.html"
>Node Membership and Resets</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>cluster administration daemon,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>Cluster Manager CLI
  </DT
><DD
><DL
><DT
>See  FailSafe Cluster Manager CLI,
    <A
HREF="le15969-parent.html#AEN2741"
>Invoking the Cluster Manager CLI in Prompt Mode</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>Cluster Manager GUI
  </DT
><DD
><DL
><DT
>See  Linux FailSafe Cluster Manager GUI,
    <A
HREF="le74378-parent.html"
>The Linux FailSafe Cluster Manager Tools</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>cluster membership,
    <A
HREF="le17012-parent.html#AEN6420"
>No Membership Formed</A
>
  </DT
><DT
>cluster node,
    <A
HREF="le89728-parent.html#AEN271"
>Cluster Node (or Node)</A
>
  </DT
><DD
><DL
><DT
>See  node,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>cluster status,
    <A
HREF="le16877-parent.html#AEN5565"
>Cluster Status</A
>
  </DT
><DT
>cluster_admin  subsystem,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>cluster_control  subsystem,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>cluster_ha  subsystem,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>cmgr-templates directory,
    <A
HREF="le15969-parent.html#LE10673-PARENT"
>CLI Template Scripts</A
>
  </DT
><DT
>command scripts,
    <A
HREF="le15969-parent.html#LE41514-PARENT"
>CLI Command Scripts</A
>
  </DT
><DT
>commands,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>communicate with the network interface agent daemon,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>communication paths,
    <A
HREF="le15726-parent.html#AEN950"
>Communication Paths</A
>
  </DT
><DT
>components,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>concepts,
    <A
HREF="le89728-parent.html"
>Concepts</A
>
  </DT
><DT
>configuration parameters
  </DT
><DD
><DL
><DT
>filesystem,
    <A
HREF="le53947-parent.html#AEN1604"
>Configuration Parameters for Filesystems</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>IP address,
    <A
HREF="le84104-parent.html#LE15769-PARENT"
>Example IP Address Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>logical volumes,
    <A
HREF="le96329-parent.html#LE13082-PARENT"
>Configuration Parameters for Logical Volumes</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>configuration planning
  </DT
><DD
><DL
><DT
>disk,
    <A
HREF="le34382-parent.html"
>Disk Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>filesystem,
    <A
HREF="le53947-parent.html"
>Filesystem Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>IP address,
    <A
HREF="le84104-parent.html"
>IP Address Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>logical volume,
    <A
HREF="le96329-parent.html"
>Logical Volume Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>overview,
    <A
HREF="le57040-parent.html"
>Introduction to Configuration Planning</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>connectivity, testing with GUI,
    <A
HREF="le67057-parent.html"
>Overview of FailSafe Diagnostic Commands</A
>
  </DT
><DT
>control network
  </DT
><DD
><DL
><DT
>changing in cluster,
    <A
HREF="x6931.html"
>Changing Control Networks in a Cluster</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>defining for node,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>recovery,
    <A
HREF="le26593-parent.html#LE32749-PARENT"
>Control Network Failure Recovery</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>crsd  process,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>crsd log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>ctrl+c ramifications,
    <A
HREF="le36400-parent.html"
>System Operation Considerations</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7624"
>D</A
></H1
><DL
><DT
>deactivating HA services,
    <A
HREF="z957117933glen.html"
>Deactivating (Stopping) Linux FailSafe</A
>
  </DT
><DT
>defaults,
    <A
HREF="le59477-parent.html"
>Setting Configuration Defaults</A
>,
    <A
HREF="le85448-parent.html"
>Setting System Operation Defaults</A
>
  </DT
><DT
>dependency list,
    <A
HREF="le89728-parent.html#AEN396"
>Resource Type Dependency List</A
>
  </DT
><DT
>diagnostic command overview,
    <A
HREF="le67057-parent.html"
>Overview of FailSafe Diagnostic Commands</A
>
  </DT
><DT
>diags log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>diags_nodename log file,
    <A
HREF="le67057-parent.html"
>Overview of FailSafe Diagnostic Commands</A
>
  </DT
><DT
>DISCOVERY state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>disk configuration planning,
    <A
HREF="le34382-parent.html"
>Disk Configuration</A
>
  </DT
><DT
>disks, shared
  </DT
><DD
><DL
><DT
>and disk controller failure,
    <A
HREF="le85141-parent.html#AEN665"
>Disks</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>and disk failure,
    <A
HREF="le85141-parent.html#AEN665"
>Disks</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>documentation, related,
    <A
HREF="x81.html"
>Related Documentation</A
>
  </DT
><DT
>domain,
    <A
HREF="le89728-parent.html#AEN440"
>Failover Domain</A
>,
    <A
HREF="le53159-parent.html#AEN4235"
>Failover Domain</A
>
  </DT
><DT
>DOWN node state,
    <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7667"
>E</A
></H1
><DL
><DT
>error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7672"
>F</A
></H1
><DL
><DT
>failover,
    <A
HREF="le89728-parent.html#AEN419"
>Failover</A
>
  </DT
><DD
><DL
><DT
>and recovery processes,
    <A
HREF="le19267-parent.html"
>Failover and Recovery Processes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>description,
    <A
HREF="le19267-parent.html"
>Failover and Recovery Processes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>of disk storage,
    <A
HREF="le85141-parent.html#AEN665"
>Disks</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>resource group,
    <A
HREF="le41282-parent.html"
>Resource Group Failover</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>failover attributes,
    <A
HREF="le89728-parent.html#AEN452"
>Failover Attribute</A
>,
    <A
HREF="le53159-parent.html#AEN4263"
>Failover Attributes</A
>
  </DT
><DT
>failover domain,
    <A
HREF="le89728-parent.html#AEN440"
>Failover Domain</A
>,
    <A
HREF="le53159-parent.html#AEN4235"
>Failover Domain</A
>
  </DT
><DT
>failover policy,
    <A
HREF="le89728-parent.html#AEN425"
>Failover Policy</A
>
  </DT
><DD
><DL
><DT
>definition,
    <A
HREF="le53159-parent.html#FS-DEFINEFAILOVER"
> Defining a Failover Policy</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>failover attributes,
    <A
HREF="le53159-parent.html#AEN4263"
>Failover Attributes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>failover domain,
    <A
HREF="le53159-parent.html#AEN4235"
>Failover Domain</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>failover script,
    <A
HREF="le53159-parent.html#AEN4215"
>Failover Scripts</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#LE29671-PARENT"
>Testing Failover Policies with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>testing with GUI,
    <A
HREF="le42786-parent.html#FS-TESTFAILOVERPOL"
>Testing Failover Policies with the Cluster Manager GUI</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>failover script,
    <A
HREF="le53159-parent.html#AEN4215"
>Failover Scripts</A
>
  </DT
><DD
><DL
><DT
>description,
    <A
HREF="le89728-parent.html#AEN460"
>Failover Scripts</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>FailSafe Cluster Manager CLI
  </DT
><DD
><DL
><DT
>-c option,
    <A
HREF="le15969-parent.html#AEN2713"
>Entering CLI Commands Directly</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>-f option,
    <A
HREF="le15969-parent.html#AEN2798"
>Using Input Files of CLI Commands</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>-p option,
    <A
HREF="le15969-parent.html#AEN2741"
>Invoking the Cluster Manager CLI in Prompt Mode</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>command line execution,
    <A
HREF="le15969-parent.html#AEN2713"
>Entering CLI Commands Directly</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>command scripts,
    <A
HREF="le15969-parent.html#LE41514-PARENT"
>CLI Command Scripts</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>invoking a shell,
    <A
HREF="le15969-parent.html#AEN2957"
>Invoking a Shell from within CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>prompt mode,
    <A
HREF="le15969-parent.html#AEN2741"
>Invoking the Cluster Manager CLI in Prompt Mode</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>FailSafe Cluster Manager GUI
  </DT
><DD
><DL
><DT
>active guides,
    <A
HREF="fs-guioverview.html#AEN2645"
>Performing Tasks</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>FailSafe Manager
  </DT
><DD
><DL
><DT
>overview,
    <A
HREF="fs-guioverview.html#FS-TASKMANOVERVIEW"
>The FailSafe Manager</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>failsafe2  subsystem,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>fault-tolerant systems, definition,
    <A
HREF="le27299-parent.html"
>High Availability and Linux FailSafe</A
>
  </DT
><DT
>file locking and unlocking,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>filesystem
  </DT
><DD
><DL
><DT
>configuration parameters,
    <A
HREF="le53947-parent.html#AEN1604"
>Configuration Parameters for Filesystems</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>configuration planning,
    <A
HREF="le53947-parent.html"
>Filesystem Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>NFS, testing with CLI,
    <A
HREF="le37273-parent.html#AEN6186"
>Testing NFS Filesystems</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#AEN6162"
>Testing Filesystems</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>filesystem creation,
    <A
HREF="le39637-parent.html"
>Choosing and Configuring devices and Filesystems</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7759"
>G</A
></H1
><DL
><DT
>GUI
  </DT
><DD
><DL
><DT
>See  Linux FailSafe Cluster Manager GUI,
    <A
HREF="le74378-parent.html"
>The Linux FailSafe Cluster Manager Tools</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7765"
>H</A
></H1
><DL
><DT
>haStatus script,
    <A
HREF="le16877-parent.html#LE28488-PARENT"
>Viewing System Status with the haStatus CLI Script</A
>
  </DT
><DT
>ha_agent log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>ha_cilog  command,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>ha_cmsd  process,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>ha_cmsd log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>ha_filelock  command,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>ha_fileunlock  command,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>ha_fsd  process,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>ha_fsd log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>ha_gcd  process,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>ha_gcd log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>ha_http_ping2  command,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>ha_ifd  process,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>ha_ifd log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>ha_ifdadmin  command,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>ha_macconfig2  command,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>ha_script log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>ha_srmd  process,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>ha_srmd log,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>heartbeat network,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DT
>high-availability
  </DT
><DD
><DL
><DT
>infrastructure,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>hostname,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
><DT
>control network,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7836"
>I</A
></H1
><DL
><DT
>INACTIVE cluster status,
    <A
HREF="le16877-parent.html#AEN5565"
>Cluster Status</A
>
  </DT
><DT
>INACTIVE node state,
    <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
>
  </DT
><DT
>infrastructure,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>INITIALIZING state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>installing Linux FailSafe software,
    <A
HREF="le97755-parent.html"
>Installing Required Software</A
>
  </DT
><DT
>installing resource type,
    <A
HREF="le53159-parent.html#FS-LOADRESOURCETYPE"
>Installing (Loading) a Resource Type on a Cluster</A
>
  </DT
><DT
>INTERNAL ERROR error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>INTERNAL ERROR state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>IP address
  </DT
><DD
><DL
><DT
>configuration planning,
    <A
HREF="le84104-parent.html"
>IP Address Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>control network,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>local failover,
    <A
HREF="localfailover-of-ip.html"
>Local Failover of an IP Address</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>overview,
    <A
HREF="le85141-parent.html#LE80214-PARENT"
>Network Interfaces and IP Addresses</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>planning,
    <A
HREF="le57040-parent.html"
>Introduction to Configuration Planning</A
>,
    <A
HREF="le84104-parent.html"
>IP Address Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>resource,
    <A
HREF="le53159-parent.html#IPATTRIBUTES"
>IP Address Resource Attributes</A
>,
    <A
HREF="le53159-parent.html#LE20812-PARENT"
>Specifying Resource Attributes with Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7878"
>L</A
></H1
><DL
><DT
>layers,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>Linux FailSafe
  </DT
><DD
><DL
><DT
>features,
    <A
HREF="le94860-parent.html"
>Additional Linux FailSafe Features</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>hardware components,
    <A
HREF="le32900-parent.html"
>Hardware Components of a Linux FailSafe Cluster</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>installation,
    <A
HREF="le97755-parent.html"
>Installing Required Software</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>See  Linux FailSafe,
    <A
HREF="le27299-parent.html"
>High Availability and Linux FailSafe</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>Linux FailSafe Cluster Manager GUI
  </DT
><DD
><DL
><DT
>overview,
    <A
HREF="fs-guioverview.html"
>Using the Linux FailSafe Cluster Manager GUI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>See  Linux FailSafe Cluster Manager GUI,
    <A
HREF="fs-guioverview.html"
>Using the Linux FailSafe Cluster Manager GUI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>tasksets,
    <A
HREF="fs-guioverview.html#AEN2668"
>Using the Linux FailSafe Tasksets</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>local failover, IP address,
    <A
HREF="localfailover-of-ip.html"
>Local Failover of an IP Address</A
>
  </DT
><DT
>lock a file,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>log files,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>,
    <A
HREF="le28847-parent.html"
>FailSafe Log Files</A
>
  </DT
><DT
>log groups,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>log level,
    <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
>
  </DT
><DT
>log messages,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>logical volume
  </DT
><DD
><DL
><DT
>configuration planning,
    <A
HREF="le96329-parent.html"
>Logical Volume Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>creation,
    <A
HREF="le39637-parent.html"
>Choosing and Configuring devices and Filesystems</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>owner,
    <A
HREF="le39637-parent.html"
>Choosing and Configuring devices and Filesystems</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>parameters,
    <A
HREF="le96329-parent.html#LE13082-PARENT"
>Configuration Parameters for Logical Volumes</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>logical volume creation,
    <A
HREF="le39637-parent.html"
>Choosing and Configuring devices and Filesystems</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7933"
>M</A
></H1
><DL
><DT
>MAC address modification and display,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>maintenance mode,
    <A
HREF="le41282-parent.html#FS-STOPMONITORINGRESGROUP"
>Stop Monitoring of a Resource Group (Maintenance Mode)</A
>
  </DT
><DT
>membership,
    <A
HREF="le89728-parent.html#AEN292"
>Node Membership</A
>
  </DT
><DD
><DL
><DT
>cluster,
    <A
HREF="le17012-parent.html"
>Node Membership and Resets</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>node,
    <A
HREF="le17012-parent.html"
>Node Membership and Resets</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>message logging,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>message paths diagram,
    <A
HREF="le15726-parent.html#AEN993"
>When Does FailSafe Execute Action and Failover Scripts</A
>
  </DT
><DT
>MONITOR ACTIVITY UNKNOWN error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>monitoring
  </DT
><DD
><DL
><DT
>processes,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>monitoring interval,
    <A
HREF="tv.html"
>Configuring Timeout Values and Monitoring Intervals</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN7964"
>N</A
></H1
><DL
><DT
>name restrictions,
    <A
HREF="le28499-parent.html"
>Name Restrictions</A
>
  </DT
><DT
>Netscape node check,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>Netscape servers, testing with CLI,
    <A
HREF="le37273-parent.html#LE62758-PARENT"
>Testing Resource Groups</A
>
  </DT
><DT
>Netscape Web
  </DT
><DD
><DL
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#AEN6228"
>Testing Netscape-web Resources</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>network connectivity
  </DT
><DD
><DL
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#LE11186-PARENT"
>Testing Network Connectivity with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>testing with GUI,
    <A
HREF="le42786-parent.html#FS-TESTCONNECTIVITY"
>Testing Connectivity with the Cluster Manager GUI</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>network interface
  </DT
><DD
><DL
><DT
>configuration,
    <A
HREF="le97738-parent.html"
>Configuring Network Interfaces</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>overview,
    <A
HREF="le85141-parent.html#LE80214-PARENT"
>Network Interfaces and IP Addresses</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>NFS filesystem testing with CLI,
    <A
HREF="le37273-parent.html#AEN6186"
>Testing NFS Filesystems</A
>
  </DT
><DT
>NO AVAILABLE NODES error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>NO ERROR error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>node,
    <A
HREF="le89728-parent.html#AEN271"
>Cluster Node (or Node)</A
>
  </DT
><DD
><DL
><DT
>creation,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>definition,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>deleting,
    <A
HREF="z957104627glen.html#FS-MODIFYDELMACHINE"
>Modifying and Deleting Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>displaying,
    <A
HREF="z957104627glen.html#AEN3333"
>Displaying Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>error recovery,
    <A
HREF="le26593-parent.html#LE22743-PARENT"
>Node Error recovery</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>membership,
    <A
HREF="le17012-parent.html"
>Node Membership and Resets</A
>,
    <A
HREF="le17012-parent.html#AEN6393"
>Node Membership and Tie-Breaker Node</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>modifying,
    <A
HREF="z957104627glen.html#FS-MODIFYDELMACHINE"
>Modifying and Deleting Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>reset,
    <A
HREF="fs-resetmachine.html"
>Resetting Nodes</A
>,
    <A
HREF="le17012-parent.html#AEN6393"
>Node Membership and Tie-Breaker Node</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>state,
    <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>status,
    <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>wait time,
    <A
HREF="z957104627glen.html#FS-SETFSPARAMETERS"
>Linux FailSafe HA Parameters</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>node membership,
    <A
HREF="le89728-parent.html#AEN292"
>Node Membership</A
>
  </DT
><DT
>NODE NOT AVAILABLE error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>node not in a cluster diagram,
    <A
HREF="le15726-parent.html#AEN950"
>Communication Paths</A
>
  </DT
><DT
>NODE UNKNOWN error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>node-specific resource,
    <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESOURCE"
>Defining a Node-Specific Resource</A
>
  </DT
><DT
>node-specific resource type,
    <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESTYPE"
>Defining a Node-Specific Resource Type</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN8045"
>O</A
></H1
><DL
><DT
>OFFLINE state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>OFFLINE-PENDING state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>ONLINE state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>ONLINE-MAINTENANCE state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>ONLINE-PENDING state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>ONLINE-READY state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN8065"
>P</A
></H1
><DL
><DT
>plug-ins,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DT
>pool,
    <A
HREF="le89728-parent.html#AEN280"
>Pool</A
>
  </DT
><DT
>process
  </DT
><DD
><DL
><DT
>membership,
    <A
HREF="le89728-parent.html#AEN300"
>Process Membership</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>monitoring,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN8079"
>R</A
></H1
><DL
><DT
>re-MACing
  </DT
><DD
><DL
><DT
>dedicated backup interfaces required,
    <A
HREF="le84104-parent.html#LE93615-PARENT"
>Planning Network Interface and IP Address Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>determining if required,
    <A
HREF="le84104-parent.html#LE93615-PARENT"
>Planning Network Interface and IP Address Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>read/write actions to the cluster configuration database diagram,
    <A
HREF="le15726-parent.html#AEN950"
>Communication Paths</A
>
  </DT
><DT
>recovery
  </DT
><DD
><DL
><DT
>overview,
    <A
HREF="le14340-parent.html"
>Overview of FailSafe System Recovery</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>procedures,
    <A
HREF="le26593-parent.html"
>Recovery Procedures</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>resetting nodes,
    <A
HREF="fs-resetmachine.html"
>Resetting Nodes</A
>,
    <A
HREF="le17012-parent.html#AEN6393"
>Node Membership and Tie-Breaker Node</A
>
  </DT
><DT
>resource
  </DT
><DD
><DL
><DT
>configuration overview,
    <A
HREF="le53159-parent.html"
>Resource Configuration</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>definition,
    <A
HREF="le89728-parent.html#AEN307"
>Resource</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>definition overview,
    <A
HREF="le53159-parent.html#FS-DEFINERESOURCE"
>Defining Resources</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>deleting,
    <A
HREF="le53159-parent.html#FS-MODIFYDELRESOURCE"
>Modifying and Deleting Resources</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>dependencies,
    <A
HREF="le53159-parent.html#FS-ADDDEPTORESOURCE"
>Adding Dependency to a Resource</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>dependency list,
    <A
HREF="le89728-parent.html#AEN396"
>Resource Type Dependency List</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>displaying,
    <A
HREF="le53159-parent.html#AEN3865"
>Displaying Resources</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>IP address,
    <A
HREF="le53159-parent.html#IPATTRIBUTES"
>IP Address Resource Attributes</A
>,
    <A
HREF="le53159-parent.html#LE20812-PARENT"
>Specifying Resource Attributes with Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>modifying,
    <A
HREF="le53159-parent.html#FS-MODIFYDELRESOURCE"
>Modifying and Deleting Resources</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>name,
    <A
HREF="le89728-parent.html#AEN334"
>Resource Name</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>Netscape Web, testing with CLI,
    <A
HREF="le37273-parent.html#AEN6228"
>Testing Netscape-web Resources</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>NFS,
    <A
HREF="le40511-parent.html"
>Resource Group Creation Example</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>node-specific,
    <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESOURCE"
>Defining a Node-Specific Resource</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>owner,
    <A
HREF="le16877-parent.html#AEN5474"
>Resource Owner</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>recovery,
    <A
HREF="le26593-parent.html#LE15209-PARENT"
>Resource Error Recovery</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>statd, testing with CLI,
    <A
HREF="le37273-parent.html#AEN6207"
>Testing statd Resources</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>status,
    <A
HREF="le16877-parent.html#AEN5278"
>Monitoring Resource and Reset Serial Line with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>resource group
  </DT
><DD
><DL
><DT
>bringing online,
    <A
HREF="le41282-parent.html"
>Resource Group Failover</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>creation example,
    <A
HREF="le40511-parent.html"
>Resource Group Creation Example</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>definition,
    <A
HREF="le53159-parent.html#FS-DEFINERESGROUP"
>Defining Resource Groups</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>deleting,
    <A
HREF="le53159-parent.html#FS-MODIFYRESGROUP"
>Modifying and Deleting Resource Groups</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>displaying,
    <A
HREF="le53159-parent.html#AEN4548"
>Displaying Resource Groups</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>failover,
    <A
HREF="le41282-parent.html"
>Resource Group Failover</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>modifying,
    <A
HREF="le53159-parent.html#FS-MODIFYRESGROUP"
>Modifying and Deleting Resource Groups</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>monitoring,
    <A
HREF="le41282-parent.html#FS-STOPMONITORINGRESGROUP"
>Stop Monitoring of a Resource Group (Maintenance Mode)</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>moving,
    <A
HREF="le41282-parent.html#FS-MOVERESGROUP"
>Moving a Resource Group</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>recovery,
    <A
HREF="le26593-parent.html#LE13349-PARENT"
>Resource Group Maintenance and Error Recovery</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>resume monitoring,
    <A
HREF="le41282-parent.html#AEN5785"
>Resume Monitoring of a Resource Group with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>state,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>status,
    <A
HREF="le16877-parent.html#AEN5287"
>Querying Resource Status with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>stop monitoring,
    <A
HREF="le41282-parent.html#FS-STOPMONITORINGRESGROUP"
>Stop Monitoring of a Resource Group (Maintenance Mode)</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>taking offline,
    <A
HREF="le41282-parent.html"
>Resource Group Failover</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#LE62758-PARENT"
>Testing Resource Groups</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>resource type
  </DT
><DD
><DL
><DT
>definition,
    <A
HREF="le53159-parent.html#FS-DEFINERESOURCETYPE"
>Defining a Resource Type</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>deleting,
    <A
HREF="le53159-parent.html#FS-MODIFYRESTYPE"
>Modifying and Deleting Resource Types</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>dependencies,
    <A
HREF="le53159-parent.html#FS-ADDDEPTORESTYPE"
>Adding Dependencies to a Resource Type</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>dependency list,
    <A
HREF="le89728-parent.html#AEN396"
>Resource Type Dependency List</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>description,
    <A
HREF="le89728-parent.html#AEN315"
>Resource Type</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>displaying,
    <A
HREF="le53159-parent.html#AEN4169"
>Displaying Resource Types</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>installing,
    <A
HREF="le53159-parent.html#FS-LOADRESOURCETYPE"
>Installing (Loading) a Resource Type on a Cluster</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>modifying,
    <A
HREF="le53159-parent.html#FS-MODIFYRESTYPE"
>Modifying and Deleting Resource Types</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>NFS,
    <A
HREF="le40511-parent.html"
>Resource Group Creation Example</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>node-specific,
    <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESTYPE"
>Defining a Node-Specific Resource Type</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>restore, CDB,
    <A
HREF="le37674-parent.html"
>Backing Up and Restoring Configuration With Cluster Manager CLI</A
>
  </DT
><DT
>run-time failover domain,
    <A
HREF="le53159-parent.html#AEN4235"
>Failover Domain</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN8201"
>S</A
></H1
><DL
><DT
>SCSI ID parameter,
    <A
HREF="le13651-parent.html"
>Additional Configuration Issues</A
>
  </DT
><DT
>serial cable recovery,
    <A
HREF="le26593-parent.html#LE33694-PARENT"
>Serial Cable Failure Recovery</A
>
  </DT
><DT
>serial connections
  </DT
><DD
><DL
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#AEN6044"
> Testing the Serial Connections with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>testing with GUI,
    <A
HREF="le42786-parent.html#FS-TESTCONNECTIVITY"
>Testing Connectivity with the Cluster Manager GUI</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>serial port configuration,
    <A
HREF="le90681-parent.html#AEN2461"
>Changing the getty Process</A
>
  </DT
><DT
>SPLIT RESOURCE GROUP error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>SRMD EXECUTABLE ERROR error state,
    <A
HREF="le16877-parent.html#AEN5408"
>Resource Group Error State</A
>
  </DT
><DT
>starting Linux FailSafe,
    <A
HREF="fs-activatehaservices.html"
>Activating (Starting) Linux FailSafe</A
>
  </DT
><DT
>statd
  </DT
><DD
><DL
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#AEN6207"
>Testing statd Resources</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>state, resource group,
    <A
HREF="le16877-parent.html#AEN5334"
>Resource Group State</A
>
  </DT
><DT
>status
  </DT
><DD
><DL
><DT
>cluster,
    <A
HREF="le16877-parent.html#AEN5565"
>Cluster Status</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>node,
    <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>resource,
    <A
HREF="le16877-parent.html#AEN5278"
>Monitoring Resource and Reset Serial Line with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>resource group,
    <A
HREF="le16877-parent.html#LE29367-PARENT"
>Resource Group Status</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>system controller,
    <A
HREF="le16877-parent.html#AEN5301"
>Pinging a System Controller with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>system, overview,
    <A
HREF="le16877-parent.html"
>System Status</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>stopping HA services,
    <A
HREF="z957117933glen.html"
>Deactivating (Stopping) Linux FailSafe</A
>
  </DT
><DT
>stopping Linux FailSafe,
    <A
HREF="z957117933glen.html"
>Deactivating (Stopping) Linux FailSafe</A
>
  </DT
><DT
>system configuration defaults,
    <A
HREF="le59477-parent.html"
>Setting Configuration Defaults</A
>
  </DT
><DT
>system controller
  </DT
><DD
><DL
><DT
>defining for node,
    <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>status,
    <A
HREF="le16877-parent.html#AEN5301"
>Pinging a System Controller with the Cluster Manager CLI</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>system files,
    <A
HREF="le23103-parent.html"
>Configuring System Files</A
>
  </DT
><DT
>system operation defaults,
    <A
HREF="le85448-parent.html"
>Setting System Operation Defaults</A
>
  </DT
><DT
>system software
  </DT
><DD
><DL
><DT
>communication paths,
    <A
HREF="le15726-parent.html#AEN950"
>Communication Paths</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>components,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DD
><DL
></DL
></DD
><DT
>layers,
    <A
HREF="le15726-parent.html#AEN750"
>Layers</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
><DT
>system status,
    <A
HREF="le16877-parent.html"
>System Status</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN8280"
>T</A
></H1
><DL
><DT
>template files,
    <A
HREF="le15969-parent.html#LE10673-PARENT"
>CLI Template Scripts</A
>
  </DT
><DT
>three-node cluster, example,
    <A
HREF="threenode-example.html"
>Linux FailSafe Example with Three-Node Cluster</A
>
  </DT
><DT
>tie-breaker node,
    <A
HREF="le17012-parent.html#AEN6393"
>Node Membership and Tie-Breaker Node</A
>
  </DT
><DT
>timeout values,
    <A
HREF="tv.html"
>Configuring Timeout Values and Monitoring Intervals</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN8294"
>U</A
></H1
><DL
><DT
>UNKNOWN node state,
    <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
>
  </DT
><DT
>unlock a file,
    <A
HREF="le15726-parent.html#AEN1064"
>Components</A
>
  </DT
><DT
>UP node state,
    <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
>
  </DT
></DL
><H1
CLASS="INDEXDIV"
><A
NAME="AEN8305"
>V</A
></H1
><DL
><DT
>volume
  </DT
><DD
><DL
><DT
>testing with CLI,
    <A
HREF="le37273-parent.html#AEN6140"
> Testing Logical Volumes</A
>
  </DT
><DD
><DL
></DL
></DD
></DL
></DD
></DL
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="g7155.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>&nbsp;</TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Glossary</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>&nbsp;</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/f42.html
<HTML
><HEAD
><TITLE
>About This Guide</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="NEXT"
TITLE="Audience"
HREF="x46.html"></HEAD
><BODY
CLASS="PREFACE"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="index.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="x46.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="PREFACE"
><H1
><A
NAME="AEN42"
>About This Guide</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>1. <A
HREF="x46.html"
>Audience</A
></DT
><DT
>2. <A
HREF="x50.html"
>Structure of This Guide</A
></DT
><DT
>3. <A
HREF="x81.html"
>Related Documentation</A
></DT
><DT
>4. <A
HREF="x149.html"
>Conventions Used in This Guide</A
></DT
></DL
></DIV
><P
>This guide describes the configuration and administration of a Linux
FailSafe&#8482; highly available system.</P
><P
>This guide was prepared in conjunction with Release 1.0 of the Linux
FailSafe product.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="index.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="x46.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe&#8482;  Administrator's Guide</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Audience</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/fs-activatehaservices.html
<HTML
><HEAD
><TITLE
>Activating (Starting) Linux FailSafe</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="System Operation Considerations"
HREF="le36400-parent.html"><LINK
REL="NEXT"
TITLE="System Status"
HREF="le16877-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le36400-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le16877-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="FS-ACTIVATEHASERVICES"
>7.3. Activating (Starting) Linux FailSafe</A
></H1
><P
> After you have configured your Linux FailSafe system
and run diagnostic tests on its components, you can activate the highly available
services by starting Linux FailSafe. You can start Linux FailSafe on a systemwide
basis, on all of the nodes in a cluster, or on a specified node only.</P
><DIV
CLASS="CAUTION"
><P
></P
><TABLE
CLASS="CAUTION"
BORDER="1"
WIDTH="100%"
><TR
><TD
ALIGN="CENTER"
><B
>Caution</B
></TD
></TR
><TR
><TD
ALIGN="LEFT"
><P
>When you start HA services on a subset of the nodes, you should make
sure that resource groups are not running on the other nodes in the cluster. For
example, if a cluster contains nodes N1, N2, and N3 and HA services are started
on nodes N1 and N2 but not on node N3, you should make sure that resource
groups are not running on node N3. Linux FailSafe will not perform exclusivity
checks on nodes where HA services are not started.</P
></TD
></TR
></TABLE
></DIV
><P
>When you start HA services,  the following actions are performed:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>All nodes in the cluster are enabled in the CDB</P
></LI
><LI
><P
>Linux FailSafe returns success to the user after modifying
the CDB</P
></LI
><LI
><P
>The local CMOND gets notification from <TT
CLASS="LITERAL"
>cdbd</TT
></P
></LI
><LI
><P
>The local CMOND starts all HA processes (CMSD, GCD, SRMD,
FSD) and IFD.</P
></LI
><LI
><P
>CMOND sets <TT
CLASS="FILENAME"
>failsafe2</TT
> <TT
CLASS="FILENAME"
>chkconfig</TT
> flag to <TT
CLASS="LITERAL"
>on</TT
>.</P
></LI
></OL
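><P
>To confirm the final step, you can query the flag from a shell prompt,
for example with a Red Hat-style <B
CLASS="COMMAND"
>chkconfig</B
> utility (an assumption; your distribution's service-management tool
may differ):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>% <TT
CLASS="USERINPUT"
><B
>chkconfig --list failsafe2</B
></TT
></PRE
></TD
></TR
></TABLE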
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5209"
>7.3.1. Activating Linux FailSafe with the Cluster Manager GUI</A
></H2
><P
>To start Linux FailSafe services using the Cluster Manager GUI, perform
the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Start
FailSafe HA Services&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5221"
>7.3.2. Activating Linux FailSafe with the Cluster Manager CLI</A
></H2
><P
>To activate Linux FailSafe in a cluster, use the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services </B
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>] [<TT
CLASS="USERINPUT"
><B
>for cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
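><P
>For example, to start HA services on all nodes in a hypothetical cluster
named <TT
CLASS="LITERAL"
>test-cluster</TT
>, you might enter:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services for cluster test-cluster</B
></TT
></PRE
></TD
></TR
></TABLE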
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le36400-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le16877-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>System Operation Considerations</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>System Status</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/fs-guioverview.html
<HTML
><HEAD
><TITLE
>Using the Linux FailSafe Cluster Manager GUI</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Administration Tools"
HREF="le73346-parent.html"><LINK
REL="PREVIOUS"
TITLE="The Linux FailSafe Cluster Manager Tools"
HREF="le74378-parent.html"><LINK
REL="NEXT"
TITLE="Using the FailSafe Cluster Manager CLI"
HREF="le15969-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le74378-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 4. Linux FailSafe Administration Tools</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le15969-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="FS-GUIOVERVIEW"
>4.2. Using the Linux FailSafe Cluster Manager GUI</A
></H1
><P
> The
Linux FailSafe Cluster Manager GUI lets you configure, administer, and monitor
a cluster using a graphical user interface. To ensure that the required privileges
are available for performing all of the tasks, you should log in to the GUI
as <TT
CLASS="FILENAME"
>root</TT
>. However, some or all privileges can be granted
to any user by the system administrator using the Privilege Manager, part
of the Linux Interactive Desktop System Administration (<B
CLASS="COMMAND"
>sysadmdesktop</B
>) product. For more information, see the <I
CLASS="CITETITLE"
>Personal System
Administration Guide</I
>.</P
><P
>The Cluster Manager GUI consists of the FailSafe Cluster View and the
FailSafe Manager and its tasks and tasksets. These interfaces are described
in the following sections.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-CLUSTERVIEWOVERVIEW"
>4.2.1. The FailSafe Cluster View</A
></H2
><P
>The FailSafe Cluster View window provides the following capabilities:</P
><P
></P
><UL
><LI
><P
>Shows the relationships among the cluster items (nodes, resource
groups, etc.)</P
></LI
><LI
><P
>Gives access to every item's configuration and status details</P
></LI
><LI
><P
>Shows health of the cluster</P
></LI
><LI
><P
>Gives access to the FailSafe Manager and to the SYSLOG</P
></LI
><LI
><P
>Gives access to Help information</P
></LI
></UL
><P
>From the FailSafe Cluster View, the user can click on any item to display
key information about it. The items that can be viewed in this way are the
following:</P
><P
></P
><UL
><LI
><P
>Clusters</P
></LI
><LI
><P
>Cluster Nodes</P
></LI
><LI
><P
>Resource Types</P
></LI
><LI
><P
>Resources</P
></LI
><LI
><P
>Resource Groups</P
></LI
><LI
><P
>Failover Policies</P
></LI
></UL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-TASKMANOVERVIEW"
>4.2.2. The FailSafe Manager</A
></H2
><P
>The FailSafe Manager provides access to the
tasks that help you set up and administer your highly available cluster. The
FailSafe Manager also provides access to the FailSafe Guided Configuration
tasksets.</P
><P
></P
><UL
><LI
><P
>Tasksets consist of a group of tasks collected together to
accomplish a larger goal. For example, &#8220;Set Up a New Cluster&#8221;
steps you through the process for creating a new cluster and allows you to
launch the necessary tasks by simply clicking their titles.</P
></LI
><LI
><P
>FailSafe tasksets let you set up and monitor all the components
of a Linux FailSafe cluster using an easy-to-use graphical user interface.</P
></LI
></UL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE35303-PARENT"
>4.2.3. Starting the FailSafe Manager GUI</A
></H2
><P
>You can start the FailSafe Manager GUI by launching either the FailSafe
Manager or the FailSafe Cluster View.</P
><P
>To launch the FailSafe Manager, use one of these methods:</P
><P
></P
><UL
><LI
><P
>Choose &#8220;FailSafe Manager&#8221; from the desktop (KDE
or GNOME) menu.</P
><P
>You will need to restart the desktop panel after installing Linux FailSafe
to see the FailSafe entry in the appropriate menu. To restart the panel, right-click
(ring-click) on the panel, and select <TT
CLASS="USERINPUT"
><B
>restart</B
></TT
>. In order
for this to take effect, the <TT
CLASS="FILENAME"
>sysadm_failsafe_client</TT
> package
must be installed on the client system.</P
></LI
><LI
><P
>Enter the following command line:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>% <TT
CLASS="USERINPUT"
><B
>/usr/bin/fstask</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>In your Web browser, enter <TT
CLASS="LITERAL"
>http://<TT
CLASS="REPLACEABLE"
><I
>server</I
></TT
>/FailSafeManager/</TT
> (where  <TT
CLASS="REPLACEABLE"
><I
>server</I
></TT
>
is the name of node in the pool or cluster that you want to administer) and
press Enter. At the resulting Web page, click on the shield icon.</P
><P
>You can use this method of launching FailSafe Manager if you want to
run the Cluster Manager GUI from a non-Linux system. If you are running
the Cluster Manager GUI on a Linux system, the preferred method is to use
the desktop panel menu or <B
CLASS="COMMAND"
>/usr/bin/fstask</B
>.</P
><P
>This method of launching FailSafe Manager works only if you have installed
the Java Plug-in, exited all Java processes, restarted your browser, and enabled
Java. If there is a long delay before the shield appears, you can click on
the &#8220;non plug-in&#8221; link, but operational glitches may result
from running the browser's built-in Java.</P
></LI
></UL
><P
>To launch the FailSafe Cluster View, use one of these methods:<P
></P
><UL
><LI
><P
>Choose "FailSafe Manager" from the desktop (KDE or GNOME)
menu.  </P
><P
>You must restart the desktop panel after installing Linux FailSafe to
see the FailSafe entry in the appropriate menu. To restart the panel, right-click
(ring-click) on the panel, and select restart. In order for this to take effect,
the <TT
CLASS="LITERAL"
>sysadm_failsafe_client</TT
> package must be installed on
the client system. </P
></LI
><LI
><P
>Enter the following command line:<TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>% <TT
CLASS="USERINPUT"
><B
>/usr/bin/fsdetail</B
></TT
></PRE
></TD
></TR
></TABLE
></P
></LI
></UL
></P
><P
>The Cluster Manager GUI allows you to administer the entire cluster
from a single point of administration. When Linux FailSafe daemons have been
activated in a cluster, you must be sure to connect to a node that is running
all the Linux FailSafe daemons to obtain the correct cluster status. When
Linux FailSafe daemons have not yet been activated in a cluster, you can connect
to any node in the pool.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE49425-PARENT"
>4.2.4. Opening the FailSafe Cluster View window</A
></H2
><P
>You can open the FailSafe Cluster View window using either of the following
methods:</P
><P
></P
><UL
><LI
><P
>Click the &#8220;FailSafe Cluster View&#8221; button at the
bottom of the FailSafe Manager window.</P
><P
>This is the preferred method of opening the FailSafe Cluster View window
if you will have both the FailSafe Manager and the FailSafe Cluster View windows
open at the same time, since it reuses the existing Java process to open the
second window instead of starting a new one, which saves memory usage on the
client.</P
></LI
><LI
><P
>Open the FailSafe Cluster View window directly when you start
the FailSafe Manager GUI, as described above in <A
HREF="fs-guioverview.html#LE35303-PARENT"
>Section 4.2.3</A
>.</P
></LI
></UL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2636"
>4.2.5. Viewing Cluster Item Details</A
></H2
><P
>To view the details on any cluster item, use the following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Open the FailSafe Cluster View Window.</P
></LI
><LI
><P
>Click the name or icon of any item.</P
></LI
></OL
><P
>The configuration and status details will appear in a separate window.
To see the details in the same window, select Options. When you then click
on the Show Details option, the status details will appear in the right side
of the window.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2645"
>4.2.6. Performing Tasks</A
></H2
><P
>To perform an individual
task with the FailSafe GUI, do the following:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Click the name of a category in the left-hand column of the
FailSafe Manager window.</P
><P
>A list of individual tasks and task topics appears in the right-hand
column.</P
></LI
><LI
><P
>Click the title of a task in the right-hand column.</P
><P
>The task window appears.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>You can click any blue text to get more information about that concept
or input field.</P
></BLOCKQUOTE
></DIV
></LI
><LI
><P
>Enter information in the appropriate fields and click <SPAN
CLASS="INTERFACE"
>OK</SPAN
> to complete the task. (Some tasks consist of
more than one window; in these cases, click <SPAN
CLASS="INTERFACE"
>Next</SPAN
> to go to the next window, complete the information there, and
then click <SPAN
CLASS="INTERFACE"
>OK</SPAN
>.)</P
><P
>A dialog box appears confirming the successful completion of the task
and displaying additional tasks that you can launch.</P
></LI
><LI
><P
>Continue launching tasks as needed.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2668"
>4.2.7. Using the Linux FailSafe Tasksets</A
></H2
><P
>The FailSafe Manager
GUI also provides tasksets to guide you through the steps necessary to complete
a goal that encompasses several different tasks. Follow these steps to access
the Linux FailSafe tasksets:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Click the Guided Configuration category in the left-hand column
of the FailSafe Manager window.</P
><P
>A list of tasksets appears in the right-hand column.</P
></LI
><LI
><P
>Click a taskset in the right-hand column.</P
><P
>A window appears and lists the series of tasks necessary to accomplish
the desired goal.</P
></LI
><LI
><P
>Follow the steps shown, launching tasks by clicking them.</P
><P
>As you click a task, its task window appears. After you complete all
of the tasks listed, you can close the taskset window by double-clicking the
upper left corner of its window or clicking Close if there is a Close button
on the window.</P
></LI
></OL
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le74378-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le15969-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>The Linux FailSafe Cluster Manager Tools</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73346-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Using the FailSafe Cluster Manager CLI</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>ING"
>% <TT
CLASS="USERINPUT"
><B
>/usr/bin/fstask</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>In your Web browser, enter <TT
CLASS="LITERAL"
>http://<TT
CLASS="REPLACEABLE"
><I
>server</I
></TT
>/FailSafeManager/</TT
> (where  <TT
CLASS="REPLACEABLE"
><I
>server</I
></TT
>
is the name of node in the pool or cluster that you want to administer) and
press Enter. At the rhtml/fs-resetmachine.html010064400016050000001000000101510717757367300163110ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Resetting Nodes</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="Deactivating (Stopping) Linux FailSafe"
HREF="z957117933glen.html"><LINK
REL="NEXT"
TITLE="Backing Up and Restoring Configuration With Cluster
Manager CLI"
HREF="le37674-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="z957117933glen.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le37674-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="FS-RESETMACHINE"
>7.7. Resetting Nodes</A
></H1
><P
>You can use Linux FailSafe to reset nodes in a cluster. This sends a
reset command to the system controller port on the specified node. When the
node is reset, other nodes in the cluster will detect this and remove the
node from the active cluster, reallocating any resource groups that were allocated
on that node onto a backup node. The backup node used depends on how you have
configured your system. </P
><P
>Once the node reboots, it will rejoin the cluster. Some resource groups
might move back to the node, depending on how you have configured your system.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5874"
>7.7.1. Resetting a Node with the Cluster Manager GUI</A
></H2
><P
>To reset a Linux FailSafe node using the Cluster Manager GUI, perform
the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Reset
a Node&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the node to reset.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5886"
>7.7.2. Resetting a Node with the Cluster Manager CLI</A
></H2
><P
>When Linux FailSafe is running, you can reboot a node with the following
Cluster Manager CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin reset node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>This command uses the Linux FailSafe daemons to reset the specified
node.</P
><P
>You can reset a node in a cluster even when the Linux FailSafe daemons
are not running by using the <B
CLASS="COMMAND"
>standalone</B
> option of the <B
CLASS="COMMAND"
>admin reset </B
>command of the CLI:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin reset standalone node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>This command does not go through the Linux FailSafe daemons.</P
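><P
>For example, a session that resets a node named <TT
CLASS="LITERAL"
>node1</TT
> (an illustrative node name) while the Linux FailSafe daemons are not
running might look like the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin reset standalone node node1</B
></TT
></PRE
></TD
></TR
></TABLE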
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="z957117933glen.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le37674-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Deactivating (Stopping) Linux FailSafe</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Backing Up and Restoring Configuration With Cluster
Manager CLI</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Linux FailSafe System Log Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Resource Configuration"
HREF="le53159-parent.html"><LINK
REL="NEXT"
TITLE="Resource Group Creation Example"
HREF="le40511-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le53159-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le40511-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="FS-SETLOGPARAMS"
>5.6. Linux FailSafe System Log Configuration</A
></H1
><P
>Linux
FailSafe maintains system logs for each of the Linux FailSafe daemons. You
can customize the system logs according to the level of logging you wish to
maintain.</P
><P
>A log group is a set of processes that log to the same log file according
to the same logging configuration. Each Linux FailSafe daemon forms its own
log group. Linux FailSafe maintains the following log groups:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="FILENAME"
>cli</TT
></DT
><DD
><P
>Commands log</P
></DD
><DT
><TT
CLASS="FILENAME"
>crsd</TT
></DT
><DD
><P
>Cluster reset services (<TT
CLASS="FILENAME"
>crsd</TT
>) log</P
></DD
><DT
><TT
CLASS="FILENAME"
>diags</TT
></DT
><DD
><P
>Diagnostics log</P
></DD
><DT
><TT
CLASS="FILENAME"
>ha_agent</TT
></DT
><DD
><P
>HA monitoring agents (<TT
CLASS="FILENAME"
>ha_ifmx2</TT
>) log</P
></DD
><DT
><TT
CLASS="FILENAME"
>ha_cmsd</TT
></DT
><DD
><P
>Cluster membership daemon (<TT
CLASS="FILENAME"
>ha_cmsd</TT
>) log</P
></DD
><DT
><TT
CLASS="FILENAME"
>ha_fsd</TT
></DT
><DD
><P
>Linux FailSafe daemon (<TT
CLASS="FILENAME"
>ha_fsd</TT
>) log</P
></DD
><DT
><TT
CLASS="FILENAME"
>ha_gcd</TT
></DT
><DD
><P
>Group communication daemon (<TT
CLASS="FILENAME"
>ha_gcd</TT
>) log</P
></DD
><DT
><TT
CLASS="FILENAME"
>ha_ifd</TT
></DT
><DD
><P
>Network interface monitoring daemon (<TT
CLASS="FILENAME"
>ha_ifd</TT
>) log</P
></DD
><DT
><TT
CLASS="FILENAME"
>ha_script</TT
></DT
><DD
><P
>Action and Failover policy scripts log</P
></DD
><DT
><TT
CLASS="FILENAME"
>ha_srmd</TT
></DT
><DD
><P
>System resource manager (<TT
CLASS="FILENAME"
>ha_srmd</TT
>) log</P
></DD
></DL
></DIV
><P
>Log group configuration information is maintained for all nodes in the
pool for the <TT
CLASS="FILENAME"
>cli</TT
> and <TT
CLASS="FILENAME"
>crsd</TT
> log groups
or for all nodes in the cluster for all other log groups. You can also customize
the log group configuration for a specific node in the cluster or pool.</P
><P
>When you configure a log group, you specify the following information:</P
><P
></P
><UL
><LI
><P
>The log level, specified as character strings with the GUI
and numerically (1 to 19) with the CLI, as described below</P
></LI
><LI
><P
>The log file to log to</P
></LI
><LI
><P
>The node whose specified log group you are customizing (optional)</P
></LI
></UL
><P
>The log level specifies the verbosity of the logging, controlling the number
of log messages that Linux FailSafe will write into an associated log group's
file. There are 10 debug levels. <A
HREF="fs-setlogparams.html#LE32420-PARENT"
>Table 5-1</A
> shows the
logging levels as you specify them with the GUI and the CLI.</P
><DIV
CLASS="TABLE"
><A
NAME="LE32420-PARENT"
></A
><P
><B
>Table 5-1. Log Levels</B
></P
><TABLE
BORDER="1"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="19%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>GUI level</P
></TH
><TH
WIDTH="19%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>CLI level</P
></TH
><TH
WIDTH="62%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Meaning</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Off</TT
></P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>0</P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>No logging</P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Minimal</TT
></P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>1</P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Logs notification of critical errors
and normal operation</P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Info</TT
></P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>2</P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Logs minimal notification plus warning</P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Default</TT
></P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>5</P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Logs all Info messages plus additional
notifications</P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Debug0</TT
></P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>10</P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>...</P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Debug0</TT
> through <TT
CLASS="LITERAL"
>Debug9</TT
> (10-19 in the CLI) log increasingly more debug information,
including data structures. Many megabytes of disk space can be consumed on
the server when debug levels are used in a log configuration.</P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Debug9</TT
></P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>19</P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
></TR
></TBODY
></TABLE
></DIV
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Notifications of critical errors and normal operations are always sent
to <TT
CLASS="FILENAME"
>/var/log/messages</TT
>. Changes you make to the log level
for a log group do not affect <TT
CLASS="FILENAME"
>SYSLOG</TT
>.</P
></BLOCKQUOTE
></DIV
><P
>The Linux FailSafe software appends the node name to the name of the
log file you specify. For example, when you specify the log file name for
a log group as <TT
CLASS="FILENAME"
>/var/log/failsafe/cli</TT
>, the file name will
be <TT
CLASS="FILENAME"
>/var/log/failsafe/cli_</TT
><B
CLASS="COMMAND"
>nodename</B
>.</P
><P
>The
default log file names are as follows.</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/cmsd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></DT
><DD
><P
>log file for cluster membership services daemon in node <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></P
></DD
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/gcd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></DT
><DD
><P
>log file for group communication daemon in node <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></P
></DD
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/srmd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></DT
><DD
><P
>log file for system resource manager daemon in node <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></P
></DD
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/failsafe_</TT
> <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></DT
><DD
><P
>log file for Linux FailSafe daemon, a policy implementor for resource
groups, in node  <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></P
></DD
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/</TT
><TT
CLASS="REPLACEABLE"
><I
>agent_nodename</I
></TT
></DT
><DD
><P
>log file for monitoring agent named <TT
CLASS="REPLACEABLE"
><I
>agent</I
></TT
>
in node <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
>. For example, <TT
CLASS="FILENAME"
>ifd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
> is the log file for the interface daemon monitoring
agent that monitors interfaces and IP addresses and performs local failover
of IP addresses.</P
></DD
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/crsd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></DT
><DD
><P
>log file for reset daemon in node <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></P
></DD
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/</TT
>script_<TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></DT
><DD
><P
>log file for scripts in node <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></P
></DD
><DT
><TT
CLASS="FILENAME"
>/var/log/failsafe/cli</TT
>_<TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></DT
><DD
><P
>log file for internal administrative commands in node <TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
> invoked by the Cluster Manager GUI and Cluster Manager CLI</P
></DD
></DL
></DIV
><P
>For information on using log groups in system recovery, see <A
HREF="le28716-parent.html"
>Chapter 9</A
>.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4817"
>5.6.1. Configuring Log Groups with the Cluster Manager GUI</A
></H2
><P
>To configure a log group with the Cluster Manager GUI, perform the following
steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Clusters&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Set Log
Configuration&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4831"
>5.6.2. Configuring Log Groups with the Cluster Manager CLI</A
></H2
><P
>You can configure a log group with the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
> cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define log_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>on node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>] [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>You specify the node if you wish to customize the log group configuration
for a specific node only. If you have specified a default cluster, you do
not have to specify a cluster in this command; Linux FailSafe will use the
default.</P
><P
>The following prompt appears:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Enter commands, when finished enter either "done" or "cancel"
log_group<TT
CLASS="REPLACEABLE"
><I
>&#8194;A</I
></TT
>?</PRE
></TD
></TR
></TABLE
><P
>When this prompt appears, enter the log group parameters
you wish to modify, using the following format:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>log_group<TT
CLASS="REPLACEABLE"
><I
>&#8194;A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set log_level to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>
log_group<TT
CLASS="REPLACEABLE"
><I
>&#8194;A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>add log_file</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>
log_group<TT
CLASS="REPLACEABLE"
><I
>&#8194;A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>remove log_file</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>When you are finished configuring the log group, enter <TT
CLASS="FILENAME"
>done</TT
> to return to the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt.</P
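><P
>For example, a session that raises the <TT
CLASS="FILENAME"
>ha_cmsd</TT
> log group to a debug level might look like the following (the cluster
name <TT
CLASS="LITERAL"
>test-cluster</TT
> is illustrative):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define log_group ha_cmsd in cluster test-cluster</B
></TT
>
Enter commands, when finished enter either "done" or "cancel"
log_group ha_cmsd? <TT
CLASS="USERINPUT"
><B
>set log_level to 11</B
></TT
>
log_group ha_cmsd? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE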
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4859"
>5.6.3. Modifying Log Groups with the Cluster Manager CLI</A
></H2
><P
>Use the following CLI command to modify a log group:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify log_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;on </B
></TT
>[<TT
CLASS="USERINPUT"
><B
>node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>] [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>You modify a log group using the same commands you use to define a log
group.</P
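><P
>For example, to set the log file of the <TT
CLASS="FILENAME"
>cli</TT
> log group on a single node (the node name <TT
CLASS="LITERAL"
>node1</TT
> is illustrative; recall that the node name is appended to the file name
you specify):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify log_group cli on node node1</B
></TT
>
Enter commands, when finished enter either "done" or "cancel"
log_group cli? <TT
CLASS="USERINPUT"
><B
>add log_file /var/log/failsafe/cli</B
></TT
>
log_group cli? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE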
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4871"
>5.6.4. Displaying Log Group Definitions with the Cluster Manager GUI</A
></H2
><P
>To display log group definitions with the Cluster Manager GUI, run &#8220;Set
Log Configuration&#8221; and choose the log group to display from the rollover
menu. The current log level and log file for that log group will be displayed
in the task window, where you can change those settings if you desire.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4874"
>5.6.5. Displaying Log Group Definitions with the Cluster Manager CLI</A
></H2
><P
>Use the following command to view the parameters of the defined log groups:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show log_groups</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>This command shows all of the log groups currently defined, with each
log group's name, logging level, and log file.</P
><P
>For information on viewing the contents of the log file, see <A
HREF="le28716-parent.html"
>Chapter 9</A
>.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le53159-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le40511-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Resource Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Resource Group Creation Example</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>"
><P
>Logs notification of critical errors
and normal operation</P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Info</TT
></P
></TD
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>2</P
></TD
><TD
WIDTH="62%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Logs minimal notification plus warning</P
></TD
></TR
><TR
><TD
WIDTH="19%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Default</TT
></P
></TD
><TD
WIDhtml/g7155.html010064400016050000001000000522610717757406200140050ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Glossary</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Adding a New Hardware Device in an Active Cluster"
HREF="le32198-parent.html"><LINK
REL="NEXT"
TITLE="Index"
HREF="doc-index.html"></HEAD
><BODY
CLASS="GLOSSARY"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le32198-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="doc-index.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="GLOSSARY"
><H1
><A
NAME="AEN7155"
>Glossary</A
></H1
><DL
><DT
><B
>action scripts</B
></DT
><DD
><P
>The set of scripts that determine how a resource is started, monitored,
and stopped. There must be a set of action scripts specified for each
resource type. The possible set of action scripts is: <B
CLASS="COMMAND"
>probe</B
>, <B
CLASS="COMMAND"
>exclusive</B
>, <B
CLASS="COMMAND"
>start</B
>, <B
CLASS="COMMAND"
>stop</B
>, <B
CLASS="COMMAND"
>monitor</B
>, and <B
CLASS="COMMAND"
>restart</B
>.</P
></DD
><DT
><B
>cluster</B
></DT
><DD
><P
>A collection of one or more <I
CLASS="GLOSSTERM"
>cluster node</I
><I
CLASS="FIRSTTERM"
>s</I
> coupled to each other by networks or other similar interconnections.
A cluster is identified by a simple name; this name must be unique within
the <I
CLASS="FIRSTTERM"
>pool</I
>. A particular node may be a member of
only one cluster.</P
></DD
><DT
><B
>cluster administrator</B
></DT
><DD
><P
>The person responsible for managing and maintaining a Linux FailSafe
cluster.</P
></DD
><DT
><B
>cluster configuration database</B
></DT
><DD
><P
>Contains configuration information about all resources, resource
types, resource groups, failover policies, nodes, and clusters.</P
></DD
><DT
><B
>cluster node</B
></DT
><DD
><P
>A single Linux image. Usually, a cluster node is an individual computer.
The term <I
CLASS="EMPHASIS"
>node</I
> is also used in this guide for brevity.</P
></DD
><DT
><B
>control messages</B
></DT
><DD
><P
>Messages that cluster software sends between the cluster nodes to
request operations on or distribute information about cluster nodes and
resource groups. Linux FailSafe sends control messages for the purpose
of ensuring nodes and groups remain highly available. Control messages
and heartbeat messages are sent through a node's network interfaces that
have been attached to a control network. A node can be attached to multiple
control networks.</P
></DD
><DD
><P
>A node's control networks should not be set to accept control messages
if the node is not a dedicated Linux FailSafe node. Otherwise, end users
who run non-Linux FailSafe jobs on the machine can have their jobs killed
unexpectedly when Linux FailSafe resets the node.</P
></DD
><DT
><B
>control network</B
></DT
><DD
><P
>The network that connects nodes through their network interfaces
(typically Ethernet) such that Linux FailSafe can maintain a cluster's
high availability by sending heartbeat messages and control messages through
the network to the attached nodes. Linux FailSafe uses the highest priority
network interface on the control network; it uses a network interface
with lower priority when all higher-priority network interfaces on the
control network fail.</P
></DD
><DD
><P
>A node must have at least one control network interface for heartbeat
messages and one for control messages (both heartbeat and control messages
can be configured to use the same interface). A node can have no more
than eight control network interfaces.</P
></DD
><DT
><B
>dependency list</B
></DT
><DD
><P
>See <I
CLASS="GLOSSTERM"
>resource dependency</I
><I
CLASS="FIRSTTERM"
>&#8194;list</I
> or <I
CLASS="GLOSSTERM"
>resource type dependency</I
><I
CLASS="FIRSTTERM"
>&#8194;list</I
>.</P
></DD
><DT
><B
>failover</B
></DT
><DD
><P
>The process of allocating a <I
CLASS="FIRSTTERM"
>resource group</I
>
to another <I
CLASS="FIRSTTERM"
>node</I
>, according to a <I
CLASS="FIRSTTERM"
>failover policy</I
>. A failover may be triggered by the failure
of a resource, a change in the node membership (such as when a node fails
or starts), or a manual request by the administrator.</P
></DD
><DT
><B
>failover attribute</B
></DT
><DD
><P
>A string that affects the allocation of a resource group in a cluster.
The administrator must specify system-defined attributes (such as <I
CLASS="FIRSTTERM"
>AutoFailback</I
> or <I
CLASS="FIRSTTERM"
>ControlledFailback</I
>),
and can optionally supply site-specific attributes.</P
></DD
><DT
><B
>failover domain</B
></DT
><DD
><P
>The ordered list of <I
CLASS="GLOSSTERM"
>node</I
><I
CLASS="FIRSTTERM"
>s</I
>
on which a particular <I
CLASS="GLOSSTERM"
>resource group</I
> can be allocated.
The nodes listed in the failover domain must be within the same cluster;
however, the failover domain does not have to include every node in the
cluster. The administrator defines the <I
CLASS="FIRSTTERM"
>initial failover domain</I
> when creating a failover policy. This list is transformed
into the <I
CLASS="FIRSTTERM"
>runtime</I
>&#8194;<I
CLASS="FIRSTTERM"
>failover domain</I
> by the <I
CLASS="FIRSTTERM"
>failover script</I
>; the runtime
failover domain is what is actually used to select the failover node.
Linux FailSafe stores the runtime failover domain and uses it as input
to the next failover script invocation. The initial and runtime failover
domains may be identical, depending upon the contents of the failover
script. In general, Linux FailSafe allocates a given resource group to
the first node listed in the runtime failover domain that is also in the
node membership; the point at which this allocation takes place is affected
by the <I
CLASS="GLOSSTERM"
>failover attribute</I
><I
CLASS="FIRSTTERM"
>s</I
>.</P
></DD
><DT
><B
>failover policy</B
></DT
><DD
><P
>The method used by Linux FailSafe to determine the destination node
of a failover. A failover policy consists of a <I
CLASS="GLOSSTERM"
>failover domain</I
>, <I
CLASS="GLOSSTERM"
>failover attribute</I
><I
CLASS="FIRSTTERM"
>s</I
>,
and a <I
CLASS="GLOSSTERM"
>failover script</I
>. A failover policy name must
be unique within the <I
CLASS="GLOSSTERM"
>pool</I
>.</P
></DD
><DT
><B
>failover script</B
></DT
><DD
><P
>A failover policy component that generates a <I
CLASS="FIRSTTERM"
>runtime
failover domain</I
> and returns it to the Linux FailSafe process.
The Linux FailSafe process applies the failover attributes and then selects
the first node in the returned failover domain that is also in the current
node membership.</P
></DD
><DT
><B
>heartbeat messages</B
></DT
><DD
><P
>Messages that cluster software sends between the nodes that indicate
a node is up and running. Heartbeat messages and <I
CLASS="GLOSSTERM"
>control messages</I
> are sent through a node's network interfaces that have been
attached to a control network. A node can be attached to multiple control
networks.</P
></DD
><DT
><B
>heartbeat interval</B
></DT
><DD
><P
>Interval between heartbeat messages. The node timeout value must
be at least 10 times the heartbeat interval for proper Linux FailSafe
operation (otherwise false failovers may be triggered). The higher the
number of heartbeats (smaller heartbeat interval), the greater the potential
for slowing down the network. Conversely, the fewer the number of heartbeats
(larger heartbeat interval), the greater the potential for reducing availability
of resources.</P
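><P
>For example, with a heartbeat interval of 1 second, the node timeout
value must be at least 10 seconds.</P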
></DD
><DT
><B
>initial failover domain</B
></DT
><DD
><P
>The ordered list of nodes, defined by the administrator when a failover
policy is first created, that is used the first time a cluster is booted. The
ordered list specified by the initial failover domain is transformed into
a <I
CLASS="GLOSSTERM"
>runtime failover domain</I
> by the <I
CLASS="GLOSSTERM"
>failover
script</I
>; the runtime failover domain is used along with failover
attributes to determine the node on which a resource group should reside.
With each failure, the failover script takes the current runtime failover
domain and potentially modifies it; the initial failover domain is never
used again. Depending on the runtime conditions and contents of the failover
script, the initial and runtime failover domains may be identical. See
also <I
CLASS="GLOSSTERM"
>runtime failover domain</I
>.</P
></DD
><DT
><B
>key/value attribute</B
></DT
><DD
><P
>A set of information that must be defined for a particular resource
type. For example, for the resource type <TT
CLASS="LITERAL"
>filesystem</TT
>,
one key/value pair might be <TT
CLASS="LITERAL"
>mount_point=/fs1</TT
> where <TT
CLASS="LITERAL"
>mount_point</TT
> is the key and <TT
CLASS="LITERAL"
>fs1</TT
> is the value
specific to the particular resource being defined. Depending on the value,
you specify either a <TT
CLASS="LITERAL"
>string</TT
> or <TT
CLASS="LITERAL"
>integer</TT
>
data type. In the previous example, you would specify <TT
CLASS="LITERAL"
>string</TT
> as the data type for the value<TT
CLASS="LITERAL"
>&#8194;fs1</TT
>.</P
></DD
><DT
><B
>log configuration</B
></DT
><DD
><P
>A log configuration has two parts: a <I
CLASS="GLOSSTERM"
>log level</I
>
and a <I
CLASS="GLOSSTERM"
>log file</I
>, both associated with a <I
CLASS="GLOSSTERM"
>log group</I
>. The cluster administrator can customize the location
and amount of log output, and can specify a log configuration for all
nodes or for only one node. For example, the <B
CLASS="COMMAND"
>crsd</B
> log
group can be configured to log detailed level-10 messages to the <TT
CLASS="FILENAME"
>/var/log/failsafe/crsd-foo</TT
> log only on the node <TT
CLASS="LITERAL"
>foo</TT
>, and to write only minimal level-1 messages to the <B
CLASS="COMMAND"
>crsd</B
> log on all other nodes.</P
></DD
><DT
><B
>log file</B
></DT
><DD
><P
>A file containing Linux FailSafe notifications for a particular <I
CLASS="GLOSSTERM"
>log group</I
>. A log file is part of the <I
CLASS="GLOSSTERM"
>log configuration</I
> for a log group. By default, log files reside in the <TT
CLASS="FILENAME"
>/var/log/failsafe</TT
> directory, but the cluster administrator
can customize this. Note: Linux FailSafe logs both normal operations and
critical errors to <TT
CLASS="FILENAME"
>/var/log/messages</TT
>,
as well as to individual logs for specific log groups.</P
></DD
><DT
><B
>log group</B
></DT
><DD
><P
>A set of one or more Linux FailSafe processes that use the same
log configuration. A log group usually corresponds to one Linux FailSafe
daemon, such as <B
CLASS="COMMAND"
>gcd</B
>.</P
></DD
><DT
><B
>log level</B
></DT
><DD
><P
>A number controlling the number of log messages that Linux FailSafe
will write into an associated log group's log file. A log level is part
of the log configuration for a log group.</P
></DD
><DT
><B
>node</B
></DT
><DD
><P
>See <I
CLASS="GLOSSTERM"
>cluster node</I
>.</P
></DD
><DT
><B
>node ID</B
></DT
><DD
><P
>A 16-bit positive integer that uniquely defines a cluster node.
During node definition, Linux FailSafe will assign a node ID if one has
not been assigned by the cluster administrator. Once assigned, the node
ID cannot be modified.</P
></DD
><DT
><B
>node membership</B
></DT
><DD
><P
>The list of nodes in a cluster on which Linux FailSafe can allocate
resource groups.</P
></DD
><DT
><B
>node timeout</B
></DT
><DD
><P
>If no heartbeat is received from a node in this period of time,
the node is considered to be dead. The node timeout value must be at least
10 times the heartbeat interval for proper Linux FailSafe operation (otherwise
false failovers may be triggered).</P
></DD
><DT
><B
>notification command</B
></DT
><DD
><P
>The command used to notify the cluster administrator of changes
or failures in the cluster, nodes, and resource groups. The command must
exist on every node in the cluster.</P
></DD
><DT
><B
>offline resource group</B
></DT
><DD
><P
>A resource group that is not highly available in the cluster. To
put a resource group in offline state, Linux FailSafe stops the group
(if needed) and stops monitoring the group. An offline resource group
can be running on a node, yet not under Linux FailSafe control. If the
cluster administrator specifies the <I
CLASS="EMPHASIS"
>detach only</I
> option
while taking the group offline, then Linux FailSafe will not stop the
group but will stop monitoring the group.</P
></DD
><DT
><B
>online resource group</B
></DT
><DD
><P
>A resource group that is highly available in the cluster. When Linux
FailSafe detects a failure that degrades the resource group availability,
it moves the resource group to another node in the cluster. To put a resource
group in online state, Linux FailSafe starts the group (if needed) and
begins monitoring the group. If the cluster administrator specifies the <TT
CLASS="LITERAL"
>attach only</TT
> option while bringing the group online, then Linux
FailSafe will not start the group but will begin monitoring the group.</P
></DD
><DT
><B
>owner host</B
></DT
><DD
><P
>A system that can control a Linux FailSafe node remotely (such as
power-cycling the node). Serial cables must physically connect the two
systems through the node's system controller port. At run time, the owner
host must be defined as a node in the Linux FailSafe pool.</P
></DD
><DT
><B
>owner TTY name</B
></DT
><DD
><P
>The device file name of the terminal port (TTY) on the <I
CLASS="GLOSSTERM"
>owner host</I
> to which the system controller serial cable is
connected. The other end of the cable connects to the Linux FailSafe node
with the system controller port, so the node can be controlled remotely
by the owner host.</P
></DD
><DT
><B
>pool</B
></DT
><DD
><P
>The entire set of <I
CLASS="GLOSSTERM"
>node</I
><I
CLASS="FIRSTTERM"
>s</I
>
involved with a group of clusters. The clusters in the group are usually close
together and should always serve a common purpose. A replicated database
is stored on each node in the pool.</P
></DD
><DT
><B
>port password</B
></DT
><DD
><P
>The password for the system controller port, usually set once in
firmware or by setting jumper wires. (This is not the same as the node's
root password.)</P
></DD
><DT
><B
>powerfail mode</B
></DT
><DD
><P
>When powerfail mode is turned <TT
CLASS="LITERAL"
>on</TT
>, Linux FailSafe
tracks the response from a node's system controller as it makes reset
requests to a cluster node. When these requests fail to reset the node
successfully, Linux FailSafe uses heuristics to try to estimate whether
the machine has been powered down. If the heuristic algorithm returns
with success, Linux FailSafe assumes the remote machine has been reset
successfully. When powerfail mode is turned <TT
CLASS="LITERAL"
>off</TT
>, the
heuristics are not used and Linux FailSafe may not be able to detect node
power failures.</P
></DD
><DT
><B
>process membership</B
></DT
><DD
><P
>A list of process instances in a cluster that form a process group.
There can be one or more processes per node.</P
></DD
><DT
><B
>resource</B
></DT
><DD
><P
>A single physical or logical entity that provides a service to clients
or other resources. For example, a resource can be a single disk volume,
a particular network address, or an application such as a web server.
A resource is generally available for use over time on two or more <I
CLASS="GLOSSTERM"
>node</I
><I
CLASS="FIRSTTERM"
>s</I
> in a <I
CLASS="GLOSSTERM"
>cluster</I
>,
although it can be allocated to only one node at any given time. Resources
are identified by a <I
CLASS="GLOSSTERM"
>resource name</I
> and a <I
CLASS="GLOSSTERM"
>resource type</I
>. Dependent resources must be part of the same <I
CLASS="GLOSSTERM"
>resource group</I
> and are identified in a <I
CLASS="FIRSTTERM"
>resource
dependency list</I
>.</P
></DD
><DT
><B
>resource dependency</B
></DT
><DD
><P
>The condition in which a resource requires the existence of other
resources.</P
></DD
><DT
><B
>resource group</B
></DT
><DD
><P
>A collection of <I
CLASS="GLOSSTERM"
>resource</I
><I
CLASS="FIRSTTERM"
>s</I
>.
A resource group is identified by a simple name; this name must be unique
within a cluster. Resource groups cannot overlap; that is, two resource
groups cannot contain the same resource. All interdependent resources
must be part of the same resource group. If any individual resource in
a resource group becomes unavailable for its intended use, then the entire
resource group is considered unavailable. Therefore, a resource group
is the unit of failover for Linux FailSafe.</P
></DD
><DT
><B
>resource keys</B
></DT
><DD
><P
>Variables that define a resource of a given resource type. The action
scripts use this information to start, stop, and monitor a resource of
this resource type.</P
></DD
><DT
><B
>resource name</B
></DT
><DD
><P
>The simple name that identifies a specific instance of a <I
CLASS="GLOSSTERM"
>resource type</I
>. A resource name must be unique within a cluster.</P
></DD
><DT
><B
>resource type</B
></DT
><DD
><P
>A particular class of <I
CLASS="GLOSSTERM"
>resource</I
>. All of the
resources in a particular resource type can be handled in the same way
for the purposes of <I
CLASS="GLOSSTERM"
>failover</I
>. Every resource is
an instance of exactly one resource type. A resource type is identified
by a simple name; this name must be unique within a cluster. A resource
type can be defined for a specific node or for an entire cluster. A resource
type that is defined for a node overrides a cluster-wide resource type
definition with the same name; this allows an individual node to override
global settings from a cluster-wide resource type definition.</P
></DD
><DT
><B
>resource type dependency</B
></DT
><DD
><P
>A set of resource types upon which a resource type depends. For
example, the <TT
CLASS="LITERAL"
>filesystem</TT
> resource type depends upon
the<TT
CLASS="LITERAL"
>&#8194;volume</TT
>&#8194;resource type, and the <TT
CLASS="LITERAL"
>Netscape_web</TT
> resource type depends upon the <TT
CLASS="LITERAL"
>filesystem</TT
> and <TT
CLASS="LITERAL"
>IP_address</TT
> resource types.</P
></DD
><DT
><B
>runtime failover domain</B
></DT
><DD
><P
>The ordered set of nodes on which the resource group can execute
upon failures, as modified by the <I
CLASS="GLOSSTERM"
>failover script</I
>.
The runtime failover domain is used along with failover attributes to
determine the node on which a resource group should reside. See also <I
CLASS="GLOSSTERM"
>initial failover domain</I
>.</P
></DD
><DT
><B
>start/stop order</B
></DT
><DD
><P
>Each resource type has a start/stop order, which is a non-negative
integer. In a resource group, the start/stop orders of the resource types
determine the order in which the resources will be started when Linux
FailSafe brings the group online and will be stopped when Linux FailSafe
takes the group offline. The group's resources are started in increasing
order, and stopped in decreasing order; resources of the same type are
started and stopped in indeterminate order. For example, if resource type <TT
CLASS="LITERAL"
>volume</TT
> has order 10 and resource type <TT
CLASS="LITERAL"
>filesystem</TT
>
has order 20, then when Linux FailSafe brings a resource group online,
all volume resources in the group will be started before all filesystem
resources in the group.</P
></DD
><DT
><B
>system controller port</B
></DT
><DD
><P
>A port on a node that provides a way to power-cycle the
node remotely. Enabling or disabling a system controller port in the cluster
configuration database (CDB) tells Linux FailSafe whether it can perform
operations on the system controller port. (When the port is enabled, serial
cables must attach the port to another node, the owner host.) System controller
port information is optional for a node in the pool, but is required if
the node will be added to a cluster; otherwise resources running on that
node never will be highly available.</P
></DD
><DT
><B
>tie-breaker node</B
></DT
><DD
><P
>A node identified as a tie-breaker for Linux FailSafe to use in
the process of computing node membership for the cluster, when exactly
half the nodes in the cluster are up and can communicate with each other.
If a tie-breaker node is not specified, Linux FailSafe will use the node
with the lowest node ID in the cluster as the tie-breaker node.</P
></DD
><DT
><B
>type-specific attribute</B
></DT
><DD
><P
>Required information used to define a resource of a particular resource
type. For example, for a resource of type <TT
CLASS="LITERAL"
>filesystem</TT
>,
you must enter attributes for the resource's volume name (where the filesystem
is located) and specify options for how to mount the filesystem (for example,
as readable and writable).</P
></DD
></DL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le32198-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="doc-index.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Adding a New Hardware Device in an Active Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Index</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Index</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Glossary"
HREF="g7155.html"></HEAD
><BODY
CLASS="INDEX"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="g7155.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
>&nbsp;</TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><H1
><A
NAME="AEN7433"
>Index</A
></H1
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="g7155.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>&nbsp;</TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Glossary</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>&nbsp;</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
></TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="NEXT"
TITLE="About This Guide"
HREF="f42.html"></HEAD
><BODY
CLASS="BOOK"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="BOOK"
><A
NAME="AEN1"
></A
><DIV
CLASS="TITLEPAGE"
><H1
CLASS="TITLE"
><A
NAME="AEN3"
>Linux FailSafe&#8482;  Administrator's Guide</A
></H1
><H3
CLASS="OTHERCREDIT"
><A
NAME="AEN7"
>Written by Joshua Rodman of SuSE, Inc.
and Steven Levine and Jenn Byrnes of SGI</A
></H3
><H3
CLASS="OTHERCREDIT"
><A
NAME="AEN9"
>Illustrated by Dany Galgani and Chris Wengelski</A
></H3
><H3
CLASS="OTHERCREDIT"
><A
NAME="AEN11"
>Production by Adrian Daley, Glen Traefald</A
></H3
><H3
CLASS="OTHERCREDIT"
><A
NAME="AEN13"
>Engineering contributions by 
Scott Henry, Daniel Hurtubise,
Vidula Iyer, Ashwinee Khaladkar, Herbert Lewis, 
Michael Nishimoto, Wesley Smith, Bill Sparks, Paddy Sreenivasan, 
Dan Stekloff, Rebecca Underwood, Mayank Vasa, Manish Verma</A
></H3
><A
HREF="ln15.html"
>Legal Notice</A
><HR></DIV
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
><A
HREF="f42.html"
>About This Guide</A
></DT
><DD
><DL
><DT
>1. <A
HREF="x46.html"
>Audience</A
></DT
><DT
>2. <A
HREF="x50.html"
>Structure of This Guide</A
></DT
><DT
>3. <A
HREF="x81.html"
>Related Documentation</A
></DT
><DT
>4. <A
HREF="x149.html"
>Conventions Used in This Guide</A
></DT
></DL
></DD
><DT
>1. <A
HREF="le73529-parent.html"
>Overview of the Linux FailSafe System</A
></DT
><DD
><DL
><DT
>1.1. <A
HREF="le27299-parent.html"
>High Availability and Linux FailSafe</A
></DT
><DT
>1.2. <A
HREF="le89728-parent.html"
>Concepts</A
></DT
><DT
>1.3. <A
HREF="le94860-parent.html"
>Additional Linux FailSafe Features</A
></DT
><DT
>1.4. <A
HREF="le20463-parent.html"
>Linux FailSafe Administration</A
></DT
><DT
>1.5. <A
HREF="le32900-parent.html"
>Hardware Components of a Linux FailSafe Cluster</A
></DT
><DT
>1.6. <A
HREF="le45765-parent.html"
>Linux FailSafe Disk Connections</A
></DT
><DT
>1.7. <A
HREF="le79484-parent.html"
>Linux FailSafe Supported Configurations</A
></DT
><DT
>1.8. <A
HREF="le85141-parent.html"
>Highly Available Resources</A
></DT
><DT
>1.9. <A
HREF="le19101-parent.html"
>Highly Available Applications</A
></DT
><DT
>1.10. <A
HREF="le19267-parent.html"
>Failover and Recovery Processes</A
></DT
><DT
>1.11. <A
HREF="le24477-parent.html"
>Overview of Configuring and Testing a New Linux
FailSafe Cluster</A
></DT
><DT
>1.12. <A
HREF="le15726-parent.html"
>Linux FailSafe System Software</A
></DT
></DL
></DD
><DT
>2. <A
HREF="le88622-parent.html"
>Planning Linux FailSafe Configuration</A
></DT
><DD
><DL
><DT
>2.1. <A
HREF="le57040-parent.html"
>Introduction to Configuration Planning</A
></DT
><DT
>2.2. <A
HREF="le34382-parent.html"
>Disk Configuration</A
></DT
><DT
>2.3. <A
HREF="le96329-parent.html"
>Logical Volume Configuration</A
></DT
><DT
>2.4. <A
HREF="le53947-parent.html"
>Filesystem Configuration</A
></DT
><DT
>2.5. <A
HREF="le84104-parent.html"
>IP Address Configuration</A
></DT
></DL
></DD
><DT
>3. <A
HREF="le32854-parent.html"
>Installing Linux FailSafe Software and Preparing
the System</A
></DT
><DD
><DL
><DT
>3.1. <A
HREF="le29006-parent.html"
>Overview of Configuring Nodes for Linux FailSafe</A
></DT
><DT
>3.2. <A
HREF="le97755-parent.html"
>Installing Required Software</A
></DT
><DT
>3.3. <A
HREF="le23103-parent.html"
>Configuring System Files</A
></DT
><DT
>3.4. <A
HREF="le13651-parent.html"
>Additional Configuration Issues</A
></DT
><DT
>3.5. <A
HREF="le39637-parent.html"
>Choosing and Configuring devices and Filesystems</A
></DT
><DT
>3.6. <A
HREF="le97738-parent.html"
>Configuring Network Interfaces</A
></DT
><DT
>3.7. <A
HREF="le90681-parent.html"
>Configuration for Reset</A
></DT
></DL
></DD
><DT
>4. <A
HREF="le73346-parent.html"
>Linux FailSafe Administration Tools</A
></DT
><DD
><DL
><DT
>4.1. <A
HREF="le74378-parent.html"
>The Linux FailSafe Cluster Manager Tools</A
></DT
><DT
>4.2. <A
HREF="fs-guioverview.html"
>Using the Linux FailSafe Cluster Manager GUI</A
></DT
><DT
>4.3. <A
HREF="le15969-parent.html"
>Using the FailSafe Cluster Manager CLI</A
></DT
></DL
></DD
><DT
>5. <A
HREF="le94219-parent.html"
>Linux FailSafe Cluster Configuration</A
></DT
><DD
><DL
><DT
>5.1. <A
HREF="le59477-parent.html"
>Setting Configuration Defaults</A
></DT
><DT
>5.2. <A
HREF="le28499-parent.html"
>Name Restrictions</A
></DT
><DT
>5.3. <A
HREF="tv.html"
>Configuring Timeout Values and Monitoring Intervals</A
></DT
><DT
>5.4. <A
HREF="z957104627glen.html"
>Cluster Configuration</A
></DT
><DT
>5.5. <A
HREF="le53159-parent.html"
>Resource Configuration</A
></DT
><DT
>5.6. <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
></DT
><DT
>5.7. <A
HREF="le40511-parent.html"
>Resource Group Creation Example</A
></DT
><DT
>5.8. <A
HREF="le40790-parent.html"
>Linux FailSafe Configuration Example CLI Script</A
></DT
></DL
></DD
><DT
>6. <A
HREF="configexample.html"
>Configuration Examples</A
></DT
><DD
><DL
><DT
>6.1. <A
HREF="threenode-example.html"
>Linux FailSafe Example with Three-Node Cluster</A
></DT
><DT
>6.2. <A
HREF="threenode-script.html"
>cmgr Script</A
></DT
><DT
>6.3. <A
HREF="localfailover-of-ip.html"
>Local Failover of an IP Address</A
></DT
></DL
></DD
><DT
>7. <A
HREF="le99367-parent.html"
>Linux FailSafe System Operation</A
></DT
><DD
><DL
><DT
>7.1. <A
HREF="le85448-parent.html"
>Setting System Operation Defaults</A
></DT
><DT
>7.2. <A
HREF="le36400-parent.html"
>System Operation Considerations</A
></DT
><DT
>7.3. <A
HREF="fs-activatehaservices.html"
>Activating (Starting) Linux FailSafe</A
></DT
><DT
>7.4. <A
HREF="le16877-parent.html"
>System Status</A
></DT
><DT
>7.5. <A
HREF="le41282-parent.html"
>Resource Group Failover</A
></DT
><DT
>7.6. <A
HREF="z957117933glen.html"
>Deactivating (Stopping) Linux FailSafe</A
></DT
><DT
>7.7. <A
HREF="fs-resetmachine.html"
>Resetting Nodes</A
></DT
><DT
>7.8. <A
HREF="le37674-parent.html"
>Backing Up and Restoring Configuration With Cluster
Manager CLI</A
></DT
></DL
></DD
><DT
>8. <A
HREF="le56830-parent.html"
>Testing Linux FailSafe Configuration</A
></DT
><DD
><DL
><DT
>8.1. <A
HREF="le67057-parent.html"
>Overview of FailSafe Diagnostic Commands</A
></DT
><DT
>8.2. <A
HREF="le42786-parent.html"
>Performing Diagnostic Tasks with the Cluster
Manager GUI</A
></DT
><DT
>8.3. <A
HREF="le37273-parent.html"
>Performing Diagnostic Tasks with the Cluster
Manager CLI</A
></DT
></DL
></DD
><DT
>9. <A
HREF="le28716-parent.html"
>Linux FailSafe Recovery</A
></DT
><DD
><DL
><DT
>9.1. <A
HREF="le14340-parent.html"
>Overview of FailSafe System Recovery</A
></DT
><DT
>9.2. <A
HREF="le28847-parent.html"
>FailSafe Log Files</A
></DT
><DT
>9.3. <A
HREF="le17012-parent.html"
>Node Membership and Resets</A
></DT
><DT
>9.4. <A
HREF="le13884-parent.html"
>Status Monitoring</A
></DT
><DT
>9.5. <A
HREF="le35544-parent.html"
>Dynamic Control of FailSafe Services</A
></DT
><DT
>9.6. <A
HREF="le26593-parent.html"
>Recovery Procedures</A
></DT
></DL
></DD
><DT
>10. <A
HREF="le55630-parent.html"
>Upgrading and Maintaining Active Clusters</A
></DT
><DD
><DL
><DT
>10.1. <A
HREF="le40594-parent.html"
>Adding a Node to an Active Cluster</A
></DT
><DT
>10.2. <A
HREF="le15663-parent.html"
>Deleting a Node from an Active Cluster</A
></DT
><DT
>10.3. <A
HREF="x6931.html"
>Changing Control Networks in a Cluster</A
></DT
><DT
>10.4. <A
HREF="le26765-parent.html"
>Upgrading OS Software in an Active Cluster</A
></DT
><DT
>10.5. <A
HREF="le31814-parent.html"
>Upgrading FailSafe Software in an Active Cluster</A
></DT
><DT
>10.6. <A
HREF="le18685-parent.html"
>Adding New Resource Groups or Resources in an Active
Cluster</A
></DT
><DT
>10.7. <A
HREF="le32198-parent.html"
>Adding a New Hardware Device in an Active Cluster</A
></DT
></DL
></DD
><DT
><A
HREF="g7155.html"
>Glossary</A
></DT
><DT
><A
HREF="doc-index.html"
>Index</A
></DT
></DL
></DIV
><DIV
CLASS="LOT"
><DL
CLASS="LOT"
><DT
><B
>List of Tables</B
></DT
><DT
>1-1. <A
HREF="le89728-parent.html#LE99232-PARENT"
>Example Resource Group</A
></DT
><DT
>1-2. <A
HREF="le15726-parent.html#LE12498-PARENT"
>Contents of <TT
CLASS="FILENAME"
>/usr/lib/failsafe/bin</TT
></A
></DT
><DT
>1-3. <A
HREF="le15726-parent.html#LE21811-PARENT"
>Administrative Commands for Use in Scripts</A
></DT
><DT
>2-1. <A
HREF="le96329-parent.html#LE33754-PARENT"
>Logical Volume Configuration Parameters</A
></DT
><DT
>2-2. <A
HREF="le53947-parent.html#LE31422-PARENT"
>Filesystem Configuration Parameters</A
></DT
><DT
>2-3. <A
HREF="le84104-parent.html#LE73415-PARENT"
>IP Address Configuration Parameters</A
></DT
><DT
>4-1. <A
HREF="le15969-parent.html#AEN2872"
>Available Templates</A
></DT
><DT
>5-1. <A
HREF="fs-setlogparams.html#LE32420-PARENT"
>Log Levels</A
></DT
><DT
>8-1. <A
HREF="le67057-parent.html#LE10721-PARENT"
>FailSafe Diagnostic Test Summary</A
></DT
></DL
></DIV
><DIV
CLASS="LOT"
><DL
CLASS="LOT"
><DT
><B
>List of Figures</B
></DT
><DT
>1-1. <A
HREF="le32900-parent.html#LE72758-PARENT"
>Sample Linux FailSafe System Components</A
></DT
><DT
>1-2. <A
HREF="le85141-parent.html#LE77061-PARENT"
>Disk Storage Failover on a Two-Node System</A
></DT
><DT
>1-3. <A
HREF="le15726-parent.html#LE28867-PARENT"
>Software Layers</A
></DT
><DT
>1-4. <A
HREF="le15726-parent.html#AEN960"
>Read/Write Actions to the Cluster Configuration Database</A
></DT
><DT
>1-5. <A
HREF="le15726-parent.html#LE25208-PARENT"
>Communication Path for a Node that is Not in a Cluster</A
></DT
><DT
>1-6. <A
HREF="le15726-parent.html#Z944249330LHJ-PARENT"
>Message Paths for Action Scripts and Failover Policy
Scripts</A
></DT
><DT
>2-1. <A
HREF="le34382-parent.html#LE22456-PARENT"
>Non-Shared Disk Configuration and Failover</A
></DT
><DT
>2-2. <A
HREF="le34382-parent.html#LE83029-PARENT"
>Shared Disk Configuration for Active/Backup Use</A
></DT
><DT
>2-3. <A
HREF="le34382-parent.html#LE83152-PARENT"
>Shared Disk Configuration For Dual-Active Use</A
></DT
><DT
>3-1. <A
HREF="le97738-parent.html#LE47532-PARENT"
>Example Interface Configuration</A
></DT
><DT
>6-1. <A
HREF="threenode-example.html#AEN4977"
>Configuration Example</A
></DT
></DL
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="f42.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>About This Guide</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le13651-parent.html
<HTML
><HEAD
><TITLE
>Additional Configuration Issues</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="PREVIOUS"
TITLE="Configuring System Files"
HREF="le23103-parent.html"><LINK
REL="NEXT"
TITLE="Choosing and Configuring devices and Filesystems"
HREF="le39637-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le23103-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le39637-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE13651-PARENT"
>3.4. Additional Configuration Issues</A
></H1
><P
>During the hardware installation of Linux FailSafe nodes,
two additional issues must be considered:</P
><P
></P
><UL
><LI
><P
> The Linux FailSafe software requires the nodes to be
booted automatically when they are reset or powered on.
Linux on x86 depends upon the BIOS configuration to ensure this.  Some
PC BIOSes will hang indefinitely upon error, which is clearly unsuitable for
high availability situations.  On other platforms, such as PowerPC and Alpha,
the necessary steps will vary.</P
><P
>A related, but not identical, issue is that of rebooting after a kernel panic.
To ensure that the system will reboot even in the case of a kernel failure, set
the panic value in a system boot file, such as  <TT
CLASS="FILENAME"
>init.d/boot.local</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>echo "<TT
CLASS="REPLACEABLE"
><I
>number</I
></TT
>" &#62; /proc/sys/kernel/panic</PRE
></TD
></TR
></TABLE
><P
><TT
CLASS="REPLACEABLE"
><I
>number</I
></TT
> is the number of seconds after a panic
before the system will reset.</P
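><P
>For example, to have a node reset itself 30 seconds after a kernel
panic (30 is an illustrative value; choose a delay appropriate for your site),
you could add the following line to the boot file:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>echo "30" &#62; /proc/sys/kernel/panic</PRE
></TD
></TR
></TABLE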
><P
>If you would prefer that administrator intervention be required after
a hardware or kernel failure, you may leave this setting disabled.</P
></LI
><LI
><P
>The SCSI controllers' host IDs of the nodes in a Linux
FailSafe cluster using physically shared storage must be different. If a cluster
has no shared storage or is using shared Fibre Channel storage, the value
of SCSI host ID is not important.</P
></LI
></UL
><P
>You can check the ID of most Linux controllers in the logged kernel
messages from boot time:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>grep ID= /var/log/messages</B
></TT
>&#8194;
&#60;6&#62;(scsi0) Wide Channel, SCSI ID=7, 16/255 SCBs</PRE
></TD
></TR
></TABLE
><P
>Changing the SCSI host ID is specific to the SCSI controller in use.
Refer to the controller documentation.</P
><P
>A controller uses its SCSI host ID on all buses attached to it. Therefore,
you must make sure that no device attached to a controller uses the controller's
host ID as its SCSI unit number.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le23103-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le39637-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Configuring System Files</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Choosing and Configuring devices and Filesystems</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le13884-parent.html
<HTML
><HEAD
><TITLE
>Status Monitoring</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"><LINK
REL="PREVIOUS"
TITLE="Node Membership and Resets"
HREF="le17012-parent.html"><LINK
REL="NEXT"
TITLE="Dynamic Control of FailSafe Services"
HREF="le35544-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le17012-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 9. Linux FailSafe Recovery</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le35544-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE13884-PARENT"
>9.4. Status Monitoring</A
></H1
><P
>FailSafe allows you to monitor and check the status of specified clusters,
nodes, resources, and resource groups. You can use this feature to isolate
where your system is encountering problems.</P
><P
>With the FailSafe Cluster Manager GUI Cluster View, you can monitor
the status of the FailSafe components continuously through their visual representation.
Using the FailSafe Cluster Manager CLI, you can display the status of the individual
components by using the <B
CLASS="COMMAND"
>show</B
> command.</P
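><P
>For example, you can run <B
CLASS="COMMAND"
>show</B
> commands non-interactively from a shell prompt by using the <B
CLASS="COMMAND"
>-c</B
> option of the <B
CLASS="COMMAND"
>cluster_mgr</B
> command (see <A
HREF="le15969-parent.html"
>Section 4.3</A
>). The following lists the defined clusters:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr -c "show clusters"</B
></TT
></PRE
></TD
></TR
></TABLE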
><P
>For information on status monitoring and on the meaning of the states
of the FailSafe components, see <A
HREF="le16877-parent.html"
>Section 7.4</A
>.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le17012-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le35544-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Node Membership and Resets</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Dynamic Control of FailSafe Services</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le14340-parent.html
<HTML
><HEAD
><TITLE
>Overview of FailSafe System Recovery</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"><LINK
REL="NEXT"
TITLE="FailSafe Log Files"
HREF="le28847-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le28716-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 9. Linux FailSafe Recovery</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le28847-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE14340-PARENT"
>9.1. Overview of FailSafe System Recovery</A
></H1
><P
>When a FailSafe system experiences problems, you can
use some of the FailSafe features and commands to determine where the problem
is.</P
><P
>FailSafe provides the following tools to evaluate and recover from system
failure:</P
><P
></P
><UL
><LI
><P
>Log files</P
></LI
><LI
><P
>Commands to monitor status of system components</P
></LI
><LI
><P
>Commands to start, stop, and fail over highly available services</P
></LI
></UL
><P
>Keep in mind that the FailSafe logs may not detect system problems that
do not translate into FailSafe problems. For example, if a CPU goes bad, or
hardware maintenance is required, FailSafe may not be able to detect and log
these failures.</P
><P
>In general, when evaluating system problems of any nature on a FailSafe
configuration, you should determine whether you need to shut down a node to
address those problems. When you shut down a node, perform the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Stop FailSafe services on that node</P
></LI
><LI
><P
>Shut down the node to perform needed maintenance and repair</P
></LI
><LI
><P
>Start up the node</P
></LI
><LI
><P
>Start FailSafe services on that node</P
></LI
></OL
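><P
>For example, using the Cluster Manager CLI, steps 1 and 4 might look
like the following sketch (the node and cluster names are illustrative; see <A
HREF="z957117933glen.html"
>Section 7.6</A
> and <A
HREF="fs-activatehaservices.html"
>Section 7.3</A
> for the full stop and start procedures):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster</B
></TT
>
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services on node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE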
><P
>It is important that you explicitly stop FailSafe services before shutting
down a node, where possible, so that FailSafe does not interpret the node
shutdown as node failure. If FailSafe interprets the service interruption
as node failure, there could be unexpected ramifications, depending on how
you have configured your resource groups and your application failover domain.</P
><P
>When you shut down a node to perform maintenance, you may need to change
your FailSafe configuration to keep your system running.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le28847-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Recovery</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>FailSafe Log Files</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le15663-parent.html
<HTML
><HEAD
><TITLE
>Deleting a Node from an Active Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="PREVIOUS"
TITLE="Adding a Node to an Active Cluster"
HREF="le40594-parent.html"><LINK
REL="NEXT"
TITLE="Changing Control Networks in a Cluster"
HREF="x6931.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le40594-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 10. Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="x6931.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE15663-PARENT"
>10.2. Deleting a Node from an Active Cluster</A
></H1
><P
>Use the following procedure to delete a node from an active cluster.
This procedure begins with the assumption that the node status is UP.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>If resource groups are online on the node, use the <B
CLASS="COMMAND"
>cluster_mgr</B
> command to move them to another node in the cluster.</P
><P
>To move a resource group to another node in the cluster, another node
must be available in the failover policy domain of the resource group.
If you want to leave the resource groups running in the same node, use the <B
CLASS="COMMAND"
>cluster_mgr</B
> command to detach the resource group. For example, the
following command would leave the resource group <TT
CLASS="FILENAME"
>web-rg</TT
>
running in the same node in the cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin detach resource_group "web-rg" in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Delete the node from the failure domains of any failover policies
which use the node. In order to do this, the entire failover policy must be
re-defined, deleting the affected node from the failure domain.</P
></LI
><LI
><P
>To stop HA services on the node <TT
CLASS="FILENAME"
>web-node3</TT
>,
use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command. This command will
move all the resource groups online on this node to other nodes in the cluster
if possible.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>If it is not possible to move resource groups that are online on node <TT
CLASS="FILENAME"
>web-node3</TT
>, the above command will fail. The <B
CLASS="COMMAND"
>force</B
>
option is available to stop HA services on a node even in the case of an error.
If any resources cannot be moved offline or deallocated properly, a side
effect of the forced stop is that these resources remain allocated on the node.</P
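><P
>The following sketch shows the intended usage; the exact placement of
the <B
CLASS="COMMAND"
>force</B
> keyword is an assumption here, so check the <B
CLASS="COMMAND"
>cmgr</B
> help on your release:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster force</B
></TT
></PRE
></TD
></TR
></TABLE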
></LI
></OL
><P
>Perform Steps 4, 5, 6, and 7 if the node must be deleted from the configuration
database.</P
><P
></P
><OL
START="4"
TYPE="1"
><LI
><P
>Delete the node from the cluster. To delete node <TT
CLASS="FILENAME"
>web-node3</TT
> from <TT
CLASS="FILENAME"
>web-cluster</TT
> configuration,
use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify cluster web-cluster</B
></TT
>
Enter commands, when finished enter either "done" or "cancel"
web-cluster ? <TT
CLASS="USERINPUT"
><B
>remove node web-node3</B
></TT
>
web-cluster ? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Remove node configuration from the configuration database.</P
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command deletes the <TT
CLASS="FILENAME"
>web-node3</TT
> node definition from the configuration database.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; delete node web-node3</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Stop all cluster processes and delete the configuration database.</P
><P
>The following commands stop cluster processes on the node and delete
the configuration database.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/etc/rc.d/init.d/failsafe stop</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>killall cdbd</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>cdbdelete /var/lib/failsafe/cdb/cdb.db</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Disable cluster and HA processes from starting when the node
boots. The following command performs this task:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>fsconfig failsafe off</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
></OL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le40594-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="x6931.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Adding a Node to an Active Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Changing Control Networks in a Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le15726-parent.html
<HTML
><HEAD
><TITLE
>Linux FailSafe System Software </TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Overview of Configuring and Testing a New Linux
FailSafe Cluster"
HREF="le24477-parent.html"><LINK
REL="NEXT"
TITLE="Planning Linux FailSafe Configuration"
HREF="le88622-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le24477-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le88622-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE15726-PARENT"
>1.12. Linux FailSafe System Software</A
></H1
><P
>This section describes the software layers, communication paths, and
cluster configuration database.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN750"
>1.12.1. Layers</A
></H2
><P
>A Linux FailSafe  system has the following software layers: </P
><P
></P
><UL
><LI
><P
>Plug-ins, which create highly available services.  If the
application plug-in you want is not available, you can hire the Silicon Graphics
Global Services group to develop the required software, or you can use the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
> to write the software yourself.</P
></LI
><LI
><P
>Linux FailSafe  base, which includes the ability to define
resource groups and failover policies</P
></LI
><LI
><P
>High-availability cluster infrastructure that lets you define
clusters, resources, and resource types (this consists of the  <TT
CLASS="LITERAL"
>cluster_services</TT
> installation package)         </P
></LI
><LI
><P
>Cluster software infrastructure, which lets you do the following:</P
><P
></P
><UL
><LI
><P
>Perform node logging</P
></LI
><LI
><P
>Administer the cluster</P
></LI
><LI
><P
>Define nodes</P
></LI
></UL
><P
>The cluster software infrastructure consists of the <TT
CLASS="LITERAL"
>cluster_admin</TT
> and <TT
CLASS="LITERAL"
>cluster_control</TT
> subsystems.</P
></LI
></UL
><P
><A
HREF="le15726-parent.html#LE28867-PARENT"
>Figure 1-3</A
> shows a graphic representation of these
layers. <A
HREF="le15726-parent.html#LE12498-PARENT"
>Table 1-2</A
> describes the layers for Linux FailSafe,
which are located in the <TT
CLASS="FILENAME"
>/usr/lib/failsafe/bin</TT
> directory.</P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE28867-PARENT"
></A
><P
><B
>Figure 1-3. Software Layers</B
></P
><P
><IMG
SRC="figures/software.layers.gif"></P
></DIV
></P
><DIV
CLASS="TABLE"
><A
NAME="LE12498-PARENT"
></A
><P
><B
>Table 1-2. Contents of <TT
CLASS="FILENAME"
>/usr/lib/failsafe/bin</TT
></B
></P
><TABLE
BORDER="1"
WIDTH="100%"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="17%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Layer</P
></TH
><TH
WIDTH="24%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Subsystem</P
></TH
><TH
WIDTH="22%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Process</P
></TH
><TH
WIDTH="37%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Description</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Linux FailSafe Base</P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>failsafe2</TT
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_fsd</TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Linux FailSafe daemon. Provides basic
component of the Linux FailSafe software. </P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>High-availability cluster infrastructure</P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>cluster_ha </TT
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_cmsd</TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Cluster membership daemon. Provides
the list of nodes, called <I
CLASS="FIRSTTERM"
>node membership</I
>, available
to the cluster. </P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_gcd</TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Group membership daemon. Provides group
membership and reliable communication services in the presence of failures
to Linux FailSafe processes.</P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_srmd</TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>System resource manager daemon. Manages
resources, resource groups, and resource types. Executes action scripts for
resources.</P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_ifd</TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Interface agent daemon. Monitors the
local node's network interfaces.</P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Cluster software infrastructure</P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>cluster_admin </TT
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>cad </TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Cluster administration daemon. Provides
administration services.</P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>cluster_control </TT
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>crsd </TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Node control daemon. Monitors the serial
connection to other nodes. Has the ability to reset other nodes. </P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>cmond</TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Daemon that manages all other daemons.
This process starts the other processes on all nodes in the cluster and restarts
them on failure.</P
></TD
></TR
><TR
><TD
WIDTH="17%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
></P
></TD
><TD
WIDTH="22%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>cdbd</TT
></P
></TD
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Manages the configuration database
and keeps each copy in sync on all nodes in the pool.</P
></TD
></TR
></TBODY
></TABLE
></DIV
><P
></P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN950"
>1.12.2. Communication Paths</A
></H2
><P
>The following figures show communication paths in Linux FailSafe.  Note
that they do not represent <TT
CLASS="LITERAL"
>cmond</TT
>.  </P
><P
><DIV
CLASS="FIGURE"
><A
NAME="AEN960"
></A
><P
><B
>Figure 1-4.  Read/Write Actions to the Cluster Configuration Database</B
></P
><P
><IMG
SRC="figures/ha.cluster.config.info.flow.gif"></P
></DIV
></P
><P
><A
HREF="le15726-parent.html#LE25208-PARENT"
>Figure 1-5</A
> shows the communication path for a node
that is in the pool but not in a cluster. </P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE25208-PARENT"
></A
><P
><B
>Figure 1-5. Communication Path for a Node that is Not in a Cluster</B
></P
><P
><IMG
SRC="figures/machine.not.in.ha.cluster.gif"></P
></DIV
></P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN973"
>1.12.3. Conditions Under Which Action Scripts are Executed</A
></H2
><P
>Action scripts are executed under the following conditions:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="LITERAL"
>exclusive</TT
>: the resource group is made online
by the user or HA processes are started</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>start</TT
>: the resource group is made online
by the user, HA processes are started, or there is a resource group failover</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>stop</TT
>: the resource group is made offline,
HA processes are stopped, the resource group fails over, or the node is shut
down</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>monitor</TT
>: the resource group is online</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>restart</TT
>: the <TT
CLASS="LITERAL"
>monitor</TT
>
script fails</P
></LI
></UL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN993"
>1.12.4. When Does FailSafe Execute Action and Failover Scripts</A
></H2
><P
>The order of execution is as follows:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Linux FailSafe is started, usually at node boot or manually,
and reads the resource group information from the cluster configuration database.</P
></LI
><LI
><P
>Linux FailSafe asks the system resource manager (SRM) to run <TT
CLASS="LITERAL"
>exclusive</TT
> scripts for all resource groups that are in the <TT
CLASS="LITERAL"
>Online ready</TT
> state.</P
></LI
><LI
><P
>SRM returns one of the following states for each resource
group:<P
></P
><UL
><LI
><P
><TT
CLASS="LITERAL"
>running</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>partially running</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>not running</TT
></P
></LI
></UL
></P
></LI
><LI
><P
>If a resource group has a state of <TT
CLASS="LITERAL"
>not running</TT
>
in a node where HA services have been started, the following occurs:<P
></P
><OL
TYPE="a"
><LI
><P
>Linux FailSafe   runs the failover policy script associated
with the resource group. The failover policy scripts take the list of nodes
that are capable of running the resource group (the <I
CLASS="FIRSTTERM"
>failover domain</I
>) as a parameter.</P
></LI
><LI
><P
>The failover policy script returns an ordered list of nodes
in descending order of priority (the <I
CLASS="FIRSTTERM"
>run-time failover domain</I
>)
where the resource group can be placed.</P
></LI
><LI
><P
>Linux FailSafe sends a request to SRM to move the resource
group to the first node in the run-time failover domain.</P
></LI
><LI
><P
>SRM executes the <TT
CLASS="LITERAL"
>start</TT
> action script for
all resources in the resource group:<P
></P
><UL
><LI
><P
>If the <TT
CLASS="LITERAL"
>start</TT
> script fails, the resource
group is marked <TT
CLASS="LITERAL"
>online</TT
> on that node with an <TT
CLASS="LITERAL"
>srmd
executable error</TT
> error.</P
></LI
><LI
><P
>If the <TT
CLASS="LITERAL"
>start</TT
> script is successful, SRM
automatically starts monitoring those resources. After the specified start
monitoring time passes, SRM executes the <TT
CLASS="LITERAL"
>monitor</TT
> action
script for the resource in the resource group.</P
></LI
></UL
></P
></LI
></OL
></P
></LI
><LI
><P
>If the state of the resource group is <TT
CLASS="LITERAL"
>running</TT
>
or<TT
CLASS="LITERAL"
> partially running </TT
> on only one node in the cluster,
Linux FailSafe   runs the associated failover policy script:<P
></P
><UL
><LI
><P
>If the highest priority node is the same node where the resource
group is partially running or running, the resource group is made online on
the same node. In the <TT
CLASS="LITERAL"
> partially running</TT
> case, Linux FailSafe
asks SRM to execute <TT
CLASS="LITERAL"
>start</TT
> scripts for resources in the
resource group that are not running.</P
></LI
><LI
><P
>If the highest priority node is another node in the cluster,
Linux FailSafe asks SRM to execute <TT
CLASS="LITERAL"
>stop</TT
> action scripts
for resources in the resource group. Linux FailSafe then makes the resource group
online on the highest priority node in the cluster.</P
></LI
></UL
></P
></LI
><LI
><P
>If the state of the resource group is <TT
CLASS="LITERAL"
>running</TT
>
or<TT
CLASS="LITERAL"
> partially running </TT
>in multiple nodes in the cluster,
the resource group is marked with an <TT
CLASS="LITERAL"
> error exclusivity </TT
>error.
These resource groups will require operator intervention to become online
in the cluster.</P
></LI
></OL
><P
><A
HREF="le15726-parent.html#Z944249330LHJ-PARENT"
>Figure 1-6</A
> shows the message paths for action
scripts and failover policy scripts.</P
><DIV
CLASS="FIGURE"
><A
NAME="Z944249330LHJ-PARENT"
></A
><P
><B
>Figure 1-6. Message Paths for Action Scripts and Failover Policy
Scripts</B
></P
><P
><IMG
SRC="figures/ha.cluster.messages.gif"></P
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN1064"
>1.12.5. Components</A
></H2
><P
>The cluster configuration database is a key component of Linux FailSafe
software. It contains all information about the following: </P
><P
></P
><UL
><LI
><P
>Resources</P
></LI
><LI
><P
>Resource types</P
></LI
><LI
><P
>Resource groups</P
></LI
><LI
><P
>Failover policies</P
></LI
><LI
><P
>Nodes</P
></LI
><LI
><P
>Clusters</P
></LI
></UL
><P
>The cluster configuration database daemon (<TT
CLASS="LITERAL"
>cdbd</TT
>)
maintains identical databases on each node in the cluster. </P
><P
>The following are the contents of the failsafe directories under the <TT
CLASS="FILENAME"
>/usr/lib</TT
> and <TT
CLASS="FILENAME"
>/var</TT
> hierarchies:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="FILENAME"
>/var/run/failsafe/comm/</TT
></P
><P
>Directory that contains files that communicate between various daemons.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/usr/lib/failsafe/common_scripts/</TT
></P
><P
>Directory that contains the script library (the common functions that
may be used in action scripts).</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/var/log/failsafe/</TT
></P
><P
>Directory that contains the logs of all scripts and daemons executed
by Linux FailSafe. The outputs and errors from the commands within the scripts
are logged in the <TT
CLASS="FILENAME"
>script_<TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></TT
>
 file.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/usr/lib/failsafe/policies/</TT
></P
><P
>Directory that contains the failover scripts used for resource groups.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/usr/lib/failsafe/resource_types/template</TT
></P
><P
>Directory that contains the template action scripts.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/usr/lib/failsafe/resource_types/<TT
CLASS="REPLACEABLE"
><I
>rt_name</I
></TT
></TT
></P
><P
>Directory that contains the action scripts for the <TT
CLASS="REPLACEABLE"
><I
>rt_name</I
></TT
> resource type. For example, <TT
CLASS="LITERAL"
>/usr/lib/failsafe/resource_types/filesystem</TT
> .</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>resource_types/<TT
CLASS="REPLACEABLE"
><I
>rt_name</I
></TT
>/exclusive</TT
></P
><P
>Script that verifies that a resource of this resource type is not already
running.  For example, <TT
CLASS="LITERAL"
>resource_types/filesystem/exclusive</TT
>.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>resource_types/<TT
CLASS="REPLACEABLE"
><I
>rt_name</I
></TT
>/monitor</TT
></P
><P
>Script that monitors a resource of this type.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>resource_types/<TT
CLASS="REPLACEABLE"
><I
>rt_name</I
></TT
>/restart</TT
></P
><P
>Script that restarts a resource of this resource type on the same node
after a monitoring failure.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>resource_types/<TT
CLASS="REPLACEABLE"
><I
>rt_name</I
></TT
>/start</TT
></P
><P
>Script that starts a resource of this resource type.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>resource_types/<TT
CLASS="REPLACEABLE"
><I
>rt_name</I
></TT
>/stop</TT
></P
><P
>Script that stops a resource of this resource type.</P
></LI
></UL
><P
><A
HREF="le15726-parent.html#LE21811-PARENT"
>Table 1-3</A
> shows the administrative commands available
for use in scripts.</P
><DIV
CLASS="TABLE"
><A
NAME="LE21811-PARENT"
></A
><P
><B
>Table 1-3. Administrative Commands for Use in Scripts</B
></P
><TABLE
BORDER="1"
WIDTH="100%"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="24%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Command</P
></TH
><TH
WIDTH="76%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Purpose</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_cilog</TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Logs messages to the <TT
CLASS="FILENAME"
>script_<TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
></TT
> log
files.</P
></TD
></TR
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_execute_lock</TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Executes a command with a file lock.
This allows command execution to be serialized.</P
></TD
></TR
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_exec2</TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Executes a command and retries the
command on failure or timeout. </P
></TD
></TR
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_filelock</TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Locks a file.  </P
></TD
></TR
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_fileunlock</TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Unlocks a file. </P
></TD
></TR
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_ifdadmin</TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Communicates with the <TT
CLASS="LITERAL"
>ha_ifd</TT
> network interface agent daemon. </P
></TD
></TR
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_http_ping2 </TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Checks if a web server is running. </P
></TD
></TR
><TR
><TD
WIDTH="24%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>ha_macconfig2 </TT
></P
></TD
><TD
WIDTH="76%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Displays or modifies MAC addresses
of a network interface. </P
></TD
></TR
></TBODY
></TABLE
></DIV
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le24477-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le88622-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Overview of Configuring and Testing a New Linux
FailSafe Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Planning Linux FailSafe Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>SS="LITERAL"
>restart</TT
>: the <TT
CLASS="LITERAL"
>monitor</TT
>
script fails</P
></LI
></UL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN993"
>1.12.4. When Does FailSafe Execute Action and Failover Scripts</A
></H2
><P
>The order of execution is as follows:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Linux FailSafe is started, usually at node bhtml/le15969-parent.html010064400016050000001000000422250717757335400155440ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Using the FailSafe Cluster Manager CLI</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Administration Tools"
HREF="le73346-parent.html"><LINK
REL="PREVIOUS"
TITLE="Using the Linux FailSafe Cluster Manager GUI"
HREF="fs-guioverview.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="fs-guioverview.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 4. Linux FailSafe Administration Tools</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le94219-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE15969-PARENT"
>4.3. Using the FailSafe Cluster Manager CLI</A
></H1
><P
>This section documents how to perform cluster administrative
tasks by means of the FailSafe Cluster Manager CLI. In order to execute commands
with the FailSafe Cluster Manager CLI, you should be logged in as root.</P
><P
>To use the cluster manager, enter either of the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>or</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cmgr</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>After you have entered this command, you should see the following message
and the cluster manager CLI command prompt:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>Welcome to SGI Cluster Manager Command-Line Interface
cmgr&#62;</PRE
></TD
></TR
></TABLE
><P
>Once the command prompt displays, you can enter the cluster manager
commands.</P
><P
>At any time, you can enter <TT
CLASS="FILENAME"
>?</TT
> or <TT
CLASS="FILENAME"
>help</TT
> to bring up the CLI help display.</P
><P
>When you are creating or modifying a component of a Linux FailSafe system,
you can enter either of the following commands:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><B
CLASS="COMMAND"
>cancel</B
></DT
><DD
><P
>Abort the current mode and discard any changes you have made.</P
></DD
><DT
><B
CLASS="COMMAND"
>done</B
></DT
><DD
><P
>Commit the current definitions or modifications and return to the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt.</P
></DD
></DL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2713"
>4.3.1. Entering CLI Commands Directly</A
></H2
><P
> There are some Cluster Manager CLI commands that you
can execute directly from the command line, without entering <B
CLASS="COMMAND"
>cmgr</B
> mode, by using the <B
CLASS="COMMAND"
>-c</B
> option of the <B
CLASS="COMMAND"
>cluster_mgr</B
> command. These commands are <B
CLASS="COMMAND"
>show</B
>, <B
CLASS="COMMAND"
>delete</B
>, <B
CLASS="COMMAND"
>admin</B
>, <B
CLASS="COMMAND"
>install</B
>,<B
CLASS="COMMAND"
>&#8194;start</B
>, <B
CLASS="COMMAND"
>stop</B
>, <B
CLASS="COMMAND"
>test</B
>, <B
CLASS="COMMAND"
>help</B
>, and <B
CLASS="COMMAND"
>quit</B
>. You can execute these commands
directly using the following format:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cluster_mgr -c "<TT
CLASS="REPLACEABLE"
><I
>command</I
></TT
>"</PRE
></TD
></TR
></TABLE
><P
>For example, you can execute a <B
CLASS="COMMAND"
>show clusters </B
>CLI
command as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>% <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr -c "show clusters"</B
></TT
></PRE
></TD
></TR
></TABLE
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>1 Cluster(s) defined
&#8194;       eagan</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2741"
>4.3.2. Invoking the Cluster Manager CLI in Prompt Mode</A
></H2
><P
>   The Cluster
Manager CLI provides an option which displays prompts for the required inputs
of administration commands that define and modify Linux FailSafe components.
You can run the CLI in prompt mode in either of the following ways:</P
><P
></P
><UL
><LI
><P
>Specify a <B
CLASS="COMMAND"
>-p</B
> option when you enter the <B
CLASS="COMMAND"
>cluster_mgr</B
> (or <B
CLASS="COMMAND"
>cmgr</B
>) command, as in the following
example:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr -p</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Execute a <B
CLASS="COMMAND"
>set prompting on</B
> command after
you have brought up the CLI, as in the following example:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set prompting on</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>This method of entering prompt mode allows you to toggle in and out
of prompt mode as you execute individual CLI commands. To get out of prompt
mode while you are running the CLI, enter the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set prompting off</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
></UL
><P
>For example, if you are not in the prompt mode of the CLI and you enter
the following command to define a node, you will see a single prompt, as indicated:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>
Enter commands, when finished enter either "done" or "cancel"</PRE
></TD
></TR
></TABLE
><P
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>?</P
><P
>At this prompt, you enter the individual node definition commands in
the following format (for full information on defining nodes, see <A
HREF="z957104627glen.html#LE15937-PARENT"
>Section 5.4.1.2</A
>):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>set hostname to <TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>

set nodeid to <TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>
set sysctrl_type to <TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
>
set sysctrl_password to <TT
CLASS="REPLACEABLE"
><I
>E</I
></TT
>
set sysctrl_status to <TT
CLASS="REPLACEABLE"
><I
>F</I
></TT
>
set sysctrl_owner to <TT
CLASS="REPLACEABLE"
><I
>G</I
></TT
>
set sysctrl_device to <TT
CLASS="REPLACEABLE"
><I
>H</I
></TT
>
set sysctrl_owner_type to <TT
CLASS="REPLACEABLE"
><I
>I</I
></TT
>
add nic <TT
CLASS="REPLACEABLE"
><I
>J</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Then, after you add a network interface, a prompt appears requesting
the parameters for the network interface, which you enter similarly.</P
><P
>If you are running CLI in prompt mode, however, the display appears
as follows (when you provide the appropriate inputs):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>

Enter commands, when finished enter either "done" or "cancel"

Node Name [A]? 
Hostname? 
Node ID [0]? 
Sysctrl Type &#60;chalL|msc|mmsc&#62;?<TT
CLASS="USERINPUT"
><B
>&#8194;</B
></TT
>
Sysctrl Password [ ]?
Sysctrl Status &#60;enabled|disabled&#62;? 
Sysctrl Owner? 
Sysctrl Device? 
Sysctrl Owner Type &#60;tty&#62; ? 
Number of Controllers [2]? 
Controller IP Address? 
Controller Heartbeat HB (use network for heartbeats) &#60;true|false&#62;? 
Controller (use network for control messages) &#60;true|false&#62;? 
Controller Priority &#60;1,2,...&#62;? </PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2798"
>4.3.3. Using Input Files of CLI Commands</A
></H2
><P
> You can execute a series of Cluster Manager CLI commands
by using the <B
CLASS="COMMAND"
>-f </B
>option of the <B
CLASS="COMMAND"
>cluster_mgr</B
>
command and specifying an input file:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>/usr/lib/failsafe/bin/cluster_mgr -f "<TT
CLASS="REPLACEABLE"
><I
>input_file</I
></TT
>"</PRE
></TD
></TR
></TABLE
><P
>The input file must contain Cluster Manager CLI commands and end with
a <B
CLASS="COMMAND"
>quit</B
> command.</P
><P
>For example, the file <TT
CLASS="FILENAME"
>input.file</TT
> contains the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>show clusters
show nodes in cluster beta3
quit</PRE
></TD
></TR
></TABLE
><P
>You can execute the following command, which will yield the indicated
output:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>% <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr -f input.file</B
></TT
>

1 Cluster(s) defined
&#8194;       eagan
Cluster eagan has following 2 machine(s)
&#8194;       cm1
&#8194;       cm2</PRE
></TD
></TR
></TABLE
><P
>The <B
CLASS="COMMAND"
>cluster_mgr</B
> command provides a <B
CLASS="COMMAND"
>-i</B
>
option to be used with the <B
CLASS="COMMAND"
>-f</B
> option. This is the &#8220;ignore&#8221;
option which indicates that the Cluster Manager should not exit if a command
fails while executing a script.</P
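><P
>For example, to execute the commands in the file <TT
CLASS="FILENAME"
>input.file</TT
> shown above and continue past any command failures, you could enter:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>% <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr -i -f input.file</B
></TT
></PRE
></TD
></TR
></TABLE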
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE41514-PARENT"
>4.3.4. CLI Command Scripts</A
></H2
><P
> You can use the <B
CLASS="COMMAND"
>-f</B
>
option of the <B
CLASS="COMMAND"
>cluster_mgr</B
> command to write a script of
Cluster Manager CLI commands that you can execute directly. The script must
contain the following line as the first line of the script.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>#!/usr/lib/failsafe/bin/cluster_mgr -f</PRE
></TD
></TR
></TABLE
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>When you use the <B
CLASS="COMMAND"
>-i</B
> option of the <B
CLASS="COMMAND"
>cluster_mgr</B
> command to indicate that the Cluster Manager should not exit if
a command fails while executing a script, you must use the following syntax
in the first line of the script file:  <B
CLASS="COMMAND"
>#!/usr/lib/failsafe/bin/cluster_mgr
-if</B
>.  It is not necessary to use the <B
CLASS="COMMAND"
>-if</B
> syntax
when using the <B
CLASS="COMMAND"
>-i</B
> option from the command line directly.</P
></BLOCKQUOTE
></DIV
><P
>Each line of the script must be a valid <B
CLASS="COMMAND"
>cluster_mgr command </B
>line, similar to a <TT
CLASS="FILENAME"
>here</TT
> document. Because the
Cluster Manager CLI will run through commands as if entered interactively,
you must include <B
CLASS="COMMAND"
>done</B
> and <B
CLASS="COMMAND"
>quit</B
> lines
to finish a multi-level command and exit out of the Cluster Manager CLI.</P
><P
>There are CLI template files of scripts that you can modify to configure
the different components of your system. These files are located in the  <TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates</TT
> directory. For information on
CLI templates, see <A
HREF="le15969-parent.html#LE10673-PARENT"
>Section 4.3.5</A
>.</P
><P
>The following shows an example of a CLI command script <TT
CLASS="FILENAME"
>cli.script</TT
>.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>% <TT
CLASS="USERINPUT"
><B
>more cli.script</B
></TT
>
#!/usr/lib/failsafe/bin/cluster_mgr -f

show clusters
show nodes in cluster beta3
quit

% <TT
CLASS="USERINPUT"
><B
>cli.script</B
></TT
>
1 Cluster(s) defined
&#8194;       eagan
Cluster eagan has following 2 machine(s)
&#8194;       cm1
&#8194;       cm2

%</PRE
></TD
></TR
></TABLE
><P
>For a complete example of a CLI command script that configures a cluster,
see <A
HREF="le40790-parent.html"
>Section 5.8</A
> in <A
HREF="le94219-parent.html"
>Chapter 5</A
>.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE10673-PARENT"
>4.3.5. CLI Template Scripts</A
></H2
><P
>  Template files of CLI scripts
that you can modify to configure the different components of your system are
located in the <TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates</TT
> directory.</P
><P
>Each template file contains list of <B
CLASS="COMMAND"
>cluster_mgr</B
> 
commands to create a particular object, as well as comments describing each
field.  The template also provides default values for optional fields.</P
><P
>The <TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates</TT
> directory
contains the following templates:</P
><DIV
CLASS="TABLE"
><A
NAME="AEN2872"
></A
><P
><B
>Table 4-1. Available Templates</B
></P
><TABLE
BORDER="1"
CLASS="CALSTABLE"
><TBODY
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><I
CLASS="EMPHASIS"
>File name</I
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><I
CLASS="EMPHASIS"
>Description</I
></P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="FILENAME"
>cmgr-create-cluster</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Creation of a cluster</P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="FILENAME"
>cmgr-create-failover_policy</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Creation of failover policy</P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="FILENAME"
>cmgr-create-node</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Creation of node</P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="FILENAME"
>cmgr-create-resource_group</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Creation of Resource Group</P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="FILENAME"
>cmgr-create-resource_type</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Creation of resource type</P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="FILENAME"
>cmgr-create-resource-<TT
CLASS="REPLACEABLE"
><I
>resource type</I
></TT
></TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>CLI script template for creation of
resource of type <TT
CLASS="REPLACEABLE"
><I
>resource type</I
></TT
></P
></TD
></TR
></TBODY
></TABLE
></DIV
><P
>To create a Linux FailSafe configuration, you can concatenate multiple
templates into one file and execute the resulting CLI command script.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>If you concatenate information from multiple template scripts to prepare
your cluster configuration, you must remove the <B
CLASS="COMMAND"
>quit</B
> at
the end of each template script, except for the final <B
CLASS="COMMAND"
>quit</B
>.
A <TT
CLASS="FILENAME"
>cluster_mgr</TT
> script must have only one <B
CLASS="COMMAND"
>quit</B
> line.</P
></BLOCKQUOTE
></DIV
><P
>For example, for a three-node configuration with an NFS resource group containing
one volume, one filesystem, one IP address, and one NFS resource, you would concatenate
the following files (a command sketch follows this list), removing the <B
CLASS="COMMAND"
>quit</B
> at the end of each
template script except the last one:</P
><P
></P
><UL
><LI
><P
>3 copies of the <TT
CLASS="FILENAME"
>cmgr-create-node</TT
> file</P
></LI
><LI
><P
>1 copy of the <TT
CLASS="FILENAME"
>cmgr-create-cluster</TT
> file</P
></LI
><LI
><P
>1 copy of the <TT
CLASS="FILENAME"
>cmgr-create-failover_policy</TT
>
file</P
></LI
><LI
><P
>1 copy of the <TT
CLASS="FILENAME"
>cmgr-create-resource_group</TT
>
file</P
></LI
><LI
><P
>1 copy of the <TT
CLASS="FILENAME"
>cmgr-create-resource-volume</TT
>
file</P
></LI
><LI
><P
>1 copy of the <TT
CLASS="FILENAME"
>cmgr-create-resource-filesystem</TT
>
file</P
></LI
><LI
><P
>1 copy of the <TT
CLASS="FILENAME"
>cmgr-create-resource-IP_address</TT
>
file</P
></LI
><LI
><P
>1 copy of the <TT
CLASS="FILENAME"
>cmgr-create-resource-NFS</TT
>
file</P
></LI
></UL
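><P
>The following is a sketch of that procedure; the output file name is
chosen here for illustration only. After concatenating the templates, edit
the result to remove the intermediate <B
CLASS="COMMAND"
>quit</B
> lines and to fill in your site-specific values:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>% <TT
CLASS="USERINPUT"
><B
>cd /usr/lib/failsafe/cmgr-templates</B
></TT
>
% <TT
CLASS="USERINPUT"
><B
>cat cmgr-create-node cmgr-create-node cmgr-create-node \
cmgr-create-cluster cmgr-create-failover_policy cmgr-create-resource_group \
cmgr-create-resource-volume cmgr-create-resource-filesystem \
cmgr-create-resource-IP_address cmgr-create-resource-NFS &#62; /tmp/nfs-config.script</B
></TT
></PRE
></TD
></TR
></TABLE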
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2957"
>4.3.6. Invoking a Shell from within CLI</A
></H2
><P
> You can invoke
a shell from within the Cluster Manager CLI by entering the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>sh</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>To exit the shell and to return to the CLI, enter <TT
CLASS="LITERAL"
>exit</TT
>
at the shell prompt.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="fs-guioverview.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Using the Linux FailSafe Cluster Manager GUI</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73346-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Cluster Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le16877-parent.html
<HTML
><HEAD
><TITLE
>System Status</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="Activating (Starting) Linux FailSafe"
HREF="fs-activatehaservices.html"><LINK
REL="NEXT"
TITLE="Resource Group Failover"
HREF="le41282-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="fs-activatehaservices.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le41282-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE16877-PARENT"
>7.4. System Status</A
></H1
><P
> While the Linux FailSafe system is running, you can monitor the
status of the Linux FailSafe components to determine the state of each component.
Linux FailSafe allows you to view the system status in the following ways:</P
><P
></P
><UL
><LI
><P
>You can keep continuous watch on the state of a cluster using
the FailSafe Cluster View of the Cluster Manager GUI.</P
></LI
><LI
><P
>You can query the status of an individual resource group,
node, or cluster using either the Cluster Manager GUI or the Cluster Manager
CLI.</P
></LI
><LI
><P
>You can use the <B
CLASS="COMMAND"
>haStatus</B
> script provided
with the Cluster Manager CLI to see the status of all clusters, nodes, resources,
and resource groups in the configuration.</P
></LI
></UL
><P
>The following sections describe the procedures for performing each of
these tasks.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5247"
>7.4.1. Monitoring System Status with the Cluster Manager GUI</A
></H2
><P
>The easiest way to keep a continuous watch on the state of a cluster
is to use the FailSafe Cluster View of the Cluster Manager GUI. </P
><P
>In the FailSafe Cluster View window, problems that system components are
experiencing appear as blinking red icons. Components in transitional states
also appear as blinking icons. If there is a problem in a resource group or
node, the icon for that resource group or node blinks red, and the FailSafe
Cluster View icon for the cluster turns red and blinks as well.</P
><P
>The full color legend for component states in the FailSafe Cluster View
is as follows:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
>grey</DT
><DD
><P
>healthy but not online or active</P
></DD
><DT
>green</DT
><DD
><P
>healthy and active or online</P
></DD
><DT
>blinking green</DT
><DD
><P
>transitioning to green</P
></DD
><DT
>blinking red</DT
><DD
><P
>problems with component</P
></DD
><DT
>black and white outline</DT
><DD
><P
>resource type</P
></DD
><DT
>grey with yellow wrench</DT
><DD
><P
>maintenance mode, may or may not be currently monitored by Linux FailSafe</P
></DD
></DL
></DIV
><P
>If you minimize the FailSafe Cluster View window, the minimized icon
shows the current state of the cluster. When the cluster has Linux FailSafe
HA services active and there is no error, the icon shows a green cluster.
When the cluster goes into error state, the icon shows a red cluster. When
the cluster has Linux FailSafe HA services inactive, the icon shows a grey
cluster.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5278"
>7.4.2. Monitoring Resource and Reset Serial Line with the Cluster Manager
CLI</A
></H2
><P
> You can use the CLI to query the
status of a resource or to ping the system controller at a node, as described
in the following subsections.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5287"
>7.4.2.1. Querying Resource Status with the Cluster Manager CLI</A
></H3
><P
>To query a resource status, use the following
CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of resource</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
><TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;B</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster when you use this command and it will show the status of the indicated
resource in the default cluster.</P
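><P
>For example, to query a filesystem resource named <TT
CLASS="FILENAME"
>/hafs1</TT
> in a cluster named <TT
CLASS="LITERAL"
>test-cluster</TT
> (the names are examples only):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of resource /hafs1 of resource_type filesystem in cluster test-cluster</B
></TT
></PRE
></TD
></TR
></TABLE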
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5301"
>7.4.2.2. Pinging a System Controller with the Cluster Manager CLI</A
></H3
><P
> To perform
a ping operation on a system controller by providing the device name, use
the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin ping dev_name</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of dev_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>with sysctrl_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
></PRE
></TD
></TR
></TABLE
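><P
>For example, with illustrative values that match the sample configuration
shown later in this chapter (serial port device <TT
CLASS="FILENAME"
>/dev/ttyd2</TT
>, device type <TT
CLASS="LITERAL"
>tty</TT
>, system controller type <TT
CLASS="LITERAL"
>msc</TT
>):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin ping dev_name /dev/ttyd2 of dev_type tty with sysctrl_type msc</B
></TT
></PRE
></TD
></TR
></TABLE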
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE29367-PARENT"
>7.4.3. Resource Group Status</A
></H2
><P
>To query the status of a resource group, you
provide the name of the resource group and the cluster that includes the
resource group. Resource group status includes the following components:</P
><P
></P
><UL
><LI
><P
>Resource group state</P
></LI
><LI
><P
>Resource group error state</P
></LI
><LI
><P
>Resource owner</P
></LI
></UL
><P
>These components are described in the following subsections.</P
><P
>If the node on which a resource group is online has a status of <TT
CLASS="LITERAL"
>UNKNOWN</TT
>, the status of the resource group will not be available
or <TT
CLASS="LITERAL"
>ONLINE-READY</TT
>.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5334"
>7.4.3.1. Resource Group State</A
></H3
><P
> A resource group state can be one of the
following:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>ONLINE</TT
></DT
><DD
><P
>Linux FailSafe is running on the local nodes. The resource group is
allocated on a node in the cluster and is being monitored by Linux FailSafe.
It is fully allocated if there is no error; otherwise, some resources may
not be allocated or some resources may be in error state.</P
></DD
><DT
><TT
CLASS="LITERAL"
>ONLINE-PENDING</TT
></DT
><DD
><P
>Linux FailSafe is running on the local nodes and the resource group
is in the process of being allocated. This is a transient state.</P
></DD
><DT
><TT
CLASS="LITERAL"
>OFFLINE</TT
></DT
><DD
><P
>The resource group is not running or the resource group has been detached,
regardless of whether Linux FailSafe is running. When Linux FailSafe starts
up, it will not allocate this resource group.</P
></DD
><DT
><TT
CLASS="LITERAL"
>OFFLINE-PENDING</TT
></DT
><DD
><P
>Linux FailSafe is running on the local nodes and the resource group
is in the process of being released (becoming offline). This is a transient
state.</P
></DD
><DT
><TT
CLASS="LITERAL"
>ONLINE-READY</TT
></DT
><DD
><P
>Linux FailSafe is not running on the local node. When Linux FailSafe
starts up, it will attempt to bring this resource group online. If this state
is returned, no Linux FailSafe process is running on the current node.</P
></DD
><DT
><TT
CLASS="LITERAL"
>ONLINE-MAINTENANCE</TT
></DT
><DD
><P
>The resource group is allocated on a node in the cluster but is not
being monitored by Linux FailSafe. If a node failure occurs while a resource
group in <TT
CLASS="LITERAL"
>ONLINE-MAINTENANCE</TT
> state resides on that node,
the resource group will be moved to another node and monitoring will resume.
An administrator may move a resource group to an <TT
CLASS="LITERAL"
>ONLINE-MAINTENANCE</TT
> state for upgrade or testing purposes, or if there is any reason
that Linux FailSafe should not act on that resource for a period of time.</P
></DD
><DT
><TT
CLASS="LITERAL"
>INTERNAL ERROR</TT
></DT
><DD
><P
>An internal Linux FailSafe error has occurred and Linux FailSafe does
not know the state of the resource group. Error recovery is required.</P
></DD
><DT
><TT
CLASS="LITERAL"
>DISCOVERY (EXCLUSIVITY)</TT
></DT
><DD
><P
>The resource group is in the process of going online; Linux FailSafe is
checking all nodes in the resource group's application failure domain to
determine whether any resource in the resource group is already allocated.
This is a transient state.</P
></DD
><DT
><TT
CLASS="LITERAL"
>INITIALIZING</TT
></DT
><DD
><P
>Linux FailSafe on the local node has yet to get any information about
this resource group. This is a transient state.</P
></DD
></DL
></DIV
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5408"
>7.4.3.2. Resource Group Error State</A
></H3
><P
>When a resource group is <TT
CLASS="LITERAL"
>ONLINE</TT
>, its error status is continually
monitored. A resource group error status can be one of the following:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>NO ERROR</TT
></DT
><DD
><P
>Resource group has no error.</P
></DD
><DT
><TT
CLASS="LITERAL"
>INTERNAL ERROR - NOT RECOVERABLE</TT
></DT
><DD
><P
>Notify Silicon Graphics if this condition arises.</P
></DD
><DT
><TT
CLASS="LITERAL"
>NODE UNKNOWN</TT
></DT
><DD
><P
>The node that had the resource group online is in an unknown state. This occurs
when the node is not part of the cluster. The last known state of the resource
group is <TT
CLASS="LITERAL"
>ONLINE</TT
>, but the system cannot talk to the node.</P
></DD
><DT
><TT
CLASS="LITERAL"
>SRMD EXECUTABLE ERROR</TT
></DT
><DD
><P
>The start or stop action has failed for a resource in the resource group.</P
></DD
><DT
>   <TT
CLASS="LITERAL"
>SPLIT RESOURCE GROUP (EXCLUSIVITY)</TT
></DT
><DD
><P
>Linux FailSafe has determined that part of the resource group was running
on at least two different nodes in the cluster.</P
></DD
><DT
><TT
CLASS="LITERAL"
>NODE NOT AVAILABLE (EXCLUSIVITY)</TT
></DT
><DD
><P
>Linux FailSafe has determined that one of the nodes in the resource
group's application failure domain was not in the membership. Linux FailSafe
cannot bring the resource group online until that node is removed from the
application failure domain or HA services are started on that node.</P
></DD
><DT
><TT
CLASS="LITERAL"
>MONITOR ACTIVITY UNKNOWN</TT
></DT
><DD
><P
>In the process of turning maintenance mode on or off, an error occurred.
Linux FailSafe can no longer determine if monitoring is enabled or disabled.
Retry the operation. If the error continues, report the error to Silicon Graphics.</P
></DD
><DT
><TT
CLASS="LITERAL"
>NO AVAILABLE NODES</TT
></DT
><DD
><P
>A monitoring error has occurred on the last valid node in the cluster's
membership.</P
></DD
></DL
></DIV
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5474"
>7.4.3.3. Resource Owner</A
></H3
><P
>The resource owner is the logical node name of
the node that currently owns the resource.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5480"
>7.4.3.4. Monitoring Resource Group Status with the Cluster Manager GUI</A
></H3
><P
>You can use the FailSafe Cluster View to monitor the status of the resources
in a Linux FailSafe configuration. You can launch the FailSafe Cluster View
directly, or you can bring it up at any time by clicking on &#8220;FailSafe
Cluster View&#8221; at the bottom of the &#8220;FailSafe Manager&#8221; display.</P
><P
>From the View menu, select &#8220;Resources in Groups&#8221; to see
the resources organized by the groups they belong to, or select &#8220;Groups
owned by Nodes&#8221; to see where the online groups are running. This view
lets you observe failovers as they occur.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5484"
>7.4.3.5. Querying Resource Group Status with the Cluster Manager CLI</A
></H3
><P
>To query a resource group status, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of resource_group</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster when you use this command and it will show the status of the indicated
resource group in the default cluster.</P
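><P
>For example, to query the resource group <TT
CLASS="LITERAL"
>nfs-group1</TT
> in the cluster <TT
CLASS="LITERAL"
>test-cluster</TT
> (example names):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of resource_group nfs-group1 in cluster test-cluster</B
></TT
></PRE
></TD
></TR
></TABLE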
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5493"
>7.4.4. Node Status</A
></H2
><P
>  To query the status
of a node, you provide the logical node name of the node. The node status
can be one of the following:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>UP</TT
></DT
><DD
><P
>This node is part of cluster membership.</P
></DD
><DT
><TT
CLASS="LITERAL"
>DOWN</TT
></DT
><DD
><P
>This node is not part of cluster membership (no heartbeats) and this
node has been reset. This is a transient state.</P
></DD
><DT
><TT
CLASS="LITERAL"
>UNKNOWN</TT
></DT
><DD
><P
>This node is not part of cluster membership (no heartbeats) and this
node has not been reset (reset attempt has failed).</P
></DD
><DT
><TT
CLASS="LITERAL"
>INACTIVE</TT
></DT
><DD
><P
>HA services have not been started on this node.</P
></DD
></DL
></DIV
><P
>When you start HA services, node states transition from <TT
CLASS="LITERAL"
>INACTIVE</TT
> to <TT
CLASS="LITERAL"
>UP</TT
>. A node state may also transition
from <TT
CLASS="LITERAL"
>INACTIVE</TT
> to <TT
CLASS="LITERAL"
>UNKNOWN</TT
> to <TT
CLASS="LITERAL"
>UP</TT
>.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5540"
>7.4.4.1. Monitoring Cluster Status with the Cluster Manager GUI</A
></H3
><P
>You can use the FailSafe Cluster View to monitor the status of the clusters
in a Linux FailSafe configuration. You can launch the FailSafe Cluster View
directly, or you can bring it up at any time by clicking on &#8220;FailSafe
Cluster View&#8221; at the bottom of the &#8220;FailSafe Manager&#8221; display.</P
><P
>From the View menu, select &#8220;Groups owned by Nodes&#8221; to monitor
the health of the default cluster, its resource groups, and the group's resources.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5544"
>7.4.4.2. Querying Node Status with the Cluster Manager CLI</A
></H3
><P
>To query node status, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
></PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5550"
>7.4.4.3. Pinging the System Controller with the Cluster Manager CLI</A
></H3
><P
>When Linux FailSafe is running, you can determine whether the system
controller on a node is responding with the following Cluster Manager CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin ping node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>This command uses the Linux FailSafe daemons to test whether the system
controller is responding.</P
><P
>You can verify reset connectivity on a node in a cluster even when the
Linux FailSafe daemons are not running by using the <B
CLASS="COMMAND"
>standalone</B
>
option of the <B
CLASS="COMMAND"
>admin ping</B
> command of the CLI:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin ping standalone node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>This command does not go through the Linux FailSafe daemons, but calls
the <B
CLASS="COMMAND"
>ping</B
> command directly to test whether the system controller
on the indicated node is responding.</P
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5565"
>7.4.5. Cluster Status</A
></H2
><P
>   To query the status of a cluster, you
provide the name of the cluster. The cluster status can be one of the following:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="LITERAL"
>ACTIVE</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>INACTIVE</TT
></P
></LI
></UL
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5584"
>7.4.5.1. Querying Cluster Status with the Cluster Manager GUI</A
></H3
><P
>You can use the Cluster View of the Cluster Manager GUI to monitor the
status of the clusters in a Linux FailSafe system.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5587"
>7.4.5.2. Querying Cluster Status with the Cluster Manager CLI</A
></H3
><P
>To query node and cluster status, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
></PRE
></TD
></TR
></TABLE
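><P
>For example, to query a cluster named <TT
CLASS="LITERAL"
>test-cluster</TT
> (an example name):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of cluster test-cluster</B
></TT
></PRE
></TD
></TR
></TABLE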
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE28488-PARENT"
>7.4.6. Viewing System Status with the haStatus CLI Script</A
></H2
><P
>The <B
CLASS="COMMAND"
>haStatus</B
> script provides status and configuration information about
clusters, nodes, resources, and resource groups in the configuration. This
script is installed in the <TT
CLASS="FILENAME"
>/var/cluster/cmgr-scripts</TT
>
directory. You can modify this script to suit your needs. See the <B
CLASS="COMMAND"
>haStatus</B
> (1M) man page for further information about this script.</P
><P
>The following examples show the output of the different options of the <B
CLASS="COMMAND"
>haStatus</B
> script.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>haStatus -help</B
></TT
>
Usage: haStatus [-a|-i] [-c clustername]
where,
&#8194;-a prints detailed cluster configuration information and cluster
status.
&#8194;-i prints detailed cluster configuration information only.
&#8194;-c can be used to specify a cluster for which status is to be printed.
&#8194;&#8220;clustername&#8221; is the name of the cluster for which status is to be
printed.
# <TT
CLASS="USERINPUT"
><B
>haStatus</B
></TT
>
Tue Nov 30 14:12:09 PST 1999
Cluster test-cluster:
&#8194;       Cluster state is ACTIVE.
Node hans2:
&#8194;       State of machine is UP.
Node hans1:
&#8194;       State of machine is UP.
Resource_group nfs-group1:
&#8194;       State: Online
&#8194;       Error: No error
&#8194;       Owner: hans1
&#8194;       Failover Policy: fp_h1_h2_ord_auto_auto
&#8194;       Resources:
&#8194;               /hafs1  (type: NFS)
&#8194;               /hafs1/nfs/statmon      (type: statd)
&#8194;               150.166.41.95   (type: IP_address)
&#8194;               /hafs1  (type: filesystem)
&#8194;               havol1  (type: volume)
# <TT
CLASS="USERINPUT"
><B
>haStatus -i</B
></TT
>
Tue Nov 30 14:13:52 PST 1999
Cluster test-cluster:
Node hans2:
&#8194;       Logical Machine Name: hans2
&#8194;       Hostname: hans2.engr.sgi.com
&#8194;       Is FailSafe: true
&#8194;       Is Cellular: false
&#8194;       Nodeid: 32418
&#8194;       Reset type: powerCycle
&#8194;       System Controller: msc
&#8194;       System Controller status: enabled
&#8194;       System Controller owner: hans1
&#8194;       System Controller owner device: /dev/ttyd2
&#8194;       System Controller owner type: tty
&#8194;       ControlNet Ipaddr: 192.26.50.15
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: true
&#8194;       ControlNet Priority: 1
&#8194;       ControlNet Ipaddr: 150.166.41.61
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: false
&#8194;       ControlNet Priority: 2
Node hans1:
&#8194;       Logical Machine Name: hans1
&#8194;       Hostname: hans1.engr.sgi.com
&#8194;       Is FailSafe: true
&#8194;       Is Cellular: false
&#8194;       Nodeid: 32645
&#8194;       Reset type: powerCycle
&#8194;       System Controller: msc
&#8194;       System Controller status: enabled
&#8194;       System Controller owner: hans2
&#8194;       System Controller owner device: /dev/ttyd2
&#8194;       System Controller owner type: tty
&#8194;       ControlNet Ipaddr: 192.26.50.14
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: true
&#8194;       ControlNet Priority: 1
&#8194;       ControlNet Ipaddr: 150.166.41.60
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: false
&#8194;       ControlNet Priority: 2
Resource_group nfs-group1:
&#8194;       Failover Policy: fp_h1_h2_ord_auto_auto
&#8194;               Version: 1
&#8194;               Script: ordered
&#8194;               Attributes: Auto_Failback Auto_Recovery
&#8194;               Initial AFD: hans1 hans2
&#8194;       Resources:
&#8194;               /hafs1  (type: NFS)
&#8194;               /hafs1/nfs/statmon      (type: statd)
&#8194;               150.166.41.95   (type: IP_address)
&#8194;               /hafs1  (type: filesystem)
&#8194;               havol1  (type: volume)
Resource /hafs1 (type NFS):
&#8194;       export-info: rw,wsync
&#8194;       filesystem: /hafs1
&#8194;       Resource dependencies
&#8194;       statd /hafs1/nfs/statmon
&#8194;       filesystem /hafs1
Resource /hafs1/nfs/statmon (type statd):
&#8194;       InterfaceAddress: 150.166.41.95
&#8194;       Resource dependencies
&#8194;       IP_address 150.166.41.95
&#8194;       filesystem /hafs1
Resource 150.166.41.95 (type IP_address):
&#8194;       NetworkMask: 0xffffff00
&#8194;       interfaces: ef1
&#8194;       BroadcastAddress: 150.166.41.255
&#8194;       No resource dependencies
Resource /hafs1 (type filesystem):
&#8194;       volume-name: havol1
&#8194;       mount-options: rw,noauto
&#8194;       monitoring-level: 2
&#8194;       Resource dependencies
&#8194;       volume havol1
Resource havol1 (type volume):
&#8194;       devname-group: sys
&#8194;       devname-owner: root
&#8194;       devname-mode: 666
&#8194;       No resource dependencies
Failover_policy fp_h1_h2_ord_auto_auto:
&#8194;       Version: 1
&#8194;       Script: ordered
&#8194;       Attributes: Auto_Failback Auto_Recovery
&#8194;       Initial AFD: hans1 hans2
# <TT
CLASS="USERINPUT"
><B
>haStatus -a</B
></TT
>
Tue Nov 30 14:45:30 PST 1999
Cluster test-cluster:
&#8194;       Cluster state is ACTIVE.
Node hans2:
&#8194;       State of machine is UP.
&#8194;       Logical Machine Name: hans2
&#8194;       Hostname: hans2.engr.sgi.com
&#8194;       Is FailSafe: true
&#8194;       Is Cellular: false
&#8194;       Nodeid: 32418
&#8194;       Reset type: powerCycle
&#8194;       System Controller: msc
&#8194;       System Controller status: enabled
&#8194;       System Controller owner: hans1
&#8194;       System Controller owner device: /dev/ttyd2
&#8194;       System Controller owner type: tty
&#8194;       ControlNet Ipaddr: 192.26.50.15
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: true
&#8194;       ControlNet Priority: 1
&#8194;       ControlNet Ipaddr: 150.166.41.61
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: false
&#8194;       ControlNet Priority: 2
Node hans1:
&#8194;       State of machine is UP.
&#8194;       Logical Machine Name: hans1
&#8194;       Hostname: hans1.engr.sgi.com
&#8194;       Is FailSafe: true
&#8194;       Is Cellular: false
&#8194;       Nodeid: 32645
&#8194;       Reset type: powerCycle
&#8194;       System Controller: msc
&#8194;       System Controller status: enabled
&#8194;       System Controller owner: hans2
&#8194;       System Controller owner device: /dev/ttyd2
&#8194;       System Controller owner type: tty
&#8194;       ControlNet Ipaddr: 192.26.50.14
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: true
&#8194;       ControlNet Priority: 1
&#8194;       ControlNet Ipaddr: 150.166.41.60
&#8194;       ControlNet HB: true
&#8194;       ControlNet Control: false
&#8194;       ControlNet Priority: 2
Resource_group nfs-group1:
&#8194;       State: Online
&#8194;       Error: No error
&#8194;       Owner: hans1
&#8194;       Failover Policy: fp_h1_h2_ord_auto_auto
&#8194;               Version: 1
&#8194;               Script: ordered
&#8194;               Attributes: Auto_Failback Auto_Recovery
&#8194;               Initial AFD: hans1 hans2
&#8194;       Resources:
&#8194;               /hafs1  (type: NFS)
&#8194;               /hafs1/nfs/statmon      (type: statd)
&#8194;               150.166.41.95   (type: IP_address)
&#8194;               /hafs1  (type: filesystem)
&#8194;               havol1  (type: volume)
Resource /hafs1 (type NFS):
&#8194;       State: Online
&#8194;       Error: None
&#8194;       Owner: hans1
&#8194;       Flags: Resource is monitored locally
&#8194;       export-info: rw,wsync
&#8194;       filesystem: /hafs1
&#8194;       Resource dependencies
&#8194;       statd /hafs1/nfs/statmon
&#8194;       filesystem /hafs1
Resource /hafs1/nfs/statmon (type statd):
&#8194;       State: Online
&#8194;       Error: None
&#8194;       Owner: hans1
&#8194;       Flags: Resource is monitored locally
&#8194;       InterfaceAddress: 150.166.41.95
&#8194;       Resource dependencies
&#8194;       IP_address 150.166.41.95
&#8194;       filesystem /hafs1
Resource 150.166.41.95 (type IP_address):
&#8194;       State: Online
&#8194;       Error: None
&#8194;       Owner: hans1
&#8194;       Flags: Resource is monitored locally
&#8194;       NetworkMask: 0xffffff00
&#8194;       interfaces: ef1
&#8194;       BroadcastAddress: 150.166.41.255
&#8194;       No resource dependencies
Resource /hafs1 (type filesystem):
&#8194;       State: Online
&#8194;       Error: None
&#8194;       Owner: hans1
&#8194;       Flags: Resource is monitored locally
&#8194;       volume-name: havol1
&#8194;       mount-options: rw,noauto
&#8194;       monitoring-level: 2
&#8194;       Resource dependencies
&#8194;       volume havol1
Resource havol1 (type volume):
&#8194;       State: Online
&#8194;       Error: None
&#8194;       Owner: hans1
&#8194;       Flags: Resource is monitored locally
&#8194;       devname-group: sys
&#8194;       devname-owner: root
&#8194;       devname-mode: 666
&#8194;       No resource dependencies
# <TT
CLASS="USERINPUT"
><B
>haStatus -c test-cluster</B
></TT
>
Tue Nov 30 14:42:04 PST 1999
Cluster test-cluster:
&#8194;       Cluster state is ACTIVE.
Node hans2:
&#8194;       State of machine is UP.
Node hans1:
&#8194;       State of machine is UP.
Resource_group nfs-group1:
&#8194;       State: Online
&#8194;       Error: No error
&#8194;       Owner: hans1
&#8194;       Failover Policy: fp_h1_h2_ord_auto_auto
&#8194;       Resources:
&#8194;               /hafs1  (type: NFS)
&#8194;               /hafs1/nfs/statmon      (type: statd)
&#8194;               150.166.41.95   (type: IP_address)
&#8194;               /hafs1  (type: filesystem)
&#8194;               havol1  (type: volume)</PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="fs-activatehaservices.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le41282-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Activating (Starting) Linux FailSafe</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Resource Group Failover</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le17012-parent.html
<HTML
><HEAD
><TITLE
>Node Membership and Resets</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"><LINK
REL="PREVIOUS"
TITLE="FailSafe Log Files"
HREF="le28847-parent.html"><LINK
REL="NEXT"
TITLE="Status Monitoring"
HREF="le13884-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le28847-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 9. Linux FailSafe Recovery</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le13884-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE17012-PARENT"
>9.3. Node Membership and Resets</A
></H1
><P
>When you examine the actions of a FailSafe system after a failure to determine
what went wrong and how processes were transferred, it is important to
consider the concept of node membership. When failover occurs, the runtime
failover domain can include only those nodes that are in the cluster membership.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN6393"
>9.3.1. Node Membership and Tie-Breaker Node</A
></H2
><P
>Nodes can enter into the cluster membership
only when they are not disabled and they are in a known state. This ensures
that data integrity is maintained because only nodes within the cluster membership
can access the shared storage. If nodes outside the membership and not controlled
by FailSafe were able to access the shared storage, two nodes might try to
access the same data at the same time, a situation that would result in data
corruption. For this reason, disabled nodes do not participate in the membership
computation. Note that no attempt is made to reset nodes that are configured
as disabled before confirming the cluster membership.</P
><P
>Node membership in a cluster is based on a quorum majority.
For a cluster to be enabled, more than 50% of the nodes in the cluster must
be in a known state, able to talk to each other, using heartbeat control networks.
This quorum determines which nodes are part of the cluster membership that
is formed.</P
><P
>If
there are an even number of nodes in the cluster, it is possible that there
will be no majority quorum; there could be two sets of nodes, each consisting
of 50% of the total number of nodes, unable to communicate with the other set
of nodes. In this case, FailSafe uses the node that has been configured as
the tie-breaker node when you configured your FailSafe parameters. If no tie-breaker
node was configured, FailSafe uses the enabled node with the lowest node id
number.</P
><P
>For information on setting tie-breaker nodes, see <A
HREF="z957104627glen.html#FS-SETFSPARAMETERS"
>Section 5.4.4</A
>.</P
><P
> The nodes in a quorum attempt to reset the nodes
that are not in the quorum. Nodes that can be reset are declared <TT
CLASS="LITERAL"
>DOWN</TT
> in the membership, nodes that could not be reset are declared <TT
CLASS="LITERAL"
>UNKNOWN</TT
>. Nodes in the quorum are <TT
CLASS="LITERAL"
>UP</TT
>.</P
><P
>If a new majority quorum is computed, a new membership is
declared whether any node could be reset or not.</P
><P
>If at least one node in the current quorum has a current membership,
the nodes will proceed to declare a new membership if they can reset at least
one node.</P
><P
>If all nodes in the new tied quorum are coming up for the
first time, they will try to reset and proceed with a new membership only
if the quorum includes the tie-breaker node.</P
><P
>If a tied subset of nodes in the cluster had no previous membership,
then the subset of nodes in the cluster with the tie-breaker node attempts
to reset nodes in the other subset of nodes in the cluster. If at least one
node reset succeeds, a new membership is confirmed.</P
><P
>If a tied subset of nodes in the cluster had previous membership,
the nodes in one subset of nodes in the cluster attempt to reset nodes in
the other subset of nodes in the cluster. If at least one node reset succeeds,
a new membership is confirmed. The subset of nodes in the cluster with the
tie-breaker node resets immediately; the other subset of nodes in the cluster
attempts to reset after some time.</P
><P
>Resets are done through system controllers connected to tty
ports through serial lines. Periodic serial line monitoring never stops. If
the estimated serial line monitoring failure interval and the estimated heartbeat
loss interval overlap, FailSafe suspects a power failure at the node being reset.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN6420"
>9.3.2. No Membership Formed</A
></H2
><P
>When no
cluster membership is formed, you should check the following areas for possible
problems:</P
><P
></P
><UL
><LI
><P
>Is the cluster membership daemon, <B
CLASS="COMMAND"
>ha_cmsd</B
>
running? Is the database daemon, <B
CLASS="COMMAND"
>cdbd</B
>, running?</P
></LI
><LI
><P
>Can the nodes communicate with each other?</P
><P
></P
><UL
><LI
><P
>Are the control networks configured as heartbeat networks?</P
></LI
></UL
></LI
><LI
><P
>Can the control network addresses be pinged from peer nodes?</P
></LI
><LI
><P
>Are the quorum majority or tie rules satisfied?</P
><P
>Look at the <TT
CLASS="FILENAME"
>cmsd</TT
> log to determine membership status.</P
></LI
><LI
><P
>If a reset is required, are the following conditions met?</P
><P
></P
><UL
><LI
><P
>Is the node control daemon,<B
CLASS="COMMAND"
>&#8194;crsd</B
>,
up and running?</P
></LI
><LI
><P
>Is the reset serial line in good health?</P
><P
>You can look at the <TT
CLASS="FILENAME"
>crsd</TT
> log for the node you are
concerned with, or execute an <B
CLASS="COMMAND"
>admin ping</B
> and <B
CLASS="COMMAND"
>admin reset</B
> command on the node to check this (see the example after this list).</P
></LI
></UL
></LI
></UL
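><P
>For example, to check the reset line to an example node named <TT
CLASS="LITERAL"
>hans1</TT
>, you could enter the following (note that <B
CLASS="COMMAND"
>admin reset</B
> actually resets the node, so use it with care):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin ping node hans1</B
></TT
></PRE
></TD
></TR
></TABLE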
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le28847-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le13884-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>FailSafe Log Files</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Status Monitoring</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le18685-parent.html
<HTML
><HEAD
><TITLE
>Adding New Resource Groups or Resources in an Active
Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="PREVIOUS"
TITLE="Upgrading FailSafe Software in an Active Cluster"
HREF="le31814-parent.html"><LINK
REL="NEXT"
TITLE="Adding a New Hardware Device in an Active Cluster"
HREF="le32198-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le31814-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 10. Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le32198-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE18685-PARENT"
>10.6. Adding New Resource Groups or Resources in an Active
Cluster</A
></H1
><P
>The following procedure describes how to add a resource group and resources
to an active cluster. To add resources to an existing resource group, perform
resource configuration (Step 4), resource diagnostics (Step 5), and add resources
to the resource group (Step 6).</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Identify all the resources that have to be moved together.
These resources running on a node should be able to provide a service to the
client. These resources should be placed in a resource group. For example,
Netscape webserver <TT
CLASS="FILENAME"
>mfg-web</TT
>, its IP address 192.26.50.40,
and the filesystem <TT
CLASS="FILENAME"
>/shared/mfg-web</TT
> containing the web
configuration and document pages should be placed in the same resource group
(for example, <TT
CLASS="FILENAME"
>mfg-web-rg</TT
>).</P
></LI
><LI
><P
>Configure the resources in all nodes in the cluster where
the resource group is expected to be online. For example, this might involve
configuring the Netscape web server <TT
CLASS="FILENAME"
>mfg-web</TT
> on nodes <TT
CLASS="FILENAME"
>web-node1</TT
> and <TT
CLASS="FILENAME"
>web-node2</TT
> in the cluster.</P
></LI
><LI
><P
>Create a failover policy. Determine the type of failover attribute
required for the resource group. The <B
CLASS="COMMAND"
>cluster_mgr</B
> template
(<TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates/cmgr-create-failover_policy</TT
>)
can be used to create the failover policy.</P
></LI
><LI
><P
>Configure the resources in the configuration database. There are <B
CLASS="COMMAND"
>cluster_mgr</B
> templates to create resources of various resource types
in the <TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates</TT
> directory. For example,
the volume resource, the <TT
CLASS="FILENAME"
>/shared/mfg-web</TT
> filesystem,
the <TT
CLASS="LITERAL"
>192.26.50.40 IP_address</TT
> resource, and the <TT
CLASS="FILENAME"
>mfg-web</TT
> Netscape_web resource have to be created in the configuration
database. Create the resource dependencies for these resources.</P
></LI
><LI
><P
>Run resource diagnostics. For information on the diagnostic
commands, see <A
HREF="le56830-parent.html"
>Chapter 8</A
>.</P
></LI
><LI
><P
>Create resource group and add resources to the resource group.
The <B
CLASS="COMMAND"
>cluster_mgr</B
> template (<TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates/cmgr-create-resource_group</TT
>) can be used to create a resource group and add resources to the resource
group; a sketch of this workflow follows the list.</P
><P
>All resources that are dependent on each other should be added to the
resource group at the same time. If resources are added to an existing resource
group that is online in a node in the cluster, the resources are also made
online on the same node.</P
></LI
></OL
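><P
>The following sketch illustrates one way to work through steps 3, 4, and
6 with the templates. It is illustrative only: the file name under <TT
CLASS="FILENAME"
>/tmp</TT
> is arbitrary, and the <TT
CLASS="LITERAL"
>-f</TT
> option for executing a command file is an assumption; check the <B
CLASS="COMMAND"
>cluster_mgr</B
> man page for the exact invocation on your system.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># cat /usr/lib/failsafe/cmgr-templates/cmgr-create-failover_policy \
      /usr/lib/failsafe/cmgr-templates/cmgr-create-resource_group \
      &#62; /tmp/mfg-web-rg.cmgr
# vi /tmp/mfg-web-rg.cmgr    (fill in mfg-web-rg, its failover policy, and
                              its resources; keep only the final quit)
# cluster_mgr -f /tmp/mfg-web-rg.cmgr</PRE
></TD
></TR
></TABLE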
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le31814-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le32198-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Upgrading FailSafe Software in an Active Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Adding a New Hardware Device in an Active Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le19101-parent.html
<HTML
><HEAD
><TITLE
>Highly Available Applications</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Highly Available Resources"
HREF="le85141-parent.html"><LINK
REL="NEXT"
TITLE="Failover and Recovery Processes"
HREF="le19267-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le85141-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le19267-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE19101-PARENT"
>1.9. Highly Available Applications</A
></H1
><P
>Each application has a primary node and up to seven additional nodes
that you can use as backup nodes, according to the failover policy you define.
 The primary node is the node on which the application runs when Linux FailSafe
is in <I
CLASS="FIRSTTERM"
>normal state</I
>. When a failure of any highly available
resources or highly available application is detected by Linux FailSafe software,
all highly available resources in the affected resource group on the failed
node are failed over to a different node and the highly available applications
on the failed node are stopped. When these operations are complete, the highly
available applications are started on the backup node.</P
><P
>All information about highly available applications, including the primary
node, components of the resource group, and failover policy for the application
and monitoring, is specified when you configure your Linux FailSafe system
with the Cluster Manager GUI or with the Cluster Manager CLI. Information
on configuring the system is provided in <A
HREF="le94219-parent.html"
>Chapter 5</A
>.
Monitoring scripts detect the failure of a highly available application.</P
><P
>The Linux FailSafe software provides a framework for making applications
highly available services. By writing scripts and configuring the system in
accordance with those scripts, you can turn client/server applications into
highly available applications. For information, see the  <I
CLASS="CITETITLE"
>Linux
FailSafe Programmer's Guide</I
>.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le85141-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le19267-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Highly Available Resources</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Failover and Recovery Processes</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le19267-parent.html
<HTML
><HEAD
><TITLE
>Failover and Recovery Processes</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Highly Available Applications"
HREF="le19101-parent.html"><LINK
REL="NEXT"
TITLE="Overview of Configuring and Testing a New Linux
FailSafe Cluster"
HREF="le24477-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le19101-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le24477-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE19267-PARENT"
>1.10. Failover and Recovery Processes</A
></H1
><P
> When a failure is detected on one node (the
node has crashed, hung, or been shut down, or a highly available service is
no longer operating), a different node performs a failover of the highly available
services that are being provided on the node with the failure (called the <I
CLASS="FIRSTTERM"
>failed node</I
>). Failover allows all of the highly available services,
including those provided by the failed node, to remain available within the
cluster.</P
><P
>A failure in a highly available service can be detected by Linux FailSafe
processes running on another node. Depending on which node detects the failure,
the sequence of actions following the failure is different.</P
><P
>If the failure is detected by the Linux FailSafe software running on
the same node, the failed node performs these operations:</P
><P
></P
><UL
><LI
><P
>Stops the highly available resource group running on the node</P
></LI
><LI
><P
>Moves the highly available resource group to a different node,
according to the defined failover policy for the resource group</P
></LI
><LI
><P
>Sends a message to the node that will take over the services
to start providing all resource group services previously provided by the
failed node</P
></LI
></UL
><P
>When it receives the message, the node that is taking over the resource
group performs these operations:</P
><P
></P
><UL
><LI
><P
>Transfers ownership of the resource group from the failed
node to itself</P
></LI
><LI
><P
>Starts offering the resource group services that were running
on the failed node</P
></LI
></UL
><P
>If the failure is detected by Linux FailSafe software running on a different
node, the node detecting the failure performs these operations:</P
><P
></P
><UL
><LI
><P
>Using the serial connection between the nodes, reboots the
failed node to prevent corruption of data</P
></LI
><LI
><P
>Transfers ownership of the resource group from the failed
node to the other nodes in the cluster, based on the resource group failover
policy</P
></LI
><LI
><P
>Starts offering the resource group services that were running
on the failed node</P
></LI
></UL
><P
>When a failed node comes back up, whether the node automatically starts
to provide highly available services again depends on the failover policy
you define. For information on defining failover policies, see <A
HREF="le53159-parent.html#FS-DEFINEFAILOVER"
>Section 5.5.12</A
>.</P
><P
>Normally, a node that experiences a failure automatically reboots and
resumes providing highly available services. This scenario works well for
transient errors (as well as for planned outages for equipment and software
upgrades). However, if there are persistent errors, automatic reboot can cause
recovery and an immediate failover again. To prevent this, the Linux FailSafe
software checks how long the rebooted node has been up since the last time
it was started. If the interval is less than five minutes (by default), the
Linux FailSafe software automatically disables Linux FailSafe from booting
on the failed node and does not start up the Linux FailSafe software on this
node.  It also writes error messages to <TT
CLASS="FILENAME"
>/var/log/failsafe</TT
>
and to the appropriate log file.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le19101-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le24477-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Highly Available Applications</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Overview of Configuring and Testing a New Linux
FailSafe Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le20463-parent.html
<HTML
><HEAD
><TITLE
>Linux FailSafe Administration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Additional Linux FailSafe Features"
HREF="le94860-parent.html"><LINK
REL="NEXT"
TITLE="Hardware Components of a Linux FailSafe Cluster"
HREF="le32900-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le94860-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le32900-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE20463-PARENT"
>1.4. Linux FailSafe Administration</A
></H1
><P
>You can perform all Linux FailSafe administrative tasks by means of
the Linux FailSafe Cluster Manager Graphical User Interface (GUI). The Linux
FailSafe GUI provides a guided interface to configure, administer, and monitor
a Linux FailSafe-controlled highly available cluster. The Linux FailSafe GUI
also provides screen-by-screen help text.</P
><P
>If you wish, you can perform Linux FailSafe administrative tasks directly
by means of the Linux FailSafe Cluster Manager CLI, which provides a command-line
interface for the administration tasks.</P
><P
>For information on Linux FailSafe Cluster Manager tools, see <A
HREF="le73346-parent.html"
>Chapter 4</A
>.</P
><P
>For information on Linux FailSafe configuration and administration tasks,
see <A
HREF="le94219-parent.html"
>Chapter 5</A
>, and <A
HREF="le99367-parent.html"
>Chapter 7</A
>.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le94860-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le32900-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Additional Linux FailSafe Features</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Hardware Components of a Linux FailSafe Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
html/le23103-parent.html
<HTML
><HEAD
><TITLE
>Configuring System Files</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="PREVIOUS"
TITLE="Installing Required Software"
HREF="le97755-parent.html"><LINK
REL="NEXT"
TITLE="Additional Configuration Issues"
HREF="le13651-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le97755-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le13651-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE23103-PARENT"
>3.3. Configuring System Files</A
></H1
><P
>When
you install the Linux FailSafe software, there are some system file considerations
you must take into account. This section describes the required and optional
changes you make to the following files for every node in the pool:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="FILENAME"
>/etc/services</TT
></P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/etc/failsafe/config/cad.options</TT
></P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/etc/failsafe/config/cdbd.options</TT
></P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>/etc/failsafe/config/cmond.options</TT
></P
></LI
></UL
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2003"
>3.3.1. Configuring /etc/services for Linux FailSafe</A
></H2
><P
>The <TT
CLASS="FILENAME"
>/etc/services</TT
> file must contain entries for <TT
CLASS="LITERAL"
>sgi-cmsd</TT
>, <TT
CLASS="LITERAL"
>sgi-crsd</TT
>, <TT
CLASS="LITERAL"
>sgi-gcd</TT
>,
and <TT
CLASS="LITERAL"
>sgi-cad</TT
> on each node before starting HA services in
the node. The port numbers assigned for these processes must be the same in
all nodes in the cluster. Note that <TT
CLASS="LITERAL"
>sgi-cad</TT
> requires a
TCP port.</P
><P
>The following shows an example of <TT
CLASS="FILENAME"
>/etc/services</TT
>
entries for     <TT
CLASS="LITERAL"
>sgi-cmsd</TT
>, <TT
CLASS="LITERAL"
>sgi-crsd</TT
>, <TT
CLASS="LITERAL"
>sgi-gcd</TT
> and <TT
CLASS="LITERAL"
>sgi-cad</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>sgi-cmsd   7000/udp            # SGI Cluster Membership Daemon
sgi-crsd   17001/udp           # Cluster reset services daemon
sgi-gcd    17002/udp           # SGI Group Communication Daemon
sgi-cad    17003/tcp           # Cluster Admin daemon</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2019"
>3.3.2. Configuring /etc/failsafe/config/cad.options for Linux FailSafe</A
></H2
><P
>The <TT
CLASS="FILENAME"
>/etc/failsafe/config/cad.options</TT
> file contains
the list of parameters that the cluster administration daemon (CAD) reads
when the process is started. The CAD provides cluster information to the Linux
FailSafe Cluster Manager GUI. </P
><P
>The following options can be set in the <TT
CLASS="FILENAME"
>cad.options</TT
>
file:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>--append_log</TT
></DT
><DD
><P
>Append CAD logging information to the CAD log file instead of overwriting
it.</P
></DD
><DT
><TT
CLASS="LITERAL"
>--log_file </TT
><TT
CLASS="REPLACEABLE"
><I
>filename</I
></TT
></DT
><DD
><P
>CAD log file name. Alternately, this can be specified as <TT
CLASS="LITERAL"
>-lf </TT
><TT
CLASS="REPLACEABLE"
><I
>filename.</I
></TT
></P
></DD
><DT
><TT
CLASS="LITERAL"
>-vvvv</TT
></DT
><DD
><P
>Verbosity level. The number of &#8220;<TT
CLASS="LITERAL"
>v</TT
>&#8221;s indicates
the level of logging. Setting <TT
CLASS="LITERAL"
>-v</TT
> logs the fewest messages.
Setting <TT
CLASS="LITERAL"
>-vvvv</TT
> logs the highest number of messages.</P
></DD
></DL
></DIV
><P
>The following example shows an <TT
CLASS="FILENAME"
>/etc/failsafe/config/cad.options</TT
> file:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>-vv -lf /var/log/failsafe/cad_nodename --append_log</PRE
></TD
></TR
></TABLE
><P
>When you change the <TT
CLASS="FILENAME"
>cad.options</TT
> file, you must
restart the CAD processes with the <B
CLASS="COMMAND"
>/etc/rc.d/init.d/fs_cluster restart</B
> command for those changes to take effect.</P
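><P
>For example, run the following command as root on the node (a sketch; it
assumes the init script location given above):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/etc/rc.d/init.d/fs_cluster restart</B
></TT
></PRE
></TD
></TR
></TABLE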
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2057"
>3.3.3. Configuring /etc/failsafe/config/cdbd.options for Linux FailSafe</A
></H2
><P
>The <TT
CLASS="FILENAME"
>/etc/failsafe/config/cdbd.options</TT
> file contains
the list of parameters that the cdbd daemon reads when the process is started.
The cdbd daemon is the configuration database daemon that manages the distribution
of cluster configuration database (CDB) across the nodes in the pool. </P
><P
>The following options can be set in the <TT
CLASS="FILENAME"
>cdbd.options</TT
>
file:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>-logevents</TT
> <TT
CLASS="REPLACEABLE"
><I
>eventname</I
></TT
></DT
><DD
><P
>Log selected events. These event names may be used: <B
CLASS="COMMAND"
>all</B
>, <B
CLASS="COMMAND"
>internal</B
>, <B
CLASS="COMMAND"
>args</B
>, <B
CLASS="COMMAND"
>attach</B
>, <B
CLASS="COMMAND"
>chandle</B
>, <B
CLASS="COMMAND"
>node</B
>, <B
CLASS="COMMAND"
>tree</B
>, <B
CLASS="COMMAND"
>lock</B
>, <B
CLASS="COMMAND"
>datacon</B
>, <B
CLASS="COMMAND"
>trap</B
>, <B
CLASS="COMMAND"
>notify</B
>, <B
CLASS="COMMAND"
>access</B
>, <B
CLASS="COMMAND"
>storage</B
>.</P
><P
>The default value for this option is <B
CLASS="COMMAND"
>all</B
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-logdest</TT
> <TT
CLASS="REPLACEABLE"
><I
>log_destination</I
></TT
></DT
><DD
><P
>Set log destination. These log destinations may be used: <B
CLASS="COMMAND"
>all</B
>, <B
CLASS="COMMAND"
>stdout</B
>, <B
CLASS="COMMAND"
>stderr</B
>, <B
CLASS="COMMAND"
>syslog</B
>, <B
CLASS="COMMAND"
>logfile</B
>. If multiple destinations are
specified, the log messages are written to all of them. If <B
CLASS="COMMAND"
>logfile</B
> is specified, it has no effect unless the -logfile option is also
specified.  If the log destination is <TT
CLASS="LITERAL"
>stderr</TT
> or <TT
CLASS="LITERAL"
>stdout</TT
>, logging is then disabled if <TT
CLASS="LITERAL"
>cdbd</TT
> runs
as a daemon, because <TT
CLASS="LITERAL"
>stdout</TT
> and <TT
CLASS="LITERAL"
>stderr</TT
>
are closed when <TT
CLASS="LITERAL"
>cdbd</TT
> is running as a daemon.</P
><P
>The default value for this option is <B
CLASS="COMMAND"
>logfile</B
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-logfile</TT
> <TT
CLASS="REPLACEABLE"
><I
>filename</I
></TT
></DT
><DD
><P
>Set log file name.</P
><P
>The default value is <TT
CLASS="FILENAME"
>/var/log/failsafe/cdbd_log</TT
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-logfilemax</TT
> <TT
CLASS="REPLACEABLE"
><I
>maximum_size</I
></TT
></DT
><DD
><P
>Set log file maximum size (in bytes). If the file exceeds the maximum
size, any preexisting <TT
CLASS="FILENAME"
>filename.old</TT
> will be deleted, the
current file will be renamed to <TT
CLASS="FILENAME"
>filename.old</TT
>, and a new
file will be created. A single message will not be split across files.</P
><P
>If <TT
CLASS="LITERAL"
>-logfile</TT
> is set, the default value for this option
is 10000000.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-loglevel</TT
> <TT
CLASS="REPLACEABLE"
><I
>log level</I
></TT
></DT
><DD
><P
>Set log level. These log levels may be used: <B
CLASS="COMMAND"
>always</B
>, <B
CLASS="COMMAND"
>critical</B
>, <B
CLASS="COMMAND"
>error</B
>, <B
CLASS="COMMAND"
>warning</B
>, <B
CLASS="COMMAND"
>info</B
>, <B
CLASS="COMMAND"
>moreinfo</B
>, <B
CLASS="COMMAND"
>freq</B
>, <B
CLASS="COMMAND"
>morefreq</B
>, <B
CLASS="COMMAND"
>trace</B
>, <B
CLASS="COMMAND"
>busy</B
>.</P
><P
>The default value for this option is <B
CLASS="COMMAND"
>info</B
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-trace</TT
> <TT
CLASS="REPLACEABLE"
><I
>trace class</I
></TT
></DT
><DD
><P
>Trace selected events. These trace classes may be used: <B
CLASS="COMMAND"
>all</B
>, <B
CLASS="COMMAND"
>rpcs</B
>, <B
CLASS="COMMAND"
>updates</B
>, <B
CLASS="COMMAND"
>transactions</B
>, <B
CLASS="COMMAND"
>monitor</B
>. No tracing is done, even
if it is requested for one or more classes of events, unless either or both
of <TT
CLASS="LITERAL"
>-tracefile</TT
> or <TT
CLASS="LITERAL"
>-tracelog</TT
> is specified.</P
><P
>The default value for this option is <B
CLASS="COMMAND"
>transactions</B
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-tracefile</TT
> <TT
CLASS="REPLACEABLE"
><I
>filename</I
></TT
></DT
><DD
><P
>Set trace file name.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-tracefilemax</TT
> <TT
CLASS="REPLACEABLE"
><I
>maximum
size</I
></TT
></DT
><DD
><P
>Set trace file maximum size (in bytes). If the file exceeds the maximum
size, any preexisting <TT
CLASS="FILENAME"
>filename.old</TT
> will be deleted, and
the current file will be renamed to <TT
CLASS="FILENAME"
>filename.old</TT
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-[no]tracelog</TT
></DT
><DD
><P
>[Do not] trace to log destination. When this option is set, tracing
messages are directed to the log destination or destinations. If there is
also a trace file, the tracing messages are written there as well.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-[no]parent_timer</TT
></DT
><DD
><P
>[Do not] exit when parent exits.</P
><P
>The default value for this option is <TT
CLASS="LITERAL"
>-noparent_timer</TT
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-[no]daemonize</TT
></DT
><DD
><P
>[Do not] run as a daemon.</P
><P
>The default value for this option is <TT
CLASS="LITERAL"
>-daemonize</TT
>.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-l</TT
></DT
><DD
><P
>Do not run as a daemon.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-h</TT
></DT
><DD
><P
>Print usage message.</P
></DD
><DT
><TT
CLASS="LITERAL"
>-o help</TT
></DT
><DD
><P
>Print usage message.</P
></DD
></DL
></DIV
><P
>Note that if you use the default values for these options, the system
will be configured so that all log messages of level <B
CLASS="COMMAND"
>info</B
>
or less, and all trace messages for transaction events, are written to the file <TT
CLASS="FILENAME"
>/var/log/failsafe/cdbd_log</TT
>. When the file size reaches 10 MB, this
file will be renamed with a <TT
CLASS="FILENAME"
>.old</TT
> extension,
and logging will roll over to a new file of the same name. A single message
will not be split across files.</P
><P
>The following example shows an <TT
CLASS="FILENAME"
>/etc/failsafe/config/cdbd.options</TT
> file that directs all <TT
CLASS="LITERAL"
>cdbd</TT
> logging information
to <TT
CLASS="FILENAME"
>/var/log/messages</TT
>, and all <TT
CLASS="LITERAL"
>cdbd</TT
>
tracing information to <TT
CLASS="FILENAME"
>/var/log/failsafe/cdbd_ops1</TT
>. All
log events are being logged, and the following trace events are being logged:
RPCs, updates and transactions. When the size of the tracefile <TT
CLASS="FILENAME"
>/var/log/failsafe/cdbd_ops1</TT
> exceeds 100000000, this file is renamed
to <TT
CLASS="FILENAME"
>/var/log/failsafe/cdbd_ops1.old</TT
> and a new file <TT
CLASS="FILENAME"
>/var/log/failsafe/cdbd_ops1</TT
> is created. A single message is not
split across files.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>-logevents all -loglevel trace -logdest syslog -trace rpcs -trace 
updates -trace transactions -tracefile /var/log/failsafe/cdbd_ops1 
-tracefilemax 100000000</PRE
></TD
></TR
></TABLE
><P
>The following example shows an <TT
CLASS="FILENAME"
>/etc/failsafe/config/cdbd.options</TT
> file that directs all log and trace messages into one file, <TT
CLASS="FILENAME"
>/var/log/failsafe/cdbd_chaos6</TT
>, for which a maximum size of 100000000
is specified. <TT
CLASS="LITERAL"
>-tracelog</TT
> directs the tracing to the log
file.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>-logevents all -loglevel trace -trace rpcs -trace updates -trace 
transactions -tracelog -logfile /var/log/failsafe/cdbd_chaos6 
-logfilemax 100000000 -logdest logfile</PRE
></TD
></TR
></TABLE
><P
>When you change the <TT
CLASS="FILENAME"
>cdbd.options</TT
> file, you must
restart the <TT
CLASS="LITERAL"
>cdbd</TT
> processes with the <B
CLASS="COMMAND"
>/etc/rc.d/init.d/fs_cluster
restart</B
> command for those changes to take effect.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE32812-PARENT"
>3.3.4. Configuring /etc/failsafe/config/cmond.options for
Linux FailSafe</A
></H2
><P
>The <TT
CLASS="FILENAME"
>/etc/failsafe/config/cmond.options</TT
>
file contains the list of parameters that the cluster monitor daemon (<B
CLASS="COMMAND"
>cmond</B
>) reads when the process is started. It also specifies the
name of the file that logs <B
CLASS="COMMAND"
>cmond</B
> events. The cluster monitor
daemon provides a framework for starting, stopping, and monitoring process
groups. See the <B
CLASS="COMMAND"
>cmond</B
> man page for information on the cluster
monitor daemon.</P
><P
>The following options can be set in the <TT
CLASS="FILENAME"
>cmond.options</TT
>
file:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>-L</TT
> <TT
CLASS="REPLACEABLE"
><I
>loglevel</I
></TT
></DT
><DD
><P
>Set log level to <TT
CLASS="REPLACEABLE"
><I
>loglevel</I
></TT
></P
></DD
><DT
><TT
CLASS="LITERAL"
>-d</TT
></DT
><DD
><P
>Run in debug mode</P
></DD
><DT
><TT
CLASS="LITERAL"
>-l</TT
></DT
><DD
><P
>Lazy mode, where <B
CLASS="COMMAND"
>cmond</B
> does not validate its connection
to the cluster database</P
></DD
><DT
><TT
CLASS="LITERAL"
>-t </TT
><TT
CLASS="REPLACEABLE"
><I
>napinterval</I
></TT
></DT
><DD
><P
>The time interval in milliseconds after which <B
CLASS="COMMAND"
>cmond</B
>
checks for liveliness of process groups it is monitoring</P
></DD
><DT
><TT
CLASS="LITERAL"
>-s</TT
> [<TT
CLASS="REPLACEABLE"
><I
>eventname</I
></TT
>]</DT
><DD
><P
>Log messages to <TT
CLASS="FILENAME"
>stderr</TT
></P
></DD
></DL
></DIV
><P
>A default <TT
CLASS="FILENAME"
>cmond.options</TT
> file is shipped with the
following options. This default options file logs <B
CLASS="COMMAND"
>cmond</B
>
events to the <TT
CLASS="FILENAME"
>/var/log/failsafe/cmond_log</TT
> file.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>-L info -f /var/log/failsafe/cmond_log</PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le97755-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le13651-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Installing Required Software</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Additional Configuration Issues</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Overview of Configuring and Testing a New Linux
FailSafe Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Failover and Recovery Processes"
HREF="le19267-parent.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe System Software "
HREF="le15726-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le19267-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le15726-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE24477-PARENT"
>1.11. Overview of Configuring and Testing a New Linux
FailSafe Cluster</A
></H1
><P
>After the Linux FailSafe cluster hardware has been installed, follow
this general procedure to configure and test the Linux FailSafe system:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Become familiar with Linux FailSafe terms by reviewing this
chapter.</P
></LI
><LI
><P
>Plan the configuration of highly available applications and
services on the cluster using <A
HREF="le88622-parent.html"
>Chapter 2</A
>.</P
></LI
><LI
><P
>Perform various administrative tasks, including the installation
of prerequisite software, that are required by Linux FailSafe, as described
in <A
HREF="le32854-parent.html"
>Chapter 3</A
>.</P
></LI
><LI
><P
>Define the Linux FailSafe configuration as explained in <A
HREF="le94219-parent.html"
>Chapter 5</A
>.</P
></LI
><LI
><P
>Test the Linux FailSafe system in three phases: test individual
components prior to starting Linux FailSafe software, test normal operation
of the Linux FailSafe system, and simulate failures to test the operation
of the system after a failure occurs.</P
></LI
></OL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le19267-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le15726-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Failover and Recovery Processes</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe System Software</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Recovery Procedures</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"><LINK
REL="PREVIOUS"
TITLE="Dynamic Control of FailSafe Services"
HREF="le35544-parent.html"><LINK
REL="NEXT"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le35544-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 9. Linux FailSafe Recovery</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le55630-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE26593-PARENT"
>9.6. Recovery Procedures</A
></H1
><P
>The following sections describe various recovery procedures
you can perform when different FailSafe components fail. Procedures for the
following situations are provided:</P
><P
></P
><UL
><LI
><P
><A
HREF="le26593-parent.html#LE37488-PARENT"
>Section 9.6.1</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html#LE22743-PARENT"
>Section 9.6.2</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html#LE13349-PARENT"
>Section 9.6.3</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html#LE15209-PARENT"
>Section 9.6.4</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html#LE32749-PARENT"
>Section 9.6.5</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html#LE33694-PARENT"
>Section 9.6.6</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html#LE32026-PARENT"
>Section 9.6.7</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html#LE19553-PARENT"
>Section 9.6.8</A
></P
></LI
></UL
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE37488-PARENT"
>9.6.1. Cluster Error Recovery</A
></H2
><P
>Follow this procedure if the status of the cluster is UNKNOWN in all nodes
in the cluster.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Check to see if there are control networks that have failed
(see <A
HREF="le26593-parent.html#LE32749-PARENT"
>Section 9.6.5</A
>).</P
></LI
><LI
><P
>At least 50% of the nodes in the cluster must be able to communicate
with each other to have an active cluster (Quorum requirement). If there are
not sufficient nodes in the cluster that can communicate with each other using
control networks, stop HA services on some of the nodes so that the quorum
requirement is satisfied.</P
></LI
><LI
><P
>If there are no hardware configuration problems, detach all
resource groups that are online in the cluster (if any), stop HA services
in the cluster, and restart HA services in the cluster.</P
></LI
></OL
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command detaches the resource
group <TT
CLASS="FILENAME"
>web-rg</TT
> in cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin detach resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>To stop HA services in the cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>
and ignore errors (<B
CLASS="COMMAND"
>force</B
> option), use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services for cluster web-cluster force</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>To start HA services in the cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>,
use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE22743-PARENT"
>9.6.2. Node Error Recovery</A
></H2
><P
>Follow this procedure if the status of a node is UNKNOWN in an active
cluster:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Check to see if the control networks in the node are working
(see <A
HREF="le26593-parent.html#LE32749-PARENT"
>Section 9.6.5</A
>).</P
></LI
><LI
><P
>Check to see if the serial reset cables to reset the node
are working (see <A
HREF="le26593-parent.html#LE33694-PARENT"
>Section 9.6.6</A
>).</P
></LI
><LI
><P
>If there are no hardware configuration problems, stop HA services
in the node and restart HA services.</P
><P
>To stop HA services in the node <TT
CLASS="FILENAME"
>web-node3</TT
> in the
cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>, ignoring errors (<B
CLASS="COMMAND"
>force</B
> option), use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services in node web-node3 for cluster web-cluster 
force</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>To start HA services in the node <TT
CLASS="FILENAME"
>web-node3 </TT
>in the
cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>, use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services in node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE13349-PARENT"
>9.6.3. Resource Group Maintenance and Error Recovery</A
></H2
><P
>To do simple maintenance on an application that is part of the resource
group, use the following procedure. This procedure stops monitoring the resources
in the resource group when maintenance mode is on. You need to turn maintenance
mode off when application maintenance is done.</P
><DIV
CLASS="CAUTION"
><P
></P
><TABLE
CLASS="CAUTION"
BORDER="1"
WIDTH="100%"
><TR
><TD
ALIGN="CENTER"
><B
>Caution</B
></TD
></TR
><TR
><TD
ALIGN="LEFT"
><P
>If there is node failure on the node where resource group maintenance
is being performed, the resource group is moved to another node in the failover
policy domain.</P
></TD
></TR
></TABLE
></DIV
><P
></P
><OL
TYPE="1"
><LI
><P
>To put a resource group <TT
CLASS="FILENAME"
>web-rg</TT
> in maintenance
mode, use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin maintenance_on resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>The resource group state changes to <TT
CLASS="LITERAL"
>ONLINE_MAINTENANCE</TT
>. Do whatever application maintenance is required. (Rotating application
logs is an example of simple application maintenance).</P
></LI
><LI
><P
>To remove a resource group <TT
CLASS="FILENAME"
>web-rg</TT
> from
maintenance mode, use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin maintenance_off resource_group web-rg in cluster 
web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>The resource group state changes back to <TT
CLASS="LITERAL"
>ONLINE</TT
>.</P
></LI
></OL
><P
>You perform the following procedure when a resource group is in an <TT
CLASS="LITERAL"
>ONLINE</TT
> state and has an SRMD <TT
CLASS="LITERAL"
>EXECUTABLE ERROR</TT
>.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Look at the SRM logs (default location: <TT
CLASS="FILENAME"
>/var/log/failsafe/srmd_</TT
><TT
CLASS="FILENAME"
>node name</TT
>) to determine the cause of failure
and the resource that has failed.</P
></LI
><LI
><P
>Fix the cause of failure. This might require changes to resource
configuration or changes to resource type stop/start/failover action timeouts.</P
></LI
><LI
><P
>After fixing the problem, move the resource group offline
with the <B
CLASS="COMMAND"
>force</B
> option and then move the resource group
online.</P
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command moves the resource
group <TT
CLASS="FILENAME"
>web-rg</TT
> in the cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>
offline and ignores any errors:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline resource_group web-rg in cluster web-cluster 
force</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command moves the resource
group <TT
CLASS="FILENAME"
>web-rg</TT
> in the cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>
online:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin online resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>The resource group <TT
CLASS="FILENAME"
>web-rg</TT
> should be in an <TT
CLASS="LITERAL"
>ONLINE</TT
> state with no error.</P
></LI
></OL
><P
>You use the following procedure when a resource group is not online
but is in an error state. Most of these errors occur as a result of the exclusivity
process. This process, run when a resource group is brought online, determines
if any resources are already allocated somewhere in the failure domain of
a resource group. Note that exclusivity scripts return that a resource is
allocated on a node if the script fails in any way. In other words, unless
the script can determine that a resource is not present, it returns a value
indicating that the resource is allocated.</P
><P
>Some possible error states include: <TT
CLASS="LITERAL"
>SPLIT RESOURCE GROUP (EXCLUSIVITY)</TT
>, <TT
CLASS="LITERAL"
>NODE NOT AVAILABLE (EXCLUSIVITY)</TT
>, <TT
CLASS="LITERAL"
>NO AVAILABLE NODES</TT
> in failure domain. See <A
HREF="le16877-parent.html#LE29367-PARENT"
>Section 7.4.3</A
>,
for explanations of resource group error codes.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Look at the <TT
CLASS="FILENAME"
>failsafe</TT
> and SRM logs (default
directory: <TT
CLASS="FILENAME"
>/var/log/failsafe</TT
>, files: <TT
CLASS="FILENAME"
>failsafe_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
>, <TT
CLASS="FILENAME"
>srmd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
>) to determine the cause of the failure and the resource
that failed.</P
><P
>For example, say the task of moving a resource group online results
in a resource group with error state <TT
CLASS="LITERAL"
>SPLIT RESOURCE GROUP (EXCLUSIVITY)</TT
>. This means that parts of a resource group are allocated on at
least two different nodes. One of the failsafe logs will have the description
of which nodes are believed to have the resource group partially allocated.</P
><P
>At this point, look at the <TT
CLASS="FILENAME"
>srmd</TT
> logs on each of
these machines to see what resources are believed to be allocated. In some
cases, a misconfigured resource will show up as a resource which is allocated.
This is especially true for <TT
CLASS="FILENAME"
>Netscape_web</TT
> resources.</P
></LI
><LI
><P
>Fix the cause of the failure. This might require changes to
resource configuration or changes to resource type start/stop/exclusivity
timeouts.</P
></LI
><LI
><P
>After fixing the problem, move the resource group offline
with the <B
CLASS="COMMAND"
>force</B
> option and then move the resource group
online.</P
></LI
></OL
><P
>There are a few double failures that can occur in the cluster which
will cause resource groups to remain in a non-highly-available state. At times
a resource group might get stuck in an offline state. A resource group might
also stay in an error state on a node even when a new node joins the cluster
and the resource group can migrate to that node to clear the error.</P
><P
>When these circumstances arise, the correct action should be as follows:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Try to move the resource group online if it is offline.</P
></LI
><LI
><P
>If the resource group is stuck on a node, detach the resource
group, then bring it online again. This should clear many errors.</P
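><P
>For example, the following <B
CLASS="COMMAND"
>cluster_mgr</B
> commands (a sketch that reuses the resource group and cluster names from
the examples earlier in this chapter) detach the resource group and bring
it back online:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin detach resource_group web-rg in cluster web-cluster</B
></TT
>
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin online resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE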
></LI
><LI
><P
>If detaching the resource group does not work, force the resource
group offline, then bring it back online.</P
></LI
><LI
><P
>If commands appear to be hanging or not working properly,
detach all resource groups, then shut down the cluster and bring all resource
groups back online.</P
></LI
></OL
><P
>See <A
HREF="le41282-parent.html#FS-TAKERESGROUPOFFLINE"
>Section 7.5.2</A
>, for information on detaching
resource groups and forcing resource groups offline.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE15209-PARENT"
>9.6.4. Resource Error Recovery</A
></H2
><P
>You use this procedure when a resource that is not part of a resource
group is in an <TT
CLASS="LITERAL"
>ONLINE</TT
> state with error. This can happen
when the addition or removal of resources from a resource group fails.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Look at the SRM logs (default location: <TT
CLASS="FILENAME"
>/var/log/failsafe/srmd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
>) to determine the cause of
failure and the resource that has failed.</P
></LI
><LI
><P
>Fix the cause of failure. This might require changes to resource
configuration or changes to resource type stop/start/failover action timeouts.</P
></LI
><LI
><P
>After fixing the problem, move the resource offline with the <B
CLASS="COMMAND"
>force</B
> option of the Cluster Manager CLI <B
CLASS="COMMAND"
>admin offline</B
>
command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline_force resource web-srvr of resource_type 
Netscape_Web in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Executing this command removes the error state of resource <TT
CLASS="FILENAME"
>web-srvr</TT
> of type <TT
CLASS="FILENAME"
>Netscape_Web</TT
>, making it available
to be added to a resource group.</P
><P
>You can also use the Cluster Manager GUI to clear the error state for
the resource. To do this, you select the &#8220;Recover a Resource&#8221;
task from the &#8220;Resources and Resource Types&#8221; category of the FailSafe
Manager.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE32749-PARENT"
>9.6.5. Control Network Failure Recovery</A
></H2
><P
>Control network failures are reported in <TT
CLASS="FILENAME"
>cmsd</TT
> logs.
The default location of the <TT
CLASS="FILENAME"
>cmsd</TT
> log is <TT
CLASS="FILENAME"
>/var/log/failsafe/cmsd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
>. Follow this procedure when
the control network fails:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Use the <B
CLASS="COMMAND"
>ping</B
> command to check whether the
control network IP address is configured in the node.</P
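><P
>For example (a sketch; <TT
CLASS="LITERAL"
>192.26.50.14</TT
> is a placeholder that you would replace with a control network IP address
of the node):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>ping -c 3 192.26.50.14</B
></TT
></PRE
></TD
></TR
></TABLE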
></LI
><LI
><P
>Check node configuration to see whether the control network
IP addresses are correctly specified.</P
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command displays node configuration
for <TT
CLASS="FILENAME"
>web-node3:</TT
></P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show node web-node3</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>If IP names are specified for control networks instead of
IP addresses in XX.XX.XX.XX notation, check to see whether IP names can be
resolved using DNS. Using IP addresses instead of IP names is recommended.</P
></LI
><LI
><P
>Check whether the heartbeat interval and node timeouts are
correctly set for the cluster. These HA parameters can seen using <B
CLASS="COMMAND"
>cluster_mgr show ha_parameters</B
> command.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE33694-PARENT"
>9.6.6. Serial Cable Failure Recovery</A
></H2
><P
>Serial cables are used for resetting a node when there is a node failure.
Serial cable failures are reported in <TT
CLASS="FILENAME"
>crsd</TT
> logs. The
default location for the <TT
CLASS="FILENAME"
>crsd</TT
> log is <TT
CLASS="FILENAME"
>/var/log/failsafe/crsd_</TT
><TT
CLASS="REPLACEABLE"
><I
>nodename</I
></TT
>.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Check the node configuration to see whether the serial cable connection
is correctly configured.</P
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command displays node configuration
for <TT
CLASS="FILENAME"
>web-node3:</TT
></P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show node web-node3</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Use the <B
CLASS="COMMAND"
>cluster_mgr admin ping</B
> command to verify
the serial cables.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin ping node web-node3</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
></OL
><P
>The above command reports serial cable problems in node <TT
CLASS="FILENAME"
>web-node3</TT
>.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE32026-PARENT"
>9.6.7. CDB Maintenance and Recovery</A
></H2
><P
>When the entire configuration database (CDB) must be reinitialized,
execute the following command: </P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/usr/cluster/bin/cdbreinit /var/cluster/cdb/cdb.db</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>This command will restart all cluster processes. The contents of the
configuration database will be automatically synchronized with other nodes
if other nodes in the pool are available.</P
><P
>Otherwise, the CDB will need to be restored from backup at this point.
For instructions on backing up and restoring the CDB, see <A
HREF="le37674-parent.html"
>Section 7.8</A
>.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE19553-PARENT"
>9.6.8. FailSafe Cluster Manager GUI and CLI Inconsistencies</A
></H2
><P
>If the FailSafe Cluster Manager GUI is displaying information that is
inconsistent with the FailSafe <B
CLASS="COMMAND"
>cluster_mgr</B
> command, restart the
cad process on the node to which the Cluster Manager GUI is connected by executing
the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>killall cad</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>The cluster administration daemon is restarted automatically by the <B
CLASS="COMMAND"
>cmond</B
> process.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le35544-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Dynamic Control of FailSafe Services</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Upgrading and Maintaining Active Clusters</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Upgrading OS Software in an Active Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="PREVIOUS"
TITLE="Changing Control Networks in a Cluster"
HREF="x6931.html"><LINK
REL="NEXT"
TITLE="Upgrading FailSafe Software in an Active Cluster"
HREF="le31814-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="x6931.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 10. Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le31814-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE26765-PARENT"
>10.4. Upgrading OS Software in an Active Cluster</A
></H1
><P
>When you upgrade your OS software in an active cluster,
you perform the upgrade on one node at a time.</P
><P
>If the OS software upgrade does not require a reboot and does not impact
the FailSafe software, there is no need to use the OS upgrade procedure. If
you do not know whether the upgrade will impact FailSafe software or if the
OS upgrade requires a machine reboot, follow the upgrade procedure described
below.</P
><P
>The following procedure upgrades the OS software on node <TT
CLASS="FILENAME"
>web-node3</TT
>.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>If resource groups are online on the node, use a <B
CLASS="COMMAND"
>cluster_mgr</B
> command to move them another node in the cluster. To
move the resource group to another node in the cluster, there should be another
node available in the failover policy domain of the resource group.</P
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command moves resource
group <TT
CLASS="FILENAME"
>web-rg</TT
> to another node in the cluster <TT
CLASS="FILENAME"
>web-cluster:</TT
></P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin move resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>To stop HA services on the node <TT
CLASS="FILENAME"
>web-node3</TT
>,
use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command. This command will
move all the resource groups online on this node to other nodes in the cluster
if possible.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>If it is not possible to move resource groups that are online on node <TT
CLASS="FILENAME"
>web-node3</TT
>, the above command will fail. You can use the <B
CLASS="COMMAND"
>force</B
> option to stop HA services in a node even in the case of an
error.</P
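><P
>For example, the following command (a sketch that appends the <B
CLASS="COMMAND"
>force</B
> option to the command shown above) stops HA services on <TT
CLASS="FILENAME"
>web-node3</TT
> even if errors occur:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster force</B
></TT
></PRE
></TD
></TR
></TABLE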
></LI
><LI
><P
>Perform the OS upgrade in the node <TT
CLASS="FILENAME"
>web-node3</TT
>.</P
></LI
><LI
><P
>After the OS upgrade, make sure cluster processes (<B
CLASS="COMMAND"
>cmond</B
>, <B
CLASS="COMMAND"
>cad</B
>, <B
CLASS="COMMAND"
>crsd</B
>) are running.</P
></LI
><LI
><P
>Restart HA services on the node. The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command restarts HA services on the node:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services on node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Make sure the resource groups are running on the most appropriate node
after restarting HA services.</P
></LI
></OL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="x6931.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le31814-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Changing Control Networks in a Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Upgrading FailSafe Software in an Active Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>High Availability and Linux FailSafe</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="NEXT"
TITLE="Concepts"
HREF="le89728-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le73529-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le89728-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE27299-PARENT"
>1.1. High Availability and Linux FailSafe</A
></H1
><P
>In the world of mission-critical computing, the availability of information
and computing resources is extremely important. The availability of a system
is affected by how long it is unavailable after a failure in any of its components.
 Different degrees of availability are provided by different types of systems:</P
><P
></P
><UL
><LI
><P
> Fault-tolerant systems (continuous availability).
 These systems use redundant components and specialized logic to ensure continuous
operation and to provide complete data integrity. On these systems the degree
of availability is extremely high. Some of these systems can also tolerate
outages due to hardware or software upgrades. This
solution is very expensive and requires specialized hardware and software.</P
></LI
><LI
><P
>Highly available systems. These systems survive single points
of failure by using redundant off-the-shelf components and specialized software.
 They provide a lower degree of availability than the fault-tolerant systems,
but at much lower cost. Typically these systems provide high availability
only for client/server applications, and base their redundancy on cluster
architectures with shared resources.</P
></LI
></UL
><P
>The Silicon Graphics Linux FailSafe product provides a general
facility for providing highly available services. Linux FailSafe provides
highly available services for a cluster that contains multiple nodes (<TT
CLASS="REPLACEABLE"
><I
>N</I
></TT
>-node configuration).  Using Linux FailSafe, you can configure
a highly available system in any of the following topologies:</P
><P
></P
><UL
><LI
><P
>Basic two-node configuration</P
></LI
><LI
><P
>Ring configuration</P
></LI
><LI
><P
>Star configuration, in which multiple applications running
on multiple nodes are backed up by one node</P
></LI
><LI
><P
>Symmetric pool configuration</P
></LI
></UL
><P
>These configurations provide redundancy of processors and I/O controllers.
 Redundancy of storage can be obtained either through the use of multi-hosted
RAID disk devices and mirrored disks, or with redundant disk systems which
are kept in synchronization.</P
><P
>If one of the nodes in the cluster or one of the nodes' components fails,
a different node in the cluster restarts the highly available services of
the failed node. To clients, the services on the replacement node are indistinguishable
from the original services before failure occurred. It appears as if the original
node has crashed and rebooted quickly. The clients notice only a brief interruption
in the highly available service.</P
><P
>In a Linux FailSafe highly available system, nodes can serve as backup
for other nodes. Unlike the backup resources in a fault-tolerant system, which
serve purely as redundant hardware for backup in case of failure, the resources
of each node in a highly available system can be used during normal operation
to run other applications that are not necessarily highly available services.
 All highly available services are owned and accessed by one node at a time.</P
><P
>Highly available services are monitored by the Linux FailSafe software.
 During normal operation, if a failure is detected on any of these components,
a <I
CLASS="FIRSTTERM"
>failover</I
> process is initiated. Using Linux FailSafe,
you can define a failover policy to establish which node will take over the
services under what conditions. This process consists of resetting the failed
node (to ensure data consistency), doing any recovery required by the failed
over services, and quickly restarting the services on the node that will take
them over.</P
><P
>Linux FailSafe supports <I
CLASS="FIRSTTERM"
>selective failover</I
> in
which individual highly available applications can be failed over to a backup
node independent of the other highly available applications on that node.</P
><P
>Linux FailSafe highly available services fall into two groups: highly
available resources and highly available applications. Highly available resources
include network interfaces, logical volumes, and filesystems such as ext2fs
or reiserfs that have been configured for Linux FailSafe. Silicon Graphics
has also developed Linux FailSafe NFS. Highly available applications can include
applications such as NFS and Apache.</P
><P
> Linux FailSafe provides
a framework for making additional applications into highly available services.
 If you want to add highly available applications on a Linux FailSafe cluster,
you must write scripts to handle application monitoring functions. Information
on developing these scripts is described in the <I
CLASS="CITETITLE"
>Linux FailSafe
Programmer's Guide</I
>. If you need assistance in this regard, contact
SGI Global Services, which offers custom Linux FailSafe agent development
and HA integration services.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le89728-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Overview of the Linux FailSafe System</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Concepts</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Name Restrictions</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Setting Configuration Defaults"
HREF="le59477-parent.html"><LINK
REL="NEXT"
TITLE="Configuring Timeout Values and Monitoring Intervals"
HREF="tv.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le59477-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="tv.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE28499-PARENT"
>5.2. Name Restrictions</A
></H1
><P
>When
you specify the names of the various components of a Linux FailSafe system,
the name cannot begin with an underscore (_) or include any whitespace characters.
In addition, the name of any Linux FailSafe component cannot contain a space,
an unprintable character, or a *, ?, \, or #.</P
><P
>The following is the list of permitted characters for the name of a
Linux FailSafe component:</P
><P
></P
><UL
><LI
><P
>alphanumeric characters</P
></LI
><LI
><P
>/</P
></LI
><LI
><P
>.</P
></LI
><LI
><P
>- (hyphen)</P
></LI
><LI
><P
>_ (underscore)</P
></LI
><LI
><P
>:</P
></LI
><LI
><P
>&quot;</P
></LI
><LI
><P
>=</P
></LI
><LI
><P
>@</P
></LI
><LI
><P
>'</P
></LI
></UL
><P
>These character restrictions hold true whether you are configuring your
system with the Cluster Manager GUI or the Cluster Manager CLI.</P
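><P
>For example, the hypothetical names <TT
CLASS="LITERAL"
>web-node3</TT
> and <TT
CLASS="LITERAL"
>fs_cluster.1</TT
> are acceptable component names, while <TT
CLASS="LITERAL"
>_backup</TT
> (leading underscore) and <TT
CLASS="LITERAL"
>web cluster</TT
> (embedded space) are not.</P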
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le59477-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="tv.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Setting Configuration Defaults</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Configuring Timeout Values and Monitoring Intervals</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Linux FailSafe Recovery</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Performing Diagnostic Tasks with the Cluster
Manager CLI"
HREF="le37273-parent.html"><LINK
REL="NEXT"
TITLE="Overview of FailSafe System Recovery"
HREF="le14340-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le37273-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le14340-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE28716-PARENT"
>Chapter 9. Linux FailSafe Recovery</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>9.1. <A
HREF="le14340-parent.html"
>Overview of FailSafe System Recovery</A
></DT
><DT
>9.2. <A
HREF="le28847-parent.html"
>FailSafe Log Files</A
></DT
><DT
>9.3. <A
HREF="le17012-parent.html"
>Node Membership and Resets</A
></DT
><DD
><DL
><DT
>9.3.1. <A
HREF="le17012-parent.html#AEN6393"
>Node Membership and Tie-Breaker Node</A
></DT
><DT
>9.3.2. <A
HREF="le17012-parent.html#AEN6420"
>No Membership Formed</A
></DT
><DT
>9.3.3. <A
HREF="le17012-parent.html#AEN6453"
>No Membership Formed</A
></DT
></DL
></DD
><DT
>9.4. <A
HREF="le13884-parent.html"
>Status Monitoring</A
></DT
><DT
>9.5. <A
HREF="le35544-parent.html"
>Dynamic Control of FailSafe Services</A
></DT
><DT
>9.6. <A
HREF="le26593-parent.html"
>Recovery Procedures</A
></DT
><DD
><DL
><DT
>9.6.1. <A
HREF="le26593-parent.html#LE37488-PARENT"
>Cluster Error Recovery</A
></DT
><DT
>9.6.2. <A
HREF="le26593-parent.html#LE22743-PARENT"
>Node Error Recovery</A
></DT
><DT
>9.6.3. <A
HREF="le26593-parent.html#LE13349-PARENT"
>Resource Group Maintenance and Error Recovery</A
></DT
><DT
>9.6.4. <A
HREF="le26593-parent.html#LE15209-PARENT"
>Resource Error Recovery</A
></DT
><DT
>9.6.5. <A
HREF="le26593-parent.html#LE32749-PARENT"
>Control Network Failure Recovery</A
></DT
><DT
>9.6.6. <A
HREF="le26593-parent.html#LE33694-PARENT"
>Serial Cable Failure Recovery</A
></DT
><DT
>9.6.7. <A
HREF="le26593-parent.html#LE32026-PARENT"
>CDB Maintenance and Recovery</A
></DT
><DT
>9.6.8. <A
HREF="le26593-parent.html#LE19553-PARENT"
>FailSafe Cluster Manager GUI and CLI Inconsistencies</A
></DT
></DL
></DD
></DL
></DIV
><P
>This chapter provides information on FailSafe system recovery, and includes
sections on the following topics:</P
><P
></P
><UL
><LI
><P
><A
HREF="le14340-parent.html"
>Section 9.1</A
></P
></LI
><LI
><P
><A
HREF="le28847-parent.html"
>Section 9.2</A
></P
></LI
><LI
><P
><A
HREF="le17012-parent.html"
>Section 9.3</A
></P
></LI
><LI
><P
><A
HREF="le13884-parent.html"
>Section 9.4</A
></P
></LI
><LI
><P
><A
HREF="le35544-parent.html"
>Section 9.5</A
></P
></LI
><LI
><P
><A
HREF="le26593-parent.html"
>Section 9.6</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le37273-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le14340-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Performing Diagnostic Tasks with the Cluster
Manager CLI</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Overview of FailSafe System Recovery</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>FailSafe Log Files</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"><LINK
REL="PREVIOUS"
TITLE="Overview of FailSafe System Recovery"
HREF="le14340-parent.html"><LINK
REL="NEXT"
TITLE="Node Membership and Resets"
HREF="le17012-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le14340-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 9. Linux FailSafe Recovery</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le17012-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE28847-PARENT"
>9.2. FailSafe Log Files</A
></H1
><P
>Linux
FailSafe maintains system logs for each of the FailSafe daemons. You can customize
the system logs according to the level of logging you wish to maintain.</P
><P
>For information on setting up log configurations, see  <A
HREF="fs-setlogparams.html"
>Section 5.6</A
>.</P
><P
>Log messages can be of the following types:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
>Normal</DT
><DD
><P
>Normal messages report on the successful completion of a task. An example
of a normal message is as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>Wed Sep 2 11:57:25.284 &#60;N ha_gcd cms 10185:0&#62; 
Delivering TOTAL membership (S# 1, GS# 1)</PRE
></TD
></TR
></TABLE
></DD
><DT
>Error/Warning</DT
><DD
><P
>Error or warning messages indicate that an error has occurred or may
occur soon. These messages may result from using the wrong command or improper
syntax. An example of a warning message is as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>Wed Sep 2 13:45:47.199 &#60;W crsd crs 9908:0 
crs_config.c:634&#62; CI_ERR_NOTFOUND, safer - no 
such node</PRE
></TD
></TR
></TABLE
></DD
><DT
>Syslog Messages</DT
><DD
><P
>All normal and error messages are also logged to <TT
CLASS="FILENAME"
>syslog</TT
>.
Syslog messages include the symbol <B
CLASS="COMMAND"
>&#60;CI&#62;</B
> in the header
to indicate they are cluster-related messages. An example of a syslog message
is as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>Wed Sep 2 12:22:57 6X:safe syslog: &#60;&#60;CI&#62; 
ha_cmsd misc 10435:0&#62; CI_FAILURE, I am not part 
of the enabled cluster anymore</PRE
></TD
></TR
></TABLE
></DD
><DT
>Debug</DT
><DD
><P
>Debug messages appear in the log group file when the logging level is
set to debug0 or higher (using the GUI) or 10 or higher (using the CLI).</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Many megabytes of disk space can be consumed on the server when debug
levels are used in a log configuration.</P
></BLOCKQUOTE
></DIV
></DD
></DL
></DIV
><P
>Examining the log files should enable you to see the nature of the system
error. By noting the time of the error and reviewing the activity of the
various daemons in the log files immediately before the error occurred, you may
be able to determine the situation that caused the failure.</P
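><P
>For example, because cluster-related syslog messages include the <B
CLASS="COMMAND"
>&#60;CI&#62;</B
> symbol, you can extract them for the minutes surrounding a failure. The
following sketch assumes syslog writes to <TT
CLASS="FILENAME"
>/var/log/messages</TT
>, which is typical but distribution-dependent:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>grep "&#60;CI&#62;" /var/log/messages | grep "Sep 2 12:2"</B
></TT
></PRE
></TD
></TR
></TABLE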
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le14340-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le17012-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Overview of FailSafe System Recovery</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Node Membership and Resets</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Overview of Configuring Nodes for Linux FailSafe</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="PREVIOUS"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="NEXT"
TITLE="Installing Required Software"
HREF="le97755-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le32854-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le97755-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE29006-PARENT"
>3.1. Overview of Configuring Nodes for Linux FailSafe</A
></H1
><P
>Performing the system administration procedures required to prepare
nodes for Linux FailSafe involves these steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Install required software as described in <A
HREF="le97755-parent.html"
>Section 3.2</A
>.</P
></LI
><LI
><P
>Configure the system files on each node, as described in <A
HREF="le23103-parent.html"
>Section 3.3</A
>.</P
></LI
><LI
><P
>Check two important configuration settings on
each node, as described in <A
HREF="le13651-parent.html"
>Section 3.4</A
>.</P
></LI
><LI
><P
>Create the devices and filesystems required by the highly
available applications you plan to run on the cluster. See <A
HREF="le39637-parent.html"
>Section 3.5</A
>.</P
></LI
><LI
><P
>Configure the network interfaces on the nodes using the procedure
in <A
HREF="le97738-parent.html"
>Section 3.6</A
>.</P
></LI
><LI
><P
>Configure the serial ports used on each node for the serial
connection to the other nodes by following the procedure in <A
HREF="le90681-parent.html"
>Section 3.7</A
>.</P
></LI
><LI
><P
>When you are ready, configure the nodes so that the Linux FailSafe
software starts up when they are rebooted, as shown in the example after this
list.</P
></LI
></OL
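><P
>For example, a minimal sketch of this last step, using the <B
CLASS="COMMAND"
>fsconfig</B
> utility shown elsewhere in this guide, is as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>fsconfig failsafe on</B
></TT
></PRE
></TD
></TR
></TABLE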
><P
>To complete the configuration of nodes for Linux FailSafe, you must
configure the components of the Linux FailSafe system, as described in <A
HREF="le94219-parent.html"
>Chapter 5</A
>.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le97755-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Installing Required Software</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Upgrading FailSafe Software in an Active Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="PREVIOUS"
TITLE="Upgrading OS Software in an Active Cluster"
HREF="le26765-parent.html"><LINK
REL="NEXT"
TITLE="Adding New Resource Groups or Resources in an Active
Cluster"
HREF="le18685-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le26765-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 10. Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le18685-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE31814-PARENT"
>10.5. Upgrading FailSafe Software in an Active Cluster</A
></H1
><P
>When you upgrade FailSafe software in an active cluster, you upgrade
one node at a time in the cluster.</P
><P
>The following procedure upgrades FailSafe on node <TT
CLASS="FILENAME"
>web-node3</TT
>.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>If resource groups are online on the node, use a <B
CLASS="COMMAND"
>cluster_mgr</B
> command to move them to another node in the cluster. To
move a resource group to another node, there must be another
node available in the failover policy domain of the resource group.</P
><P
>The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command moves resource
group <TT
CLASS="FILENAME"
>web-rg</TT
> to another node in the cluster <TT
CLASS="FILENAME"
>web-cluster:</TT
></P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin move resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>To stop HA services on the node <TT
CLASS="FILENAME"
>web-node3</TT
>,
use the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command. This command will
move all the resource groups online on this node to other nodes in the cluster
if possible.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>If it is not possible to move resource groups that are online on node <TT
CLASS="FILENAME"
>web-node3</TT
>, the above command will fail. You can use the <B
CLASS="COMMAND"
>force</B
> option to stop HA services on a node even in the case of an
error.</P
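><P
>For example, the following sketch assumes that the <B
CLASS="COMMAND"
>force</B
> keyword is appended to the command shown above:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster force</B
></TT
></PRE
></TD
></TR
></TABLE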
></LI
><LI
><P
>Stop all cluster processes running on the node.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/etc/rc.d/init.d/failsafe stop</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Perform the FailSafe upgrade on the node <TT
CLASS="FILENAME"
>web-node3</TT
>.</P
></LI
><LI
><P
>After the FailSafe upgrade, check whether cluster processes
(<B
CLASS="COMMAND"
>cmond</B
>, <B
CLASS="COMMAND"
>cad</B
>, <B
CLASS="COMMAND"
>crsd</B
>)
are running. If not, restart cluster processes:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>fsconfig failsafe on; /etc/rc.d/init.d/failsafe start</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Restart HA services on the node. The following <B
CLASS="COMMAND"
>cluster_mgr</B
> command restarts HA services on the node:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services on node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Make sure the resource groups are running on the most appropriate node
after restarting HA services.</P
></LI
></OL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le26765-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le18685-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Upgrading OS Software in an Active Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Adding New Resource Groups or Resources in an Active
Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Adding a New Hardware Device in an Active Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="PREVIOUS"
TITLE="Adding New Resource Groups or Resources in an Active
Cluster"
HREF="le18685-parent.html"><LINK
REL="NEXT"
TITLE="Glossary"
HREF="g7155.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le18685-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 10. Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="g7155.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE32198-PARENT"
>10.7. Adding a New Hardware Device in an Active Cluster</A
></H1
><P
>When you add hardware devices to an active cluster, you add them one
node at a time.</P
><P
>To add hardware devices to a node in an active cluster, follow the same
procedure as when you upgrade OS software in an active cluster, as described
in <A
HREF="le26765-parent.html"
>Section 10.4</A
>. In summary:</P
><P
></P
><UL
><LI
><P
>You must move the resource groups offline and stop HA services
on the node before adding the hardware device, as in the sketch after this
list.</P
></LI
><LI
><P
>After adding the hardware device, make sure cluster processes
are running and start HA services on the node.</P
></LI
></UL
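><P
>For example, a sketch of these two steps, reusing the hypothetical <TT
CLASS="FILENAME"
>web-rg</TT
>, <TT
CLASS="FILENAME"
>web-node3</TT
>, and <TT
CLASS="FILENAME"
>web-cluster</TT
> names from <A
HREF="le31814-parent.html"
>Section 10.5</A
>, might look as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline resource_group web-rg in cluster web-cluster</B
></TT
>
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services on node web-node3 for cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE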
><P
>To include the new hardware device in the configuration database, you
must modify your resource configuration and your node configuration, where
appropriate.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le18685-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="g7155.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Adding New Resource Groups or Resources in an Active
Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Glossary</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Installing Linux FailSafe Software and Preparing
the System</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="IP Address Configuration"
HREF="le84104-parent.html"><LINK
REL="NEXT"
TITLE="Overview of Configuring Nodes for Linux FailSafe"
HREF="le29006-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le84104-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le29006-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE32854-PARENT"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>3.1. <A
HREF="le29006-parent.html"
>Overview of Configuring Nodes for Linux FailSafe</A
></DT
><DT
>3.2. <A
HREF="le97755-parent.html"
>Installing Required Software</A
></DT
><DT
>3.3. <A
HREF="le23103-parent.html"
>Configuring System Files</A
></DT
><DD
><DL
><DT
>3.3.1. <A
HREF="le23103-parent.html#AEN2003"
>Configuring /etc/services for Linux FailSafe</A
></DT
><DT
>3.3.2. <A
HREF="le23103-parent.html#AEN2019"
>Configuring /etc/failsafe/config/cad.options for Linux FailSafe</A
></DT
><DT
>3.3.3. <A
HREF="le23103-parent.html#AEN2057"
>Configuring /etc/failsafe/config/cdbd.options for Linux FailSafe</A
></DT
><DT
>3.3.4. <A
HREF="le23103-parent.html#LE32812-PARENT"
>Configuring /etc/failsafe/config/cmond.options for
Linux FailSafe</A
></DT
></DL
></DD
><DT
>3.4. <A
HREF="le13651-parent.html"
>Additional Configuration Issues</A
></DT
><DT
>3.5. <A
HREF="le39637-parent.html"
>Choosing and Configuring Devices and Filesystems</A
></DT
><DT
>3.6. <A
HREF="le97738-parent.html"
>Configuring Network Interfaces</A
></DT
><DT
>3.7. <A
HREF="le90681-parent.html"
>Configuration for Reset</A
></DT
><DD
><DL
><DT
>3.7.1. <A
HREF="le90681-parent.html#AEN2461"
>Changing the getty Process</A
></DT
><DT
>3.7.2. <A
HREF="le90681-parent.html#AEN2490"
>Configuring the BIOS</A
></DT
></DL
></DD
></DL
></DIV
><P
>This chapter describes several system administration procedures that
must be performed on the nodes in a cluster to prepare and configure them
for Linux FailSafe. These procedures assume that you have done the planning
described in <A
HREF="le88622-parent.html"
>Chapter 2</A
>.</P
><P
>The major sections in this chapter are as follows:</P
><P
></P
><UL
><LI
><P
><A
HREF="le29006-parent.html"
>Section 3.1</A
></P
></LI
><LI
><P
><A
HREF="le97755-parent.html"
>Section 3.2</A
></P
></LI
><LI
><P
><A
HREF="le23103-parent.html"
>Section 3.3</A
></P
></LI
><LI
><P
><A
HREF="le13651-parent.html"
>Section 3.4</A
></P
></LI
><LI
><P
><A
HREF="le39637-parent.html"
>Section 3.5</A
></P
></LI
><LI
><P
><A
HREF="le97738-parent.html"
>Section 3.6</A
></P
></LI
><LI
><P
><A
HREF="le90681-parent.html"
>Section 3.7</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le84104-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le29006-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>IP Address Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Overview of Configuring Nodes for Linux FailSafe</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Hardware Components of a Linux FailSafe Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Administration"
HREF="le20463-parent.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Disk Connections"
HREF="le45765-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le20463-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le45765-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE32900-PARENT"
>1.5. Hardware Components of a Linux FailSafe Cluster</A
></H1
><P
> <A
HREF="le32900-parent.html#LE72758-PARENT"
>Figure 1-1</A
> shows an example
of Linux FailSafe hardware components, in this case for a two-node system.</P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE72758-PARENT"
></A
><P
><B
>Figure 1-1. Sample Linux FailSafe System Components</B
></P
><P
><IMG
SRC="figures/a1-1.failsafe.components.gif"></P
></DIV
></P
><P
>The hardware components of the Linux FailSafe system are as follows:</P
><P
></P
><UL
><LI
><P
>Up to eight Linux nodes</P
></LI
><LI
><P
>Two or more interfaces on each node to control networks (Ethernet,
FDDI, or any other available network interface)</P
><P
>At least two network interfaces on each node are required for the control
network <I
CLASS="FIRSTTERM"
>heartbeat</I
> connection, by which each node monitors
the state of other nodes. The Linux FailSafe software also uses this connection
to pass <I
CLASS="FIRSTTERM"
>control</I
> messages between nodes. These interfaces
have distinct IP addresses.</P
></LI
><LI
><P
>A mechanism for remote reset of nodes</P
><P
>A reset ensures that the failed node is not using the shared disks when
the replacement node takes them over.</P
></LI
><LI
><P
>Disk storage and SCSI bus shared by the nodes in the cluster</P
><P
>The nodes in the Linux FailSafe system can share dual-hosted disk storage
over a shared fast and wide SCSI bus where this is supported by the SCSI controller
and Linux driver.   <DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Few Linux drivers are currently known to implement this correctly.
Check hardware compatibility lists if this is a configuration you
plan to use.  Fibre Channel solutions should universally support this.</P
></BLOCKQUOTE
></DIV
> <DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>The Linux FailSafe system is designed to survive a single point of failure.
 Therefore, when a system component fails, it must be restarted, repaired,
or replaced as soon as possible to avoid the possibility of two or more failed
components.</P
></BLOCKQUOTE
></DIV
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le20463-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le45765-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Administration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Disk Connections</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Disk Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Planning Linux FailSafe Configuration"
HREF="le88622-parent.html"><LINK
REL="PREVIOUS"
TITLE="Introduction to Configuration Planning"
HREF="le57040-parent.html"><LINK
REL="NEXT"
TITLE="Logical Volume Configuration"
HREF="le96329-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le57040-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 2. Planning Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le96329-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE34382-PARENT"
>2.2. Disk Configuration</A
></H1
><P
> The first subsection below
describes the disk configuration issues that must be considered when planning
a Linux FailSafe system. It explains the basic configurations of shared and
non-shared disks and how they are reconfigured by Linux FailSafe after a failover.
 The second subsection explains how disk configurations are specified when
you configure the Linux FailSafe system.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN1343"
>2.2.1. Planning Disk Configuration</A
></H2
><P
>For each disk in a Linux FailSafe cluster, you must choose whether to
make it a shared disk, which enables it to be failed over, or a non-shared
disk. Non-shared disks are not failed over.</P
><P
>The nodes in a Linux FailSafe cluster must meet these requirements:</P
><P
></P
><UL
><LI
><P
>The system disk must be a non-shared disk.</P
></LI
><LI
><P
>The Linux FailSafe software, in particular the directories <TT
CLASS="FILENAME"
>/var/run/failsafe</TT
> and <TT
CLASS="FILENAME"
>/var/lib/failsafe</TT
>, must
be on a non-shared disk (a quick check follows this list).</P
></LI
></UL
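><P
>One quick way to confirm the second requirement is to check which device
each directory resides on; the output should name a local, non-shared disk:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>df /var/run/failsafe /var/lib/failsafe</B
></TT
></PRE
></TD
></TR
></TABLE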
><P
>Choosing to make a disk shared or non-shared depends on the needs of
the highly available services that use the disk. Each highly available service
has requirements about the location of data associated with the service:</P
><P
></P
><UL
><LI
><P
>Some data must be placed on non-shared disks</P
></LI
><LI
><P
>Some data must be placed on shared disks</P
></LI
><LI
><P
>Some data can be on shared or non-shared disks</P
></LI
></UL
><P
>The figures in the remainder of this section show the basic disk configurations
on Linux FailSafe clusters before failover. Each figure also shows the configuration
after failover. The basic disk configurations are these:</P
><P
></P
><UL
><LI
><P
>A non-shared disk on each node</P
></LI
><LI
><P
>Multiple shared disks containing Web server and NFS file server
documents</P
></LI
></UL
><P
>In each of the before and after failover diagrams, just one or two disks
are shown. In fact, many disks could be connected in the same way as each
disk shown. Thus each disk shown can represent a set of disks.</P
><P
>A Linux cluster can contain a combination of the basic disk configurations
listed above.</P
><P
><A
HREF="le34382-parent.html#LE22456-PARENT"
>Figure 2-1</A
> shows two nodes in a Linux FailSafe
cluster, each of which has a non-shared disk with two resource groups. When
non-shared disks are used by highly available applications, the data required
by those applications must be duplicated on non-shared disks on both nodes.
When a failover occurs, IP aliases fail over. The data that was originally
available on the failed node is still available from the replacement node
by using the IP alias to access it.</P
><P
>The configuration in <A
HREF="le34382-parent.html#LE22456-PARENT"
>Figure 2-1</A
> contains two resource
groups, <TT
CLASS="LITERAL"
>Group1</TT
> and <TT
CLASS="LITERAL"
>Group2</TT
>. <TT
CLASS="LITERAL"
>Group1</TT
> contains resource <TT
CLASS="LITERAL"
>192.26.50.1</TT
> of <TT
CLASS="LITERAL"
>IP_address</TT
> resource type. <TT
CLASS="LITERAL"
>Group2</TT
> contains resource <TT
CLASS="LITERAL"
>192.26.50.2</TT
> of <TT
CLASS="LITERAL"
>IP_address</TT
> resource type.</P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE22456-PARENT"
></A
><P
><B
>Figure 2-1. Non-Shared Disk Configuration and Failover</B
></P
><P
><IMG
SRC="figures/a2-3.non.shared.disk.config.gif"></P
></DIV
></P
><P
><A
HREF="le34382-parent.html#LE83029-PARENT"
>Figure 2-2</A
> shows a two-node configuration with
one resource group, <TT
CLASS="LITERAL"
>Group1</TT
>. Resource group Group1 has a
failover domain of (<TT
CLASS="LITERAL"
>xfs-ha1</TT
>, <TT
CLASS="LITERAL"
>xfs-ha2</TT
>).
Resource group Group1 contains three resources: resource <TT
CLASS="LITERAL"
>192.26.50.1</TT
> of resource type <TT
CLASS="LITERAL"
>IP_address</TT
>, resource <TT
CLASS="LITERAL"
>/shared</TT
> of resource type <TT
CLASS="LITERAL"
>filesystem</TT
>, and resource <TT
CLASS="LITERAL"
>shared_vol</TT
> of resource type <TT
CLASS="LITERAL"
>volume</TT
>.</P
><P
>In this configuration, the resource group <TT
CLASS="LITERAL"
>Group1</TT
>
has a <I
CLASS="FIRSTTERM"
>primary node</I
>, which is the node that accesses
the disk prior to a failover. It is shown by a solid line connection. The
backup node, which accesses the disk after a failover, is shown by a dotted
line. Thus, the disk is shared between the nodes. In an active/backup configuration,
all resource groups have the same primary node. The backup node does not run
any highly available resource groups until a failover occurs.</P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE83029-PARENT"
></A
><P
><B
>Figure 2-2. Shared Disk Configuration for Active/Backup Use</B
></P
><P
><IMG
SRC="figures/a2-4.shared.disk.config.gif"></P
></DIV
></P
><P
><A
HREF="le34382-parent.html#LE83152-PARENT"
>Figure 2-3</A
> shows two shared disks in a two-node
cluster with two resource groups, <TT
CLASS="LITERAL"
>Group1</TT
> and <TT
CLASS="LITERAL"
>Group2</TT
>. Resource group <TT
CLASS="LITERAL"
>Group1</TT
> contains the following
resources:</P
><P
></P
><UL
><LI
><P
>Resource <TT
CLASS="LITERAL"
>192.26.50.1</TT
> of type <TT
CLASS="LITERAL"
>IP_address</TT
></P
></LI
><LI
><P
>Resource <TT
CLASS="LITERAL"
>shared1_vol</TT
> of type <TT
CLASS="LITERAL"
>volume</TT
></P
></LI
><LI
><P
>Resource <TT
CLASS="LITERAL"
>/shared1</TT
> of type <TT
CLASS="LITERAL"
>filesystem</TT
></P
></LI
></UL
><P
>Resource group <TT
CLASS="LITERAL"
>Group1</TT
> has a failover domain of (<TT
CLASS="LITERAL"
>xfs-ha1</TT
>, <TT
CLASS="LITERAL"
>xfs-ha2</TT
>).</P
><P
>Resource group <TT
CLASS="LITERAL"
>Group2</TT
> contains the following resources:</P
><P
></P
><UL
><LI
><P
>Resource <TT
CLASS="LITERAL"
>192.26.50.2</TT
> of type <TT
CLASS="LITERAL"
>IP_address</TT
></P
></LI
><LI
><P
>Resource <TT
CLASS="LITERAL"
>shared2_vol</TT
> of type <TT
CLASS="LITERAL"
>volume</TT
></P
></LI
><LI
><P
>Resource <TT
CLASS="LITERAL"
>/shared2</TT
> of type <TT
CLASS="LITERAL"
>filesystem</TT
></P
></LI
></UL
><P
>Resource group <TT
CLASS="LITERAL"
>Group2</TT
> has a failover domain of (<TT
CLASS="LITERAL"
>xfs-ha2</TT
>, <TT
CLASS="LITERAL"
>xfs-ha1</TT
>).</P
><P
>In this configuration, each node serves as a primary node for one resource
group. The solid line connections show the connection to the primary node
prior to failover. The dotted lines show the connections to the backup nodes.
After a failover, the surviving node has all the resource groups.</P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE83152-PARENT"
></A
><P
><B
>Figure 2-3. Shared Disk Configuration For Dual-Active Use</B
></P
><P
><IMG
SRC="figures/a2-5.shred.disk.2active.cnfig.gif"></P
></DIV
></P
><P
>Other sections in this chapter provide more specific information about
choosing between shared and non-shared disks for various types of data associated
with each highly available service.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE10802-PARENT"
>2.2.2. Configuration Parameters for Disks</A
></H2
><P
>There are no configuration parameters associated with non-shared disks.
They are not specified when you configure a Linux FailSafe system. Only shared
disks (actually, the logical volumes/partitions on shared disks) are specified
at configuration. See <A
HREF="le96329-parent.html#LE13082-PARENT"
>Section 2.3.1</A
> for details.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le57040-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le96329-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Introduction to Configuration Planning</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le88622-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Logical Volume Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Dynamic Control of FailSafe Services</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"><LINK
REL="PREVIOUS"
TITLE="Status Monitoring"
HREF="le13884-parent.html"><LINK
REL="NEXT"
TITLE="Recovery Procedures"
HREF="le26593-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le13884-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 9. Linux FailSafe Recovery</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le26593-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE35544-PARENT"
>9.5. Dynamic Control of FailSafe Services</A
></H1
><P
>FailSafe allows you to perform a variety of administrative tasks that
can help you troubleshoot problems without bringing down the
entire system. These tasks include the following:</P
><P
></P
><UL
><LI
><P
>You can add or delete nodes from a cluster without affecting
the FailSafe services and the applications running in the cluster</P
></LI
><LI
><P
>You can add or delete a resource group without affecting other
online resource groups</P
></LI
><LI
><P
>You can add or delete resources from a resource group while
it is still online</P
></LI
><LI
><P
>You can change FailSafe parameters such as the heartbeat interval
and the node timeout and have those values take immediate effect while the
services are up and running</P
></LI
><LI
><P
>You can start and stop FailSafe services on specified nodes</P
></LI
><LI
><P
>You can move a resource group online or take it offline (see the
sketch after this list)</P
></LI
><LI
><P
>You can stop the monitoring of a resource group by putting
the resource group into maintenance mode. This is not an expensive operation,
as it does not stop and start the resource group; it just puts the resource
group in a state where it is not available to FailSafe.</P
></LI
><LI
><P
>You can reset individual nodes</P
></LI
></UL
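><P
>For example, a sketch of taking a resource group offline and bringing it
back online, reusing the hypothetical <TT
CLASS="FILENAME"
>web-rg</TT
> and <TT
CLASS="FILENAME"
>web-cluster</TT
> names used elsewhere in this guide, is as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline resource_group web-rg in cluster web-cluster</B
></TT
>
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin online resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE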
><P
>For information on how to perform these tasks, see <A
HREF="le94219-parent.html"
>Chapter 5</A
> and <A
HREF="le99367-parent.html"
>Chapter 7</A
>.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le13884-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le26593-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Status Monitoring</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Recovery Procedures</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>System Operation Considerations</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="Setting System Operation Defaults"
HREF="le85448-parent.html"><LINK
REL="NEXT"
TITLE="Activating (Starting) Linux FailSafe"
HREF="fs-activatehaservices.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le85448-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="fs-activatehaservices.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE36400-PARENT"
>7.2. System Operation Considerations</A
></H1
><P
>Once a Linux FailSafe command is started, it may partially complete
even if you interrupt the command by typing <B
CLASS="KEYCAP"
>Ctrl-c</B
>. If you
halt the execution of a command this way, you may leave the cluster in an
indeterminate state and you may need to use the various status commands to
determine the actual state of the cluster and its components.</P
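><P
>For example, one such status command, sketched here with the hypothetical <TT
CLASS="FILENAME"
>web-rg</TT
> and <TT
CLASS="FILENAME"
>web-cluster</TT
> names, is as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show status of resource_group web-rg in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE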
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le85448-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="fs-activatehaservices.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Setting System Operation Defaults</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Activating (Starting) Linux FailSafe</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Performing Diagnostic Tasks with the Cluster
Manager CLI</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Testing Linux FailSafe Configuration"
HREF="le56830-parent.html"><LINK
REL="PREVIOUS"
TITLE="Performing Diagnostic Tasks with the Cluster
Manager GUI"
HREF="le42786-parent.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Recovery"
HREF="le28716-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le42786-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 8. Testing Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le28716-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE37273-PARENT"
>8.3. Performing Diagnostic Tasks with the Cluster
Manager CLI</A
></H1
><P
>The following subsections describe how to perform diagnostic tasks
on your system using the Cluster Manager CLI commands.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN6044"
>8.3.1. Testing the Serial Connections with the Cluster Manager CLI</A
></H2
><P
>You can use the Cluster Manager
CLI to test the serial connections between the Linux FailSafe nodes. This
test pings each specified node through the serial line and produces an
error message if the ping is not successful. Do not execute this command
while FailSafe is running.</P
><P
>When you are using the Cluster Manager CLI, use the following command
to test the serial connections for the machines in a cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test serial in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>This test yields an error message when it encounters its first error,
indicating the node that did not respond. If you receive an error message
after executing this test, verify the connections of the serial
cable from the indicated node's serial port to the remote power control
unit or the system controller port of the other nodes and run the test
again.</P
><P
>The following shows an example of the <B
CLASS="COMMAND"
>test serial</B
>
CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>cluster_mgr</B
></TT
>
Welcome to Linux FailSafe Cluster Manager Command-Line Interface

cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test serial in cluster eagan on node cm1</B
></TT
>
Success: testing serial...
Success: Ensuring Node Can Get IP Addresses For All Specified Hosts
Success: Number of IP addresses obtained for &#60;cm1&#62; = 1
Success:        The first IP address for &#60;cm1&#62; = 128.162.19.34
Success: Checking serial lines via crsd (crsd is running)
Success: Successfully checked serial line
Success: Serial Line OK
Success: overall exit status:success, tests failed:0, total tests executed:1</PRE
></TD
></TR
></TABLE
><P
>The following shows an example of an attempt to run the <B
CLASS="COMMAND"
>test serial</B
> CLI command while FailSafe is running (causing the
command to fail to execute):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test serial in cluster eagan on node cm1</B
></TT
>
Error: Cannot run the serial tests, diagnostics has detected FailSafe (ha_cmsd) is running

Failed to execute FailSafe tests/diagnostics ha

test command failed
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE11186-PARENT"
>8.3.2. Testing Network Connectivity with the Cluster
Manager CLI</A
></H2
><P
>You can use the Cluster
Manager CLI to test the network connectivity in a cluster. This test checks
if the specified nodes can communicate with each other through each configured
interface in the nodes. This test will not run if FailSafe is running.</P
><P
>When you are using the Cluster Manager CLI, use the following command
to test the network connectivity for the machines in a cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test connectivity in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following shows an example of the <B
CLASS="COMMAND"
>test connectivity</B
> CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test connectivity in cluster eagan on node cm1</B
></TT
>
Success: testing connectivity...
Success: checking that the control IP_addresses are on the same networks
Success: pinging address cm1-priv interface ef0 from host cm1
Success: pinging address cm1 interface ef1 from host cm1
Success: overall exit status:success, tests failed:0, total tests 
executed:1</PRE
></TD
></TR
></TABLE
><P
>This test yields an error message when it encounters its first error,
indicating the node that did not respond. If you receive an error message
after executing this test, verify that the network interface has been
configured up, using the <B
CLASS="COMMAND"
>ifconfig</B
> command, for example:</P
><P
># <TT
CLASS="USERINPUT"
><B
>/usr/etc/ifconfig ec3</B
></TT
></P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>ec3: flags=c63&#60;UP,BROADCAST,NOTRAILERS,RUNNING,FILTMULTI,MULTICAST&#62;
&#8194;       inet 190.0.3.1 netmask 0xffffff00 broadcast 190.0.3.255</PRE
></TD
></TR
></TABLE
><P
>The UP in the first line of output indicates that the interface
is configured up.</P
><P
>If the network interface is configured up, verify that the network
cables are connected properly and run the test again.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE28447-PARENT"
>8.3.3. Testing Resources with the Cluster Manager CLI</A
></H2
><P
>You can use the Cluster Manager CLI to test any configured resource
by resource name or by resource type.</P
><P
>The Cluster Manager CLI uses the following syntax to test a resource
by name:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
><TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>E ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following shows an example of testing a resource by name:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource /disk1 of resource_type filesystem in cluster eagan on machine cm1</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: *** testing all filesystem resources on node cm1 ***
Success: testing resource /disk1 of resource type filesystem on node cm1
Success: overall exit status:success, tests failed:0, total tests executed:1</PRE
></TD
></TR
></TABLE
><P
>The Cluster Manager CLI uses the following syntax to test a resource
by resource type:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
><TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following shows an example of testing resources by resource
type:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource_type filesystem in cluster eagan on machine cm1</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: *** testing all filesystem resources on node cm1 ***
Success: testing resource /disk4 of resource type filesystem on node cm1
Success: testing resource /disk5 of resource type filesystem on node cm1
Success: testing resource /disk2 of resource type filesystem on node cm1
Success: testing resource /disk3 of resource type filesystem on node cm1
Success: testing resource /disk1 of resource type filesystem on node cm1
Success: overall exit status:success, tests failed:0, total tests executed:5</PRE
></TD
></TR
></TABLE
><P
>You can use the CLI to test volume and filesystem resources in <I
CLASS="EMPHASIS"
>destructive</I
> mode. This provides a more thorough test of filesystems
and volumes. CLI tests will not run in destructive mode if FailSafe is
running.</P
><P
>The Cluster Manager CLI uses the following syntax for the commands
that test resources in destructive mode:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
><TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C ...</I
></TT
>] <TT
CLASS="USERINPUT"
><B
>destructive</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>The following sections describe the diagnostic tests available for
resources.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN6140"
>8.3.3.1. Testing Logical Volumes</A
></H3
><P
>You can use the Cluster Manager CLI to
test the logical volumes in a cluster. This test checks if the specified
volume is configured correctly.</P
><P
>When you are using the Cluster Manager CLI, use the following command
to test a logical volume:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type volume on cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following example tests a logical volume:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource alternate of resource_type volume on cluster eagan</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: *** testing all volume resources on node cm1 ***
Success: running resource type volume tests on node cm1
Success: *** testing node resources on node cm2 ***
Success: *** testing all volume resources on node cm2 ***
Success: running resource type volume tests on node cm2
Success: overall exit status:success, tests failed:0, total tests executed:2
cmgr&#62;</PRE
></TD
></TR
></TABLE
><P
>The following example tests a logical volume in destructive mode:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource alternate of resource_type volume on cluster eagan destructive</B
></TT
>
Warning: executing the tests in destructive mode
Success: *** testing node resources on node cm1 ***
Success: *** testing all volume resources on node cm1 ***
Success: running resource type volume tests on node cm1
Success: successfully assembled volume: alternate
Success: *** testing node resources on node cm2 ***
Success: *** testing all volume resources on node cm2 ***
Success: running resource type volume tests on node cm2
Success: successfully assembled volume: alternate
Success: overall exit status:success, tests failed:0, total tests executed:2
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN6162"
>8.3.3.2. Testing Filesystems</A
></H3
><P
>You can use the Cluster Manager CLI to test the filesystems configured
in a cluster. This test checks if the specified filesystem is configured
correctly and, in addition, checks whether the volume the filesystem will
reside on is configured correctly.</P
><P
>When you are using the Cluster Manager CLI, use the following command
to test a filesystem:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type filesystem on cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following example tests a filesystem. This example first uses
a CLI <B
CLASS="COMMAND"
>show</B
> command to display the filesystems that
have been defined in a cluster.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resources of resource_type filesystem in cluster eagan</B
></TT
>
/disk4 type filesystem
/disk5 type filesystem
/disk2 type filesystem
/disk3 type filesystem
/disk1 type filesystem
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource /disk4 of resource_type filesystem in cluster eagan on node cm1</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: *** testing all filesystem resources on node cm1 ***
Success: successfully mounted filesystem: /disk4
Success: overall exit status:success, tests failed:0, total tests executed:1
cmgr&#62;</PRE
></TD
></TR
></TABLE
><P
>The following example tests a filesystem in destructive mode:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource /disk4 of resource_type filesystem in cluster eagan on node cm1 destructive</B
></TT
>
Warning: executing the tests in destructive mode
Success: *** testing node resources on node cm1 ***
Success: *** testing all filesystem resources on node cm1 ***
Success: successfully mounted filesystem: /disk4
Success: overall exit status:success, tests failed:0, total tests executed:1
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN6186"
>8.3.3.3. Testing NFS Filesystems</A
></H3
><P
>You can use the Cluster Manager CLI to test the NFS filesystems
configured in a cluster. This test checks if the specified NFS filesystem
is configured correctly and, in addition, checks whether the volume the
NFS filesystem will reside on is configured correctly. </P
><P
>When you are using the Cluster Manager CLI, use the following command
to test an NFS filesystem:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type NFS on cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following example tests an NFS filesystem:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource /disk4 of resource_type NFS in cluster eagan</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: *** testing all NFS resources on node cm1 ***
Success: *** testing node resources on node cm2 ***
Success: *** testing all NFS resources on node cm2 ***
Success: overall exit status:success, tests failed:0, total tests executed:2
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN6207"
>8.3.3.4. Testing statd Resources</A
></H3
><P
>You can use the Cluster Manager CLI to test the statd resources
configured in a cluster. When you are using the Cluster Manager CLI, use
the following command to test a statd resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type statd on cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following example tests a statd resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource /disk1/statmon of resource_type statd in cluster eagan</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: *** testing all statd resources on node cm1 ***
Success: *** testing node resources on node cm2 ***
Success: *** testing all statd resources on node cm2 ***
Success: overall exit status:success, tests failed:0, total tests executed:2
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN6228"
>8.3.3.5. Testing Netscape-web Resources</A
></H3
><P
>You can use the Cluster Manager CLI to test the Netscape Web resources
configured in a cluster. </P
><P
>When you are using the Cluster Manager CLI, use the following command
to test a Netscape-web resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type Netscape_web on cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C  </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D ...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following example tests a Netscape-web resource. In this example,
the Netscape-web resource on node cm2 failed the diagnostic test.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource nss-enterprise of resource_type Netscape_web in cluster eagan</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: *** testing all Netscape_web resources on node cm1 ***
Success: *** testing node resources on node cm2 ***
Success: *** testing all Netscape_web resources on node cm2 ***
Warning: resource nss-enterprise has invaild script /var/netscape/suitespot/https-ha85 location
Warning: /var/netscape/suitespot/https-ha85/config/magnus.conf must contain the
"Port" parameter
Warning: /var/netscape/suitespot/https-ha85/config/magnus.conf must contain the
"Address" parameter
Warning: resource nss-enterprise of type Netscape_web failed
Success: overall exit status:failed, tests failed:1, total tests executed:2
Failed to execute FailSafe tests/diagnostics ha
test command failed
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="LE62758-PARENT"
>8.3.3.6. Testing Resource Groups</A
></H3
><P
>You can use the Cluster Manager CLI to test a resource group. This
test cycles through the resource tests for all of the resources defined
for a resource group. Resource tests are performed only on nodes in the
resource group's application failover domain. </P
><P
>The Cluster Manager CLI uses the following syntax for the commands
that test resource groups:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C </I
></TT
><TT
CLASS="USERINPUT"
><B
>node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following example tests a resource group. This example first
uses a CLI <B
CLASS="COMMAND"
>show</B
> command to display the resource groups
that have been defined in a cluster.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_groups in cluster eagan</B
></TT
>
Resource Groups:
&#8194;       nfs2
&#8194;       informix
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test resource_group nfs2 in cluster eagan on machine cm1</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: testing resource /disk4 of resource type NFS on node cm1
Success: testing resource /disk3 of resource type NFS on node cm1
Success: testing resource /disk3/statmon of resource type statd on node cm1
Success: testing resource 128.162.19.45 of resource type IP_address on node cm1
Success: testing resource /disk4 of resource type filesystem on node cm1
Success: testing resource /disk3 of resource type filesystem on node cm1
Success: testing resource dmf1 of resource type volume on node cm1
Success: testing resource dmfjournals of resource type volume on node cm1
Success: overall exit status:success, tests failed:0, total tests executed:16
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE29671-PARENT"
>8.3.4. Testing Failover Policies with the Cluster Manager
CLI</A
></H2
><P
>You can use the Cluster Manager CLI to test whether a failover policy
is defined correctly. This test validates the policy script and the failover
attributes, and checks whether the application failover
domain consists of valid nodes from the cluster.</P
><P
>The Cluster Manager CLI uses the following syntax for the commands
that test a failover policy:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test failover_policy </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;...</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>The following example tests a failover policy. This example first
uses a CLI <B
CLASS="COMMAND"
>show</B
> command to display the failover policies
that have been defined in a cluster.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show failover_policies</B
></TT
>
Failover Policies:
&#8194;       reverse
&#8194;       ordered-in-order
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>test failover_policy reverse in cluster eagan</B
></TT
>
Success: *** testing node resources on node cm1 ***
Success: testing policy reverse on node cm1
Success: *** testing node resources on node cm2 ***
Success: testing policy reverse on node cm2
Success: overall exit status:success, tests failed:0, total tests executed:2
cmgr&#62;</PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le42786-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le28716-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Performing Diagnostic Tasks with the Cluster
Manager GUI</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le56830-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Recovery</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/le37674-parent.html010064400016050000001000000100670717757367600155470ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Backing Up and Restoring Configuration With Cluster
Manager CLI</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="Resetting Nodes"
HREF="fs-resetmachine.html"><LINK
REL="NEXT"
TITLE="Testing Linux FailSafe Configuration"
HREF="le56830-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="fs-resetmachine.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le56830-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE37674-PARENT"
>7.8. Backing Up and Restoring Configuration With Cluster
Manager CLI</A
></H1
><P
>   The
Cluster Manager CLI provides scripts that you can use to back up and restore
your configuration: <B
CLASS="COMMAND"
>cdbDump</B
> and <B
CLASS="COMMAND"
>cdbRestore</B
>.
These scripts are installed in the <TT
CLASS="FILENAME"
>/var/cluster/cmgr-scripts</TT
>
directory. You can modify these scripts to suit your needs.</P
><P
>The <B
CLASS="COMMAND"
>cdbDump </B
>script, as provided, creates compressed
tar files of the <TT
CLASS="FILENAME"
>/var/cluster/cdb/cdb.db#</TT
> directory and
the <TT
CLASS="FILENAME"
>/var/cluster/cdb.db</TT
> file.</P
><P
>The <B
CLASS="COMMAND"
>cdbRestore </B
>script, as provided, restores the
compressed tar files of the <TT
CLASS="FILENAME"
>/var/cluster/cdb/cdb.db#</TT
>
directory and the <TT
CLASS="FILENAME"
>/var/cluster/cdb.db</TT
> file.</P
><P
>When you use the <B
CLASS="COMMAND"
>cdbDump</B
> and <B
CLASS="COMMAND"
>cdbRestore </B
>scripts, observe the following guidelines:</P
><P
></P
><UL
><LI
><P
>Run the <B
CLASS="COMMAND"
>cdbDump</B
> and <B
CLASS="COMMAND"
>cdbRestore </B
>scripts only when no administrative commands are running; running them
while administrative commands are in progress could
result in an inconsistent backup.</P
></LI
><LI
><P
>You must back up the configuration of each node in the cluster
separately. The configuration information is different for each node, and
all node-specific information is stored locally only.</P
></LI
><LI
><P
>Run the backup procedure whenever you change your configuration.</P
></LI
><LI
><P
>The backups of all nodes in the pool taken at the same time
should be restored together.</P
></LI
><LI
><P
>Cluster and Linux FailSafe processes should not be running when
you restore your configuration.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>In addition to the above restrictions, you should not perform a <B
CLASS="COMMAND"
>cdbDump </B
>while information is changing in the CDB. Check SYSLOG for
information to help determine when CDB activity is occurring. As a rule of
thumb, you should be able to perform a <B
CLASS="COMMAND"
>cdbDump </B
>if at least
15 minutes have passed since the last node joined the cluster or the last
administration command was run.</P
></BLOCKQUOTE
></DIV
></LI
></UL
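><P
>The following is a minimal sketch of backing up and later restoring the
configuration with these scripts, assuming they are invoked with no
arguments, as provided (remember that cluster and Linux FailSafe
processes must not be running when you restore):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/var/cluster/cmgr-scripts/cdbDump</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>/var/cluster/cmgr-scripts/cdbRestore</B
></TT
></PRE
></TD
></TR
></TABLE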
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="fs-resetmachine.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le56830-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Resetting Nodes</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Testing Linux FailSafe Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/le39637-parent.html010064400016050000001000000103750717757330200155340ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Choosing and Configuring Devices and Filesystems</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="PREVIOUS"
TITLE="Additional Configuration Issues"
HREF="le13651-parent.html"><LINK
REL="NEXT"
TITLE="Configuring Network Interfaces"
HREF="le97738-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le13651-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le97738-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE39637-PARENT"
>3.5. Choosing and Configuring Devices and Filesystems</A
></H1
><P
>   Creating devices, logical
volumes, and filesystems involves a variety of steps specific to the filesystems
and other tools selected. Documenting these steps is outside the scope of this
guide; refer to the system and distribution-specific documentation
for more assistance in this area.</P
><P
>When you create the volumes and filesystems for use with Linux FailSafe,
remember these important points:</P
><P
></P
><UL
><LI
><P
>If the shared disks are not in a RAID storage system, mirrored
volumes should be used.</P
></LI
><LI
><P
>  Each device used must be owned by
the same node that is the primary node for the highly available applications
that use the logical volume. To simplify the management of the <TT
CLASS="REPLACEABLE"
><I
>nodenames</I
></TT
> (owners) of volumes on shared disks, follow these
recommendations:</P
><P
></P
><UL
><LI
><P
>Work with the volumes on a shared disk from only one node
in the cluster.</P
></LI
><LI
><P
>After you create all the volumes on one node, you can selectively
shift the ownership to the other node.</P
></LI
></UL
></LI
><LI
><P
>If the volumes you create are used as raw volumes (no filesystem)
for storing database data, the database system may require that the device
names have specific owners, groups, and modes. If this is the case (see the
documentation provided by the database vendor), use the <B
CLASS="COMMAND"
>chown</B
>
and <B
CLASS="COMMAND"
>chmod </B
> commands (see the <B
CLASS="COMMAND"
>chown</B
> and <B
CLASS="COMMAND"
>chmod</B
> reference pages) to set the owner, group, and mode as required, as
sketched in the first example after this list.</P
></LI
><LI
><P
>No filesystem entries are made in <TT
CLASS="FILENAME"
>/etc/fstab</TT
>
for filesystems on shared disks; Linux FailSafe software mounts the filesystems
on shared disks. However, to simplify system administration, consider adding
comments to <TT
CLASS="FILENAME"
>/etc/fstab</TT
> that list the filesystems configured
for Linux FailSafe, as shown in the second example after this list. That way,
a system administrator who sees mounted Linux FailSafe
filesystems in the output of the <B
CLASS="COMMAND"
>df</B
> command and looks for
them in the <TT
CLASS="FILENAME"
>/etc/fstab</TT
> file will learn that
they are managed by Linux FailSafe.</P
></LI
><LI
><P
>Be sure to create the mount point directory for each filesystem
on all nodes in the failover domain.</P
></LI
></UL
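><P
>For example, the following is a hedged sketch of setting the owner,
group, and mode of a raw volume's device file for a database (the device
path, owner, group, and mode shown are hypothetical; use the values your
database vendor requires):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>chown oracle:dba /dev/xlv/HA_vol</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>chmod 660 /dev/xlv/HA_vol</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Similarly, the <TT
CLASS="FILENAME"
>/etc/fstab</TT
> comments suggested above might look like the following (the volume and
mount point names are hypothetical):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># The filesystems below are mounted by Linux FailSafe;
# do not add mount entries for them here.
#   /dev/xlv/HA_vol1   /shared_vol1   (Linux FailSafe, resource group nfs-group)
#   /dev/xlv/HA_vol2   /shared_vol2   (Linux FailSafe, resource group nfs-group)</PRE
></TD
></TR
></TABLE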
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le13651-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le97738-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Additional Configuration Issues</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Configuring Network Interfaces</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/le40511-parent.html010064400016050000001000000107730717757353600155260ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Resource Group Creation Example</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe System Log Configuration"
HREF="fs-setlogparams.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Configuration Example CLI Script"
HREF="le40790-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="fs-setlogparams.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le40790-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE40511-PARENT"
>5.7. Resource Group Creation Example</A
></H1
><P
>Use the following procedure to create
a resource group using the Cluster Manager CLI:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Determine the list of resources that belong to the resource
group you are defining. The resources that belong to a resource group
move from one node to another as one unit.</P
><P
> A resource group that provides
NFS services would contain a resource of each of the following types:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="FILENAME"
>IP_address</TT
></P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>volume</TT
></P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>filesystem</TT
></P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>NFS</TT
></P
><P
>All resource and resource type dependencies of resources in a resource
group must be satisfied. For example, the <TT
CLASS="FILENAME"
>NFS</TT
> resource
type depends on the <TT
CLASS="FILENAME"
>filesystem</TT
> resource type, so a resource
group containing a resource of <TT
CLASS="FILENAME"
>NFS</TT
> resource type should
also contain a resource of <TT
CLASS="FILENAME"
>filesystem</TT
> resource type.</P
></LI
></UL
></LI
><LI
><P
>Determine the failover policy to be used by the resource group.</P
></LI
><LI
><P
>Use the template <TT
CLASS="FILENAME"
>cluster_mgr</TT
> script available
in the <TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates/cmgr-create-resource_group</TT
> file.</P
><P
>This example shows a script that creates a resource group with the following
characteristics:</P
><P
></P
><UL
><LI
><P
>The resource group is named <TT
CLASS="FILENAME"
>nfs-group</TT
></P
></LI
><LI
><P
>The resource group is in cluster <TT
CLASS="FILENAME"
>HA-cluster</TT
></P
></LI
><LI
><P
>The resource group uses the failover policy <TT
CLASS="FILENAME"
>n1_n2_ordered</TT
></P
></LI
><LI
><P
>The resource group contains <TT
CLASS="FILENAME"
>IP_Address</TT
>, <TT
CLASS="FILENAME"
>volume</TT
>, <TT
CLASS="FILENAME"
>filesystem</TT
>, and <TT
CLASS="FILENAME"
>NFS</TT
>
resources</P
><P
>The following script can be used to create this resource group:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>define resource_group nfs-group in cluster HA-cluster
&#8194;       set failover_policy to n1_n2_ordered
&#8194;       add resource 192.0.2.34 of resource_type IP_address
&#8194;       add resource havol1 of resource_type volume
&#8194;       add resource /hafs1 of resource_type filesystem
&#8194;       add resource /hafs1 of resource_type NFS
done</PRE
></TD
></TR
></TABLE
></LI
></UL
></LI
><LI
><P
>Run this script using the <B
CLASS="COMMAND"
>-f</B
> option of
the <B
CLASS="COMMAND"
>cluster_mgr</B
> command.</P
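><P
>For example, assuming the script has been saved in a (hypothetical)
file named <TT
CLASS="FILENAME"
>/tmp/create-nfs-group</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr -f /tmp/create-nfs-group</B
></TT
></PRE
></TD
></TR
></TABLE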
></LI
></OL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="fs-setlogparams.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le40790-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe System Log Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Configuration Example CLI Script</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/le40594-parent.html010064400016050000001000000164660717757403200155360ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Adding a Node to an Active Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="PREVIOUS"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="NEXT"
TITLE="Deleting a Node from an Active Cluster"
HREF="le15663-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le55630-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 10. Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le15663-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE40594-PARENT"
>10.1. Adding a Node to an Active Cluster</A
></H1
><P
>Use the following procedure to add a node to an active cluster. This
procedure begins with the assumption that <TT
CLASS="FILENAME"
>cluster_admin</TT
>, <TT
CLASS="FILENAME"
>cluster_control</TT
>, <TT
CLASS="FILENAME"
>cluster_ha</TT
> and <TT
CLASS="FILENAME"
>failsafe2</TT
> products are already installed on this node.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Check control network connections from the node to the rest
of the cluster using the <B
CLASS="COMMAND"
>ping</B
> command, as in the example below. Note the list of control
network IP addresses.</P
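><P
>For example (the address shown is one of the sample control network
addresses used in the configuration example in this guide; substitute
your own addresses):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>ping -c 3 192.26.50.14</B
></TT
></PRE
></TD
></TR
></TABLE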
></LI
><LI
><P
>Check the serial connections to reset this node. Note the
name of the node that can reset this node.</P
></LI
><LI
><P
>Run node diagnostics. For information on FailSafe diagnostic
commands, see <A
HREF="le56830-parent.html"
>Chapter 8</A
>.&#8194;</P
></LI
><LI
><P
>Make sure <TT
CLASS="FILENAME"
>sgi-cad</TT
>, <TT
CLASS="FILENAME"
>sgi-crsd</TT
>, <TT
CLASS="FILENAME"
>sgi-cmsd</TT
>, and <TT
CLASS="FILENAME"
>sgi-gcd</TT
>
entries are present in the <TT
CLASS="FILENAME"
>/etc/services</TT
> file. The port
numbers for these processes should match the port numbers in other nodes in
the cluster.</P
><P
>Example entries:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>sgi-cad         7200/tcp      # SGI cluster admin daemon
sgi-crsd        7500/udp      # SGI cluster reset services daemon
sgi-cmsd        7000/udp      # SGI cluster membership Daemon
sgi-gcd         8000/udp      # SGI group communication Daemon</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Check if cluster processes (<TT
CLASS="FILENAME"
>cad</TT
>, <TT
CLASS="FILENAME"
>cmond</TT
>, <TT
CLASS="FILENAME"
>crsd</TT
>) are running.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>ps -ef | grep cad</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>If cluster processes are not running, run the <B
CLASS="COMMAND"
>cdbreinit</B
>
command.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cdbreinit /var/lib/failsafe/cdb/cdb.db</B
></TT
>&#8194;
	Killing cdbd...
	Removing database header file /var/lib/failsafe/cdb/cdb.db...
	Preparing to delete database directory /var/lib/failsafe/cdb/cdb.db# !!
	Continue[y/n]<TT
CLASS="USERINPUT"
><B
>y</B
></TT
>
	Removing database directory /var/lib/failsafe/cdb/cdb.db#...
	Deleted CDB database at /var/lib/failsafe/cdb/cdb.db
	Recreating new CDB database at /var/lib/failsafe/cdb/cdb.db with cdb-exitop...
	 cdbd
	 Created standard CDB database in /var/lib/failsafe/cdb/cdb.db

	 Please make sure that "sgi-cad" service is added to /etc/services file
	 If not, add the entry and restart cluster processes.
	 Please refer to FailSafe administration manual for more
	 information.

	 Modifying CDB database at /var/lib/failsafe/cdb/cdb.db with cluster_ha-exitop...
	 Modified standard CDB database in /var/lib/failsafe/cdb/cdb.db

	 Please make sure that "sgi-cmsd" and "sgi-gcd" services are added
	 to /etc/services file before starting HA services.
	 Please refer to FailSafe administration manual for more
	 information.

	 Starting cluster control processes with cluster_control-exitop...

	 Please make sure that "sgi-crsd" service is added to /etc/services file
	 If not, add the entry and restart cluster processes.
	 Please refer to FailSafe administration manual for more
	 information.

	 Started cluster control processes
	 Restarting cluster admin processes with failsafe-exitop...</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Use the <B
CLASS="COMMAND"
>cluster_mgr</B
> template (<TT
CLASS="FILENAME"
>/usr/lib/failsafe/cmgr-templates/cmgr-create-node</TT
>) or the <B
CLASS="COMMAND"
>cluster_mgr</B
> command to define the node.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>This node must be defined from one of nodes that is already in the cluster.</P
></BLOCKQUOTE
></DIV
></LI
><LI
><P
>Use the <B
CLASS="COMMAND"
>cluster_mgr</B
> command to add the
node to the cluster.</P
><P
>For example, the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command adds
the node <TT
CLASS="FILENAME"
>web-node3</TT
> to the cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify cluster web-cluster</B
></TT
>
Enter commands, when finished enter either "done" or "cancel"</PRE
></TD
></TR
></TABLE
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>web-cluster ? <TT
CLASS="USERINPUT"
><B
>add node web-node3</B
></TT
>
web-cluster ? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>You can start HA services on this node using the <B
CLASS="COMMAND"
>cluster_mgr</B
> command. For example, the following <B
CLASS="COMMAND"
>cluster_mgr</B
> command starts HA services on node <TT
CLASS="FILENAME"
>web-node3</TT
>
in cluster <TT
CLASS="FILENAME"
>web-cluster</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>start ha_services on node web-node3 in cluster web-cluster</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Remember to add this node to the failure domain of the relevant
failover policy. To do this, you must redefine the entire failover policy,
including the additional node in the failure domain, as sketched below.</P
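><P
>The following is a hedged sketch of such a redefinition, using the
script syntax shown in <A
HREF="le40790-parent.html"
>Section 5.8</A
> (the policy name, attribute, and node names are hypothetical):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>define failover_policy web-policy
&#8194;       set attribute to InPlace_Recovery
&#8194;       set script to ordered
&#8194;       set domain to web-node1 web-node2 web-node3
done</PRE
></TD
></TR
></TABLE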
></LI
></OL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le15663-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Deleting a Node from an Active Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/le40790-parent.html010064400016050000001000000764420717757354200155410ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Linux FailSafe Configuration Example CLI Script</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Resource Group Creation Example"
HREF="le40511-parent.html"><LINK
REL="NEXT"
TITLE="Configuration Examples"
HREF="configexample.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le40511-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="configexample.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE40790-PARENT"
>5.8. Linux FailSafe Configuration Example CLI Script</A
></H1
><P
>The following Cluster Manager CLI script provides an example which shows
how to configure a cluster in the cluster database. The script illustrates
the CLI commands that you execute when you define a cluster. You will use
the parameters of your own system when you configure your cluster. After you
create a CLI script, you can set the execute permissions and execute the script
directly.</P
><P
>For general information on CLI scripts, see <A
HREF="le15969-parent.html#LE41514-PARENT"
>Section 4.3.4</A
>.
For information on the CLI template files that you can use to create your
own configuration script, see <A
HREF="le15969-parent.html#LE10673-PARENT"
>Section 4.3.5</A
>.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>#!/usr/lib/failsafe/bin/cluster_mgr -f


#################################################################
#                                                               #
# Sample cmgr script to create a 2-node cluster in the cluster  #
# database (cdb).                                               #
# This script is created using cmgr template files under        #
#          /usr/lib/failsafe/cmgr-templates directory.        #
# The cluster has 2 resource groups:                            #
# 1. nfs-group - Has 2 NFS, 2 filesystem, 2 volume, 1 statd and #
#                1 IP_address resources.                        #
# 2. web-group - Has 1 Netscape_web and 1 IP_address resources. #
#                                                               #
# NOTE: After running this script to define the cluster in the  #
# cdb, the user has to enable the two resource groups using the #
# cmgr admin online resource_group command.                     #
#                                                               #
#################################################################

#
# Create the first node.
# Information to create a node is obtained from template script:
#	/usr/lib/failsafe/cmgr-templates/cmgr-create-node
#

#
#
# Logical name of the node. It is recommended that the logical name of
# the node be the output of the hostname(1) command.
#
define node sleepy
#
# Hostname of the node. This is optional. If this field is not
# specified, the logical name of the node is assumed to be the hostname.
# This value has to be
# the output of the hostname(1) command.
#
&#8194;      set hostname to sleepy
# 
# Node identifier. The node identifier is a 16-bit integer that uniquely
# identifies the node. This field is optional. If a value is
# not provided, the cluster software generates a node identifier.
# Example value: 1
&#8194;      set nodeid to 101
#
# Description of the system controller of this node.
# System controller can be &#8220;chalL&#8221; or &#8220;msc&#8221; or &#8220;mmsc&#8221;. If the node is a
# Challenge DM/L/XL, then system controller type is &#8220;chalL&#8221;. If the
# node is Origin 200 or deskside Origin 2000, then the system
# controller type is &#8220;msc&#8221;. If the node is rackmount Origin 2000, the
# system controller type is &#8220;mmsc&#8221;.
# Possible values: msc, mmsc, chalL
#
&#8194;       set sysctrl_type to msc
#
# You can enable or disable system controller definition. Users are 
# expected to enable the system controller definition after verifying the
# serial reset cables connected to this node.
# Possible values: enabled, disabled
#
&#8194;       set sysctrl_status to enabled
# 
# The system controller password for doing privileged system controller
# commands.
# This field is optional.
#
&#8194;       set sysctrl_password to none
#
# System controller owner. The node name of the machine that is 
# connected using serial cables to system controller of this node.
# System controller node also has to be defined in the CDB.
#
&#8194;       set sysctrl_owner to grumpy
#
# System controller device. The absolute device path name of the tty
# to which the serial cable is connected in this node.
# Example value: /dev/ttyd2
#
&#8194;       set sysctrl_device to /dev/ttyd2
#
# Currently, the system controller owner can be connected to the system
# controller on this node using a &#8220;tty&#8221; device.
# Possible value: tty
#
&#8194;       set sysctrl_owner_type to tty
#
# List of control networks. There can be multiple control networks
# specified for a node. HA cluster software uses these control 
# networks for communication between nodes.  At least two control
# networks should be specified for heartbeat messages and one
# control network for failsafe control messages.
# For each control network for the node, please add one more
# control network section.
#
# Name of the control network IP address. This IP address must
# be configured on the network interface in the /etc/rc.config
# file on the node.
# It is recommended that the IP address be provided in internet
# dot notation.
# Example value: 192.26.50.3
#
&#8194;       add nic 192.26.50.14
#
# Flag to indicate if the control network can be used for sending 
# heartbeat messages.
# Possible values: true, false
#
&#8194;           set heartbeat to true
#
# Flag to indicate if the control network can be used for sending 
# failsafe control messages.
# Possible values: true, false
#
&#8194;           set ctrl_msgs to true
#
# Priority of the control network. The higher the priority value, the
# lower the priority of the control network.
# Example value: 1
#
&#8194;           set priority to 1
#
# Control network information complete
#
&#8194;       done
#
# Add more control networks information here.
#

# Name of the control network IP address. This IP address must be
# configured on the network interface in the /etc/rc.config
# file on the node.
# It is recommended that the IP address be provided in internet
# dot notation.
# Example value: 192.26.50.3
#
&#8194;       add nic 150.166.41.60
#
# Flag to indicate if the control network can be used for sending 
# heartbeat messages.
# Possible values: true, false
#
&#8194;           set heartbeat to true
#
# Flag to indicate if the control network can be used for sending 
# failsafe control messages.
# Possible values: true, false
#
&#8194;           set ctrl_msgs to false
#
# Priority of the control network. The higher the priority value, the
# lower the priority of the control network.
# Example value: 1
#
&#8194;           set priority to 2
#
# Control network information complete
#
&#8194;       done
#
# Node definition complete
#
done


#
# Create the second node.
# Information to create a node is obtained from template script:
# 	/usr/lib/failsafe/cmgr-templates/cmgr-create-node
#

#
#
# Logical name of the node. It is recommended that the logical name of
# the node be the output of the hostname(1) command.
#
define node grumpy
#
# Hostname of the node. This is optional. If this field is not
# specified, the logical name of the node is assumed to be the hostname.
# This value has to be
# the output of the hostname(1) command.
#
&#8194;      set hostname to grumpy
# 
# Node identifier. The node identifier is a 16-bit integer that uniquely
# identifies the node. This field is optional. If a value is
# not provided, the cluster software generates a node identifier.
# Example value: 1
&#8194;      set nodeid to 102
#
# Description of the system controller of this node.
# System controller can be &#8220;chalL&#8221; or &#8220;msc&#8221; or &#8220;mmsc&#8221;. If the node is a
# Challenge DM/L/XL, then system controller type is &#8220;chalL&#8221;. If the
# node is Origin 200 or deskside Origin 2000, then the system
# controller type is &#8220;msc&#8221;. If the node is rackmount Origin 2000,
# the system controller type is &#8220;mmsc&#8221;.
# Possible values: msc, mmsc, chalL
#
&#8194;       set sysctrl_type to msc
#
# You can enable or disable system controller definition. Users are 
# expected to enable the system controller definition after verifying the
# serial reset cables connected to this node.
# Possible values: enabled, disabled
#
&#8194;       set sysctrl_status to enabled
# 
# The system controller password for doing privileged system controller
# commands.
# This field is optional.
#
&#8194;       set sysctrl_password to none
#
# System controller owner. The node name of the machine that is 
# connected using serial cables to system controller of this node.
# System controller node also has to be defined in the CDB.
#
&#8194;       set sysctrl_owner to sleepy
#
# System controller device. The absolute device path name of the tty
# to which the serial cable is connected in this node.
# Example value: /dev/ttyd2
#
&#8194;       set sysctrl_device to /dev/ttyd2
#
# Currently, the system controller owner can be connected to the system
# controller on this node using a &#8220;tty&#8221; device.
# Possible value: tty
#
&#8194;       set sysctrl_owner_type to tty
#
# List of control networks. There can be multiple control networks
# specified for a node. HA cluster software uses these control
# networks for communication between nodes.  At least two control
# networks should be specified for heartbeat messages and one
# control network for failsafe control messages.
# For each control network for the node, please add one more 
# control network section.
#
# Name of the control network IP address. This IP address must be
# configured on the network interface in the /etc/rc.config
# file on the node.
# It is recommended that the IP address be provided in internet
# dot notation.
# Example value: 192.26.50.3
#
&#8194;       add nic 192.26.50.15
#
# Flag to indicate if the control network can be used for sending 
# heartbeat messages.
# Possible values: true, false
#
&#8194;           set heartbeat to true
#
# Flag to indicate if the control network can be used for sending 
# failsafe control messages.
# Possible values: true, false
#
&#8194;           set ctrl_msgs to true
#
# Priority of the control network. The higher the priority value, the
# lower the priority of the control network.
# Example value: 1
#
&#8194;           set priority to 1
#
# Control network information complete
#
&#8194;       done
#
# Add more control networks information here.
#

# Name of the control network IP address. This IP address must be
# configured on the network interface in the /etc/rc.config
# file on the node.
# It is recommended that the IP address be provided in internet
# dot notation.
# Example value: 192.26.50.3
#
&#8194;       add nic 150.166.41.61
#
# Flag to indicate if the control network can be used for sending 
# heartbeat messages.
# Possible values: true, false
#
&#8194;           set heartbeat to true
#
# Flag to indicate if the control network can be used for sending 
# failsafe control messages.
# Possible values: true, false
#
&#8194;           set ctrl_msgs to false
#
# Priority of the control network. The higher the priority value, the
# lower the priority of the control network.
# Example value: 1
#
&#8194;           set priority to 2
#
# Control network information complete
#
&#8194;       done
#
# Node definition complete
#
done


#
# Define (create) the cluster.
# Information to create the cluster is obtained from template script:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-cluster
#

#
# Name of the cluster.  
#
define cluster failsafe-cluster
#
# Notification command for the cluster. This is optional. If this 
# field is not specified,  /usr/bin/mail command is used for
# notification. Notification is sent when there is change in status of
# cluster, node and resource group.
#
&#8194;      set notify_cmd to /usr/bin/mail
# 
# Notification address for the cluster. This field value is passed as
# argument to the notification command. Specifying the notification
# command is optional, and the user can specify only the notification
# address in order to receive notifications by mail. If address is
# not specified, notification will not be sent.
# Example value: failsafe_alias@sysadm.company.com
&#8194;      set notify_addr to robinhood@sgi.com princejohn@sgi.com
#
# List of nodes added to the cluster.
# Repeat the following line for each node to be added to the cluster.
# Node should be already defined in the CDB and logical name of the
# node has to be specified.
&#8194;       add node sleepy
#
# Add more nodes to the cluster here.
#
&#8194;       add node grumpy

#
# Cluster definition complete
#
done


#
# Create failover policies
# Information to create the failover policies is obtained from
# template script:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-failover_policy
#

#
# Create the first failover policy.
#

#
# Name of the failover policy.  
#
define failover_policy sleepy-primary
#
# Failover policy attribute. This field is mandatory.
# Possible values: Auto_Failback, Controlled_Failback, Auto_Recovery,
# InPlace_Recovery
#

&#8194;       set attribute to Auto_Failback

&#8194;       set attribute to Auto_Recovery

# 
# Failover policy script. The failover policy scripts have to
# be present in
# /usr/lib/failsafe/policies directory. This field is mandatory.
# Example value: ordered (file name not the full path name).
&#8194;       set script to ordered
#
# Failover policy domain. Ordered list of nodes in the cluster
# separated by spaces. This field is mandatory.
#
&#8194;       set domain to sleepy grumpy
#
# Failover policy definition complete
#
done

#
# Create the second failover policy.
#

#
# Name of the failover policy.  
#
define failover_policy grumpy-primary
#
# Failover policy attribute. This field is mandatory. 
# Possible values: Auto_Failback, Controlled_Failback, Auto_Recovery,
# InPlace_Recovery
#

&#8194;       set attribute to Auto_Failback

&#8194;       set attribute to InPlace_Recovery

# 
# Failover policy script. The failover policy scripts have
# to be present in
# /usr/lib/failsafe/policies directory. This field is mandatory.
# Example value: ordered (file name not the full path name).
&#8194;       set script to ordered
#
# Failover policy domain. Ordered list of nodes in the cluster
# separated by spaces. This field is mandatory.
#
&#8194;       set domain to  grumpy sleepy
#
# Failover policy definition complete
#
done


#
# Create the IP_address resources.
# Information to create an IP_address resource is obtained from:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-resource-IP_address
#

#
# If multiple resources of resource type IP_address have to be created,
# repeat the following IP_address definition template.
#
# Name of the IP_address resource.  The name of the resource has to
# be an IP address in the internet &#8220;.&#8221; notation. This IP address is used
# by clients to access highly available resources.
# Example value: 192.26.50.140
#
define resource 150.166.41.179 of resource_type IP_address in cluster failsafe-cluster

#
# The network mask for the IP address. The network mask value is used
# to configure the IP address on the network interface.
# Example value: 0xffffff00
&#8194;       set NetworkMask to 0xffffff00
# 
# The ordered list of interfaces that can be used to configure the IP
# address. The interface names in the list are separated by commas.
# Example value: eth0, eth1
&#8194;       set interfaces to eth1
#
# The broadcast address for the IP address.
# Example value: 192.26.50.255
&#8194;       set BroadcastAddress to 150.166.41.255

#
# IP_address resource definition for the cluster complete
#
done

#
# Name of the IP_address resource.  The name of the resource has to be
# an IP address in the internet &#8220;.&#8221; notation. This IP address is used by
# clients to access highly available resources.
# Example value: 192.26.50.140
#
define resource 150.166.41.99 of resource_type IP_address in cluster failsafe-cluster

#
# The network mask for the IP address. The network mask value is used
# to configure the IP address on the network interface.
# Example value: 0xffffff00
&#8194;       set NetworkMask to 0xffffff00
# 
# The ordered list of interfaces that can be used to configure the IP
# address.
# The interface names in the list are separated by commas.
# Example value: eth0, eth1
&#8194;       set interfaces to eth1
#
# The broadcast address for the IP address.
# Example value: 192.26.50.255
&#8194;       set BroadcastAddress to 150.166.41.255

#
# IP_address resource definition for the cluster complete
#
done


#
# Create the volume resources.
# Information to create a volume resource is obtained from:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-resource-volume
#

#
# If multiple resources of resource type volume have to be created,
# repeat the following volume definition template.
#
# Name of the volume.  The name has to be the bare volume name:
# Example value: HA_vol (not /dev/xlv/HA_vol)
#
define resource bagheera of resource_type volume in cluster failsafe-cluster

#
# The owner (user name) of the device file. This field is optional. If
# this field is not specified, value ``root'' is used.
# Example value: oracle
&#8194;       set devname-owner to root
# 
# The group of the device file. This field is optional.
# If this field is not specified, value ``sys&#8221; is used.
# Example value: oracle
&#8194;       set devname-group to sys
#
# The device file permissions. This field is optional. If this
# field is not specified, value ``666&#8221; is used. The file permissions
# have to be specified in octal notation. See chmod(1) for more
# information.
# Example value: 666
&#8194;       set devname-mode to 666

#
# Volume resource definition for the cluster complete
#
done

#
# Name of the volume.  The name has to be the bare volume name:
# Example value: HA_vol (not /dev/xlv/HA_vol)
#
define resource bhaloo of resource_type volume in cluster failsafe-cluster

#
# The owner (user name) of the device file. This field is optional. If this
# field is not specified, value &#8220;root&#8221; is used.
# Example value: oracle
&#8194;       set devname-owner to root
# 
# The group of the device file. This field is optional.
# If this field is not specified, value &#8220;sys&#8221; is used.
# Example value: oracle
&#8194;       set devname-group to sys
#
# The device file permissions. This field is optional. If this field is
# not specified, value &#8220;666&#8221; is used. The file permissions
# have to be specified in octal notation. See chmod(1) for more
# information.
# Example value: 666
&#8194;       set devname-mode to 666

#
# Volume resource definition for the cluster complete
#
done


#
# Create the filesystem resources.
# Information to create a filesystem resource is obtained from:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-resource-filesystem
#

#
# The filesystem resource type is for XFS filesystems only.

# If multiple resources of resource type filesystem have to be created,
# repeat the following filesystem definition template.
#
# Name of the filesystem.  The name of the filesystem resource has 
# to be the absolute path name of the filesystem mount point.
# Example value: /shared_vol 
#
define resource /haathi of resource_type filesystem in cluster failsafe-cluster

#
# The name of the volume resource corresponding to the filesystem. This 
# resource should be the same as the volume dependency, see below.
# This field is mandatory.
# Example value: HA_vol
&#8194;       set volume-name to bagheera
# 
# The options to be used when mounting the filesystem. This field is
# mandatory. For the list of mount options, see fstab(4).
# Example value: &#8220;rw&#8221;
&#8194;       set mount-options to rw
#
# The monitoring level for the filesystem. This field is optional. If
# this field is not specified, value &#8220;1&#8221; is used. 
# Monitoring level can be 
# 1 - Checks if filesystem exists in the mtab file (see mtab(4)). This
# is a lightweight check compared to monitoring level 2.
# 2 - Checks if the filesystem is mounted using the stat(1M) command.
#
&#8194;       set monitoring-level to 2
done

#
# Add filesystem resource type dependency
#
modify resource /haathi of resource_type filesystem in cluster failsafe-cluster
#
# The filesystem resource type definition also contains a resource
# dependency on a volume resource.
# This field is mandatory.
# Example value: HA_vol
&#8194;       add dependency bagheera of type volume
#
# filesystem resource definition for the cluster complete
#
done

#
# Name of the filesystem.  The name of the filesystem resource has 
# to be the absolute path name of the filesystem mount point.
# Example value: /shared_vol 
#
define resource /sherkhan of resource_type filesystem in cluster failsafe-cluster

#
# The name of the volume resource corresponding to the filesystem. This 
# resource should be the same as the volume dependency, see below.
# This field is mandatory.
# Example value: HA_vol
&#8194;       set volume-name to bhaloo
# 
# The options to be used when mounting the filesystem. This field is
# mandatory. For the list of mount options, see fstab(4).
# Example value: &#8220;rw&#8221;
&#8194;       set mount-options to rw
#
# The monitoring level for the filesystem. This field is optional. If
# this field is not specified, value &#8220;1&#8221; is used. 
# Monitoring level can be 
# 1 - Checks if filesystem exists in the mtab file (see mtab(4)). This
# is a lightweight check compared to monitoring level 2.
# 2 - Checks if the filesystem is mounted using the stat(1M) command.
#
&#8194;       set monitoring-level to 2
done

#
# Add filesystem resource type dependency
#
modify resource /sherkhan of resource_type filesystem in cluster failsafe-cluster
#
# The filesystem resource type definition also contains a resource
# dependency on a volume resource.
# This field is mandatory.
# Example value: HA_vol
&#8194;       add dependency bhaloo of type volume
#
# filesystem resource definition for the cluster complete
#
done


#
# Create the statd resource.
# Information to create a filesystem resource is obtained from:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-resource-statd
#

#
# If multiple resources of resource type statd have to be created,
# repeat the following statd definition template.
#
# Name of the statd resource.  The name of the resource has to be the location
# of the NFS/lockd directory.
# Example value: /disk1/statmon
#

define resource /haathi/statmon of resource_type statd in cluster failsafe-cluster

#
# The IP address to which the NFS clients connect; this resource should
# be the same as the IP_address dependency, see below.
# This field is mandatory.
# Example value: 128.1.2.3
&#8194;       set InterfaceAddress to 150.166.41.99
done


#
# Add the statd resource type dependencies
#
modify resource /haathi/statmon of resource_type statd in cluster failsafe-cluster
#
# The statd resource type definition also contains a resource
# dependency on an IP_address resource.
# This field is mandatory.
# Example value: 128.1.2.3
&#8194;       add dependency 150.166.41.99 of type IP_address
#
# The statd resource type definition also contains a resource
# dependency on a filesystem resource. It defines the location of
# the NFS lock directory filesystem.
# This field is mandatory.
# Example value: /disk1
&#8194;       add dependency /haathi of type filesystem
#
# statd resource definition for the cluster complete
#
done


#
# Create the NFS resources.
# Information to create a NFS resource is obtained from:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-resource-NFS
#

#
# If multiple resources of resource type NFS have to be created, repeat
# the following NFS definition template.
#
# Name of the NFS export point. The name of the NFS resource has to be
# the export path name of the filesystem mount point.
# Example value: /disk1
#
define resource /haathi of resource_type NFS in cluster failsafe-cluster

#
# The export options to be used when exporting the filesystem. For the
# list of export options, see exportfs(1M).
# This field is mandatory.
# Example value: &#8220;rw,wsync,anon=root&#8221;
&#8194;       set export-info to rw
#
# The name of the filesystem resource corresponding to the export
# point. This resource should be the same as the filesystem dependency,
# see below.
# This field is mandatory.
# Example value: /disk1
&#8194;       set filesystem to /haathi
done

#
# Add the resource type dependency
#
modify resource /haathi of resource_type NFS in cluster failsafe-cluster
#
# The NFS resource type definition also contains a resource dependency
# on a filesystem resource.
# This field is mandatory.
# Example value: /disk1
&#8194;       add dependency /haathi of type filesystem
#
# The NFS resource type also contains a pseudo resource dependency
# on a statd resource. You really must have a statd resource associated
# with an NFS resource, so the NFS locks can be failed over.
# This field is mandatory.
# Example value: /disk1/statmon
&#8194;       add dependency /haathi/statmon of type statd

#
# NFS resource definition for the cluster complete
#
done

#
# Name of the NFS export point. The name of the NFS resource has to be 
# the export path name of the filesystem mount point.
# Example value: /disk1
#
define resource /sherkhan of resource_type NFS in cluster failsafe-cluster

# 
# The export options to be used when exporting the filesystem. For the
# list of export options, see exportfs(1M).
# This field is mandatory.
# Example value: &#8220;rw,wsync,anon=root&#8221;
&#8194;       set export-info to rw
#
# The name of the filesystem resource corresponding to the export
# point. This 
# resource should be the same as the filesystem dependency, see below.
# This field is mandatory.
# Example value: /disk1
&#8194;       set filesystem to /sherkhan
done

#
# Add the resource type dependency
#
modify resource /sherkhan of resource_type NFS in cluster failsafe-cluster
#
# The NFS resource type definition also contains a resource dependency
# on a filesystem resource.
# This field is mandatory.
# Example value: /disk1
&#8194;       add dependency /sherkhan of type filesystem
#
# The NFS resource type also contains a pseudo resource dependency
# on a statd resource. You really must have a statd resource associated
# with an NFS resource, so the NFS locks can be failed over.
# This field is mandatory.
# Example value: /disk1/statmon
&#8194;       add dependency /haathi/statmon of type statd

#
# NFS resource definition for the cluster complete
#
done


#
# Create the Netscape_web resource.
# Information to create a Netscape_web resource is obtained from:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-resource-Netscape_web
#

#
# If multiple resources of resource type Netscape_web have to be
# created, repeat the following Netscape_web definition template.
#
# Name of the Netscape Web server.  The name of the resource has to be
# a unique identifier.
# Example value: ha80 
#
define resource web-server of resource_type Netscape_web in cluster failsafe-cluster

#
# The location of the server's startup and stop scripts.
# This field is mandatory.
# Example value: /usr/ns-home/ha86
&#8194;       set admin-scripts to /var/netscape/suitespot/https-control3
#
# The TCP port number that the server listens on.
# This field is mandatory.
# Example value: 80
&#8194;       set port-number to 80
#
# The desired monitoring level; the user can specify either:
#       1 - checks for process existence
#       2 - issues an HTML query to the server.
# This field is mandatory.
# Example value: 2
&#8194;       set monitor-level to 2
#
# The location of the Web server's initial HTML page.
# This field is mandatory.
# Example value: /var/www/htdocs
&#8194;       set default-page-location to /var/www/htdocs
#
# The Web server's IP address; this must be a configured IP_address
# resource. 
# This resource should be the same as the IP_address dependency, see
# below.
# This field is mandatory.
# Example value: 28.12.9.5
&#8194;       set web-ipaddr to 150.166.41.179
done

#
# Add the resource dependency
#
modify resource web-server of resource_type Netscape_web in cluster failsafe-cluster
#
# The Netscape_web resource type definition also contains a resource
# dependency on an IP_address resource.
# This field is mandatory.
# Example value: 28.12.9.5
&#8194;       add dependency 150.166.41.179 of type IP_address
#
# Netscape_web resource definition for the cluster complete
#
done


#
# Create the resource groups.
# Information to create a resource group is obtained from:
#       /usr/lib/failsafe/cmgr-templates/cmgr-create-resource_group
#

#
# Name of the resource group. The name of the resource group must be
# unique in the cluster.
#
define resource_group nfs-group in cluster failsafe-cluster
#
# Failover policy for the resource group. This field is mandatory. 
# Failover policy should be already defined in the CDB.
#
&#8194;       set failover_policy to sleepy-primary
#
# List of resources in the resource group.
# Repeat the following line for each resource to be added to the
# resource group.
&#8194;       add resource 150.166.41.99 of resource_type IP_address
#
# Add more resources to the resource group here.
#
&#8194;       add resource bagheera of resource_type volume

&#8194;       add resource bhaloo of resource_type volume

&#8194;       add resource /haathi of resource_type filesystem

&#8194;       add resource /sherkhan of resource_type filesystem

&#8194;       add resource /haathi/statmon of resource_type statd

&#8194;       add resource /haathi of resource_type NFS

&#8194;       add resource /sherkhan of resource_type NFS

#
# Resource group definition complete
#
done

#
# Name of the resource group. The name of the resource group must be
# unique in the cluster.
#
define resource_group web-group in cluster failsafe-cluster
#
# Failover policy for the resource group. This field is mandatory. 
# Failover policy should be already defined in the CDB.
#
&#8194;       set failover_policy to grumpy-primary
#
# List of resources in the resource group.
# Repeat the following line for each resource to be added to the
# resource group.
&#8194;       add resource 150.166.41.179 of resource_type IP_address

#
# Add more resources to the resource group here.
#

&#8194;       add resource web-server of resource_type Netscape_web

#
# Resource group definition complete
#
done


#
# Script complete. This should be the last line of the script.
#
quit</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le40511-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="configexample.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Resource Group Creation Example</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Configuration Examples</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Resource Group Failover</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="System Status"
HREF="le16877-parent.html"><LINK
REL="NEXT"
TITLE="Deactivating (Stopping) Linux FailSafe"
HREF="z957117933glen.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le16877-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="z957117933glen.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE41282-PARENT"
>7.5. Resource Group Failover</A
></H1
><P
>   While a Linux FailSafe
system is running, you can bring a resource group online on a particular node,
or you can take a resource group offline. In addition, you can move a resource
group from one node in a cluster to another node in a cluster. The following
subsections describe these tasks.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-BRINGRESGROUPONLINE"
>7.5.1. Bringing a Resource Group Online</A
></H2
><P
>Before you bring a resource group online for the first time, you should
run the diagnostic tests on that resource group. Diagnostics check system
configurations and perform some validations that are not performed when you
bring a resource group online.</P
><P
>To bring a resource group online, you specify the name of the resource
group and the name of the cluster that contains it.</P
><P
>You cannot bring a resource group online if the resource group has no
members.</P
><P
>To bring a resource group fully online, HA services must be active.
When HA services are active, an attempt is made to allocate the resource group
in the cluster. However, you can also execute a command to bring the resource
group online when HA services are not active. When HA services are not active,
the resource group is marked to be brought online when HA services become
active.</P
><DIV
CLASS="CAUTION"
><P
></P
><TABLE
CLASS="CAUTION"
BORDER="1"
WIDTH="100%"
><TR
><TD
ALIGN="CENTER"
><B
>Caution</B
></TD
></TR
><TR
><TD
ALIGN="LEFT"
><P
>Before bringing a resource group online in the cluster, you must be
sure that the resource group is not running on a disabled node (where HA services
are not running). Bringing a resource group online while it is running on
a disabled node could cause data corruption. For information on detached resource
groups, see <A
HREF="le41282-parent.html#FS-TAKERESGROUPOFFLINE"
>Section 7.5.2</A
>.</P
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5633"
>7.5.1.1. Bringing a Resource Group Online with the Cluster Manager GUI</A
></H3
><P
>To bring a resource group online using the Cluster Manager GUI, perform
the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Bring
a Resource Group Online&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5645"
>7.5.1.2. Bringing a Resource Group Online with the Cluster Manager CLI</A
></H3
><P
>To bring a resource group online, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin online resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster when you use this command.</P
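>For example, to bring the resource group <TT
CLASS="LITERAL"
>nfs-group</TT
> from the configuration example in this guide online in the cluster <TT
CLASS="LITERAL"
>failsafe-cluster</TT
>, you might enter the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin online resource_group nfs-group in cluster failsafe-cluster</B
></TT
></PRE
></TD
></TR
></TABLE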
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-TAKERESGROUPOFFLINE"
>7.5.2. Taking a Resource Group Offline</A
></H2
><P
>When you take a resource group offline, FailSafe takes each resource
in the resource group offline in a predefined order. If any single resource
gives an error during this process, the process stops, leaving all remaining
resources allocated.</P
><P
>You can take a Linux FailSafe resource group offline in any of three
ways:</P
><P
></P
><UL
><LI
><P
>Take the resource group offline. This physically stops the
processes for that resource group and does not reset any error conditions.
If this operation fails, the resource group will be left online in an error
state.</P
></LI
><LI
><P
>Force the resource group offline. This physically stops the
processes for that resource group but resets any error conditions. This operation
cannot fail.</P
></LI
><LI
><P
>Detach the resource groups. This causes Linux FailSafe to
stop monitoring the resource group, but does not physically stop the processes
on that group. Linux FailSafe will report the status as offline and will not
have any control over the group. This operation should rarely fail.</P
></LI
></UL
><P
>If you do not need to stop the resource group and do not want Linux
FailSafe to monitor the resource group while you make changes but you would
still like to have administrative control over the resource group (for instance,
to move that resource group to another node), you can put the resource group
in maintenance mode using the &#8220;Suspend Monitoring a Resource Group&#8221;
task on the GUI or the <B
CLASS="COMMAND"
>admin maintenance_on</B
> command of
the CLI, as described in <A
HREF="le41282-parent.html#FS-STOPMONITORINGRESGROUP"
>Section 7.5.4</A
>.</P
><DIV
CLASS="CAUTION"
><P
></P
><TABLE
CLASS="CAUTION"
BORDER="1"
WIDTH="100%"
><TR
><TD
ALIGN="CENTER"
><B
>Caution</B
></TD
></TR
><TR
><TD
ALIGN="LEFT"
><P
>Detaching a resource group leaves the resources in the resource group
running at the cluster node where it was online. After stopping HA services
on that cluster node, you should not bring the resource group online onto
another node in the cluster, as this may cause data corruption.</P
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5670"
>7.5.2.1. Taking a Resource Group Offline with the Cluster Manager GUI</A
></H3
><P
>To take a resource group offline using the Cluster Manager GUI, perform
the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the Cluster Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Take
a Resource Group Offline&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5684"
>7.5.2.2. Taking a Resource Group Offline with the Cluster Manager CLI</A
></H3
><P
>To take a resource group offline, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
><P
>To take a resource group offline with the force option in effect, use
the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline_force resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>To detach a resource group, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline_detach resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
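><P
>For example, to detach the resource group <TT
CLASS="LITERAL"
>web-group</TT
> from the configuration example in this guide, you might enter the
following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin offline_detach resource_group web-group in cluster failsafe-cluster</B
></TT
></PRE
></TD
></TR
></TABLE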
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-MOVERESGROUP"
>7.5.3. Moving a Resource Group</A
></H2
><P
>While Linux FailSafe is active, you can move
a resource group to another node in the same cluster. When you move a resource
group, you specify the following:</P
><P
></P
><UL
><LI
><P
>The name of the resource group.</P
></LI
><LI
><P
>The logical name of the destination node (optional). When
you do not provide a logical destination name, Linux FailSafe chooses the
destination based on the failover policy.</P
></LI
><LI
><P
>The name of the cluster that contains the nodes.</P
></LI
></UL
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5718"
>7.5.3.1. Moving a Resource Group with the Cluster Manager GUI</A
></H3
><P
>To move a resource group using the Cluster Manager GUI, perform the
following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Move
a Resource Group&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5730"
>7.5.3.2. Moving a Resource Group with the Cluster Manager CLI</A
></H3
><P
>To move a resource group to another node, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin move resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>] [<TT
CLASS="USERINPUT"
><B
>to node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>]</PRE
></TD
></TR
></TABLE
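><P
>For example, to move the resource group <TT
CLASS="LITERAL"
>nfs-group</TT
> from the configuration example in this guide to a node named <TT
CLASS="LITERAL"
>node-2</TT
> (a hypothetical node name; substitute a node from your own failover
domain), you might enter the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin move resource_group nfs-group in cluster failsafe-cluster to node node-2</B
></TT
></PRE
></TD
></TR
></TABLE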
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-STOPMONITORINGRESGROUP"
>7.5.4. Stop Monitoring of a Resource Group (Maintenance
Mode)</A
></H2
><P
>  You
can temporarily stop Linux FailSafe from monitoring a specific resource group,
which puts the resource group in maintenance mode. The resource group remains
on its same node in the cluster but is no longer monitored by Linux FailSafe
for resource failures.</P
><P
>You can put a resource group into maintenance mode if you do not want
Linux FailSafe to monitor the group for a period of time. You may want to
do this for upgrade or testing purposes, or if there is any reason that Linux
FailSafe should not act on that resource group. When a resource group is in
maintenance mode, it is not being monitored and it is not highly available.
If the resource group's owner node fails, Linux FailSafe will move the resource
group to another node and resume monitoring.</P
><P
>When you put a resource group into maintenance mode, resources in the
resource group are in <TT
CLASS="LITERAL"
>ONLINE-MAINTENANCE</TT
> state. The <TT
CLASS="LITERAL"
>ONLINE-MAINTENANCE</TT
> state for the resource is seen only on the node
that has the resource online. All other nodes will show the resource as ONLINE.
The resource group, however, should appear as being in <TT
CLASS="LITERAL"
>ONLINE-MAINTENANCE</TT
> state on all nodes.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5756"
>7.5.4.1. Putting a Resource Group into Maintenance Mode with the Cluster Manager
GUI</A
></H3
><P
>To put a resource group into maintenance mode using the Cluster Manager
GUI, perform the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Suspend
Monitoring a Resource Group&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5766"
>7.5.4.2. Resume Monitoring of a Resource Group with the Cluster Manager GUI</A
></H3
><P
>To resume monitoring a resource group using the Cluster Manager GUI,
perform the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Resume
Monitoring a Resource Group&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5776"
>7.5.4.3. Putting a Resource Group into Maintenance Mode with the Cluster Manager
CLI</A
></H3
><P
>To put a resource group into maintenance mode, use the following CLI
command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin maintenance_on resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster when you use this command.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN5785"
>7.5.4.4. Resume Monitoring of a Resource Group with the Cluster Manager CLI</A
></H3
><P
>To move a resource group back online
from maintenance mode, use the following CLI command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin maintenance_off resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
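><P
>For example, to resume monitoring of the resource group <TT
CLASS="LITERAL"
>nfs-group</TT
> from the configuration example in this guide, you might enter the
following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>admin maintenance_off resource_group nfs-group in cluster failsafe-cluster</B
></TT
></PRE
></TD
></TR
></TABLE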
></DIV
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le16877-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="z957117933glen.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>System Status</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Deactivating (Stopping) Linux FailSafe</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Performing Diagnostic Tasks with the Cluster
Manager GUI</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Testing Linux FailSafe Configuration"
HREF="le56830-parent.html"><LINK
REL="PREVIOUS"
TITLE="Overview of FailSafe Diagnostic Commands"
HREF="le67057-parent.html"><LINK
REL="NEXT"
TITLE="Performing Diagnostic Tasks with the Cluster
Manager CLI"
HREF="le37273-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le67057-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 8. Testing Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le37273-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE42786-PARENT"
>8.2. Performing Diagnostic Tasks with the Cluster
Manager GUI</A
></H1
><P
>To test the components of a FailSafe system using the Cluster Manager
GUI, perform the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Select Task Manager on the FailSafe Toolchest.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Diagnostics&#8221;
category.</P
></LI
><LI
><P
>Select one of the diagnostics tasks that appear on the
right side of the display: &#8220;Test Connectivity,&#8221; &#8220;Test
Resources,&#8221; or &#8220;Test Failover Policy.&#8221;</P
></LI
></OL
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-TESTCONNECTIVITY"
>8.2.1. Testing Connectivity with the Cluster Manager GUI</A
></H2
><P
> When you select the &#8220;Test Connectivity&#8221; task from
the Diagnostics display, you can test the network and serial connections
on the nodes in your cluster by entering the requested inputs. You can
test all of the nodes in the cluster at one time, or you can specify an
individual node to test.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-TESTRESOURCES"
>8.2.2. Testing Resources with the Cluster Manager GUI</A
></H2
><P
>When you select the &#8220;Test Resources&#8221; task from the Diagnostics
display, you can test the resources on the nodes in your cluster by entering
the requested inputs. You can test resources by type and by group. You
can test the resources of a resource type or in a resource group on all
of the nodes in the cluster at one time, or you can specify an individual
node to test. Resource tests are performed only on nodes in the resource
group's application failover domain.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-TESTFAILOVERPOL"
>8.2.3. Testing Failover Policies with the Cluster Manager GUI</A
></H2
><P
>When you select the &#8220;Test
Failover Policy&#8221; task from the Diagnostics display, you can test
whether a failover policy is defined correctly. This test checks the failover
policy by validating the policy script, failover attributes, and whether
the application failover domain consists of valid nodes from the cluster.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le67057-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le37273-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Overview of FailSafe Diagnostic Commands</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le56830-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Performing Diagnostic Tasks with the Cluster
Manager CLI</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Linux FailSafe Disk Connections</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Hardware Components of a Linux FailSafe Cluster"
HREF="le32900-parent.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Supported Configurations"
HREF="le79484-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le32900-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le79484-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE45765-PARENT"
>1.6. Linux FailSafe Disk Connections</A
></H1
><P
>A Linux FailSafe system supports the following disk connections:</P
><P
></P
><UL
><LI
><P
>RAID support</P
><P
></P
><UL
><LI
><P
>Single controller or dual controllers</P
></LI
><LI
><P
>Single or dual hubs</P
></LI
><LI
><P
>Single or dual pathing</P
></LI
></UL
></LI
><LI
><P
>JBOD support</P
><P
></P
><UL
><LI
><P
>Single or dual vaults</P
></LI
><LI
><P
>Single or dual hubs</P
></LI
></UL
></LI
><LI
><P
>Network-mirrored support</P
><P
></P
><UL
><LI
><P
>Clustered filesystems such as GFS</P
></LI
><LI
><P
>Network mirroring block devices such as with DRBD</P
></LI
></UL
></LI
></UL
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Network-mirrored devices are not discussed in the examples within this
guide. However, the Linux FailSafe configuration settings used for shared
storage apply equally to network-mirrored storage.</P
></BLOCKQUOTE
></DIV
><P
>SCSI disks can be connected to two machines only. Fibre channel disks
can be connected to multiple machines.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le32900-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le79484-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Hardware Components of a Linux FailSafe Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Supported Configurations</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>N="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="html/le53159-parent.html010064400016050000001000002247240717757351700155440ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Resource Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Cluster Configuration"
HREF="z957104627glen.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe System Log Configuration"
HREF="fs-setlogparams.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="z957104627glen.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="fs-setlogparams.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE53159-PARENT"
>5.5. Resource Configuration</A
></H1
><P
>A <I
CLASS="GLOSSTERM"
>resource</I
> is a single
physical or logical entity that provides a service to clients or other resources.
A resource is generally available for use on two or more nodes in a cluster,
although only one node controls the resource at any given time. For example,
a resource can be a single disk volume, a particular network address, or an
application such as a web server.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINERESOURCE"
>5.5.1. Defining Resources</A
></H2
><P
>Resources are identified by a resource name
and a resource type. A <I
CLASS="FIRSTTERM"
>resource name</I
> identifies a specific
instance of a resource type. A <I
CLASS="FIRSTTERM"
>resource type</I
> is a particular
class of resource. All of the resources in a given resource type can be handled
in the same way for the purposes of failover. Every resource is an instance
of exactly one resource type.</P
><P
>A resource type is identified with a simple name. A resource type can
be defined for a specific logical node, or it can be defined for an entire
cluster. A resource type that is defined for a node will override a clusterwide
resource type definition of the same name; this allows an individual node
to override global settings from a clusterwide resource type definition.</P
><P
>The Linux FailSafe software includes many predefined resource types.
If these types fit the application you want to make into a highly available
service, you can reuse them. If none fit, you can define additional resource
types.</P
><P
>To define a resource, you provide the following information:</P
><P
></P
><UL
><LI
><P
>The name of the resource to define, with a maximum length
of 255 characters.</P
></LI
><LI
><P
>The type of resource to define. The Linux FailSafe system
contains some pre-defined resource types (template and <TT
CLASS="FILENAME"
>IP_address</TT
>). You can define your own resource type as well.</P
></LI
><LI
><P
>The name of the cluster that contains the resource.</P
></LI
><LI
><P
>The logical name of the node that contains the resource (optional).
If you specify a node, a local version of the resource will be defined on
that node.</P
></LI
><LI
><P
>Resource type-specific attributes for the resource. Each resource
type may require specific parameters to define for the resource, as described
in the following subsections.</P
></LI
></UL
><P
>You can define up to 100 resources in a Linux FailSafe configuration.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="IPATTRIBUTES"
>5.5.1.1. IP Address Resource Attributes</A
></H3
><P
> The IP Address resources
are the IP addresses used by clients to access the highly available services
within the resource group. These IP addresses are moved from one node to another
along with the other resources in the resource group when a failure is detected.</P
><P
>You specify the resource name of an IP address in dotted decimal notation.
 IP names that require name resolution should not be used. For example, 192.26.50.1
is a valid resource name of the IP Address resource type.</P
><P
>The IP address you define as a Linux FailSafe resource must not be the
same as the IP address of a node hostname or the IP address of a node's control
network.</P
><P
>When you define an IP address, you can optionally specify the following
parameters. If you specify any of these parameters, you must specify all of
them.</P
><P
></P
><UL
><LI
><P
>The broadcast address for the IP address.</P
></LI
><LI
><P
>The network mask of the IP address.</P
></LI
><LI
><P
>A comma-separated list of interfaces on which the IP address
can be configured. This ordered list is a superset of all the interfaces on
all nodes where this IP address might be allocated. Hence, in a mixed cluster
with different ethernet drivers, an IP address might be placed on eth0 on
one system and ln0 on another. In this case the <TT
CLASS="FILENAME"
>interfaces</TT
>
field would be  <TT
CLASS="FILENAME"
>eth0,ln0</TT
> or <TT
CLASS="FILENAME"
>ln0,eth0</TT
>.</P
><P
>The order of the list of interfaces determines the priority order for
determining which interface will be used for local restarts of the IP address.</P
></LI
></UL
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-ADDDEPTORESOURCE"
>5.5.2. Adding Dependency to a Resource</A
></H2
><P
>One resource can be dependent on one or more other
resources; if so, it will not be able to start (that is, be made available
for use) unless the dependent resources are started as well. Dependent resources
must be part of the same resource group.</P
><P
>Like resources, a resource type can be dependent on one or more other
resource types. If such a dependency exists, at least one instance of each
of the dependent resource types must be defined. For example, a resource type
named <B
CLASS="COMMAND"
>Netscape_web</B
> might have resource type dependencies
on a resource types named <B
CLASS="COMMAND"
>IP_address</B
> and <B
CLASS="COMMAND"
>volume</B
>. If a resource named <B
CLASS="COMMAND"
>ws1</B
> is defined with the <B
CLASS="COMMAND"
>Netscape_web</B
> resource type, then the resource group containing <B
CLASS="COMMAND"
>ws1</B
> must also contain at least one resource of the type <B
CLASS="COMMAND"
>IP_address</B
> and one resource of the type <B
CLASS="COMMAND"
>volume</B
>.</P
><P
>You cannot make resources mutually dependent. For example, if resource
A is dependent on resource B, then you cannot make resource B dependent on
resource A. In addition, you cannot define cyclic dependencies. For example,
if resource A is dependent on resource B, and resource B is dependent on resource
C, then resource C cannot be dependent on resource A.</P
><P
>When you add a dependency to a resource definition, you provide the
following information:</P
><P
></P
><UL
><LI
><P
>The name of the existing resource to which you are adding
a dependency.</P
></LI
><LI
><P
>The resource type of the existing resource to which you are
adding a dependency.</P
></LI
><LI
><P
>The name of the cluster that contains the resource.</P
></LI
><LI
><P
>Optionally, the logical node name of the node in the cluster
that contains the resource. If specified, resource dependencies are added
to the node's definition of the resource. If this is not specified, resource
dependencies are added to the cluster-wide resource definition.</P
></LI
><LI
><P
>The resource name of the resource dependency.</P
></LI
><LI
><P
>The resource type of the resource dependency.</P
></LI
></UL
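><P
>For example, as a sketch of this sequence using the example resource <B
CLASS="COMMAND"
>ws1</B
> of resource type <B
CLASS="COMMAND"
>Netscape_web</B
> described above, the example IP_address resource name 192.26.50.1 from
this chapter, and a hypothetical cluster named <TT
CLASS="LITERAL"
>test</TT
>, you might enter the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify resource ws1 of resource_type Netscape_web in cluster test</B
></TT
>
resource ws1? <TT
CLASS="USERINPUT"
><B
>add dependency 192.26.50.1 of type IP_address</B
></TT
>
resource ws1? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE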
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3655"
>5.5.2.1. Defining a Resource with the Cluster Manager GUI</A
></H3
><P
>To define a resource with the Cluster Manager GUI, perform the following
steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Define
a New Resource&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
><LI
><P
>On the right side of the display, click on the &#8220;Add/Remove
Dependencies for a Resource Definition&#8221; to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
><P
>When you use this task to define a resource, you define a cluster-wide
resource that is not specific to a node. For information on defining a node-specific
resource, see <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESOURCE"
>Section 5.5.3</A
>.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="LE42004-PARENT"
>5.5.2.2. Defining a Resource with the Cluster Manager CLI</A
></H3
><P
>Use the following CLI command to define a clusterwide resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define resource</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>] [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>Entering this command specifies the name and resource type of the resource
you are defining within a specified cluster. If you have specified a default
cluster or a default resource type, you do not need to specify a resource
type or a cluster in this command and the CLI will use the default.</P
><P
>When you use this command to define a resource, you define a clusterwide
resource that is not specific to a node. For information on defining a node-specific
resource, see <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESOURCE"
>Section 5.5.3</A
>.</P
><P
>The following prompt appears:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>resource A?</PRE
></TD
></TR
></TABLE
><P
>When this prompt appears during resource creation, you can enter the
following commands to specify the attributes of the resource you are defining
and to add and remove dependencies from the resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>resource A? <TT
CLASS="USERINPUT"
><B
>set</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>key</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>to</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>value</I
></TT
>
resource A? <TT
CLASS="USERINPUT"
><B
>add dependency</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>E</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>F</I
></TT
>
resource A? <TT
CLASS="USERINPUT"
><B
>remove dependency</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>E</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>F</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>The attributes you define with the <B
CLASS="COMMAND"
>set </B
><TT
CLASS="REPLACEABLE"
><I
>key</I
></TT
><B
CLASS="COMMAND"
>&#8194;to </B
><TT
CLASS="REPLACEABLE"
><I
>value</I
></TT
>
command will depend on the type of resource you are defining, as described
in <A
HREF="le53159-parent.html#FS-DEFINERESOURCE"
>Section 5.5.1</A
>.</P
><P
>For detailed information on how to determine the format for defining
resource attributes, see <A
HREF="le53159-parent.html#LE20812-PARENT"
>Section 5.5.2.3</A
>.</P
><P
>When you are finished defining the resource and its dependencies, enter <TT
CLASS="FILENAME"
>done</TT
> to return to the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="LE20812-PARENT"
>5.5.2.3. Specifying Resource Attributes with Cluster Manager
CLI</A
></H3
><P
>To see the format in which you can specify the user-specific attributes
that you need to set for a particular resource type, you can enter the following
command to see the full definition of that resource type:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_type </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>For example, to see the <TT
CLASS="REPLACEABLE"
><I
>key</I
></TT
> attributes you
define for a resource of a defined resource type <TT
CLASS="FILENAME"
>IP_address</TT
>,
you would enter the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62;  <TT
CLASS="USERINPUT"
><B
>show resource_type IP_address in cluster nfs-cluster</B
></TT
>

Name: IP_address
Predefined: true
Order: 401
Restart mode: 1
Restart count: 2

Action name: stop
        Executable: /usr/lib/failsafe/resource_types/IP_address/stop
        Maximum execution time: 80000ms
        Monitoring interval: 0ms
        Start monitoring time: 0ms
Action name: exclusive
        Executable: /usr/lib/failsafe/resource_types/IP_address/exclusive
        Maximum execution time: 100000ms
        Monitoring interval: 0ms
        Start monitoring time: 0ms
Action name: start
        Executable: /usr/lib/failsafe/resource_types/IP_address/start
        Maximum execution time: 80000ms
        Monitoring interval: 0ms
        Start monitoring time: 0ms
Action name: restart
        Executable: /usr/lib/failsafe/resource_types/IP_address/restart
        Maximum execution time: 80000ms
        Monitoring interval: 0ms
        Start monitoring time: 0ms
Action name: monitor
        Executable: /usr/lib/failsafe/resource_types/IP_address/monitor
        Maximum execution time: 40000ms
        Monitoring interval: 20000ms
        Start monitoring time: 50000ms

Type specific attribute: NetworkMask
        Data type: string
Type specific attribute: interfaces
        Data type: string
Type specific attribute: BroadcastAddress
        Data type: string

No resource type dependencies</PRE
></TD
></TR
></TABLE
><P
>The display reflects the format in which you can specify the attributes
of the resource. In this case, the <TT
CLASS="FILENAME"
>NetworkMask</TT
> key specifies the subnet mask of
the IP address, the <TT
CLASS="FILENAME"
>interfaces</TT
> key specifies the interfaces
on which the IP address can be configured, and the <TT
CLASS="FILENAME"
>BroadcastAddress</TT
> key specifies
the broadcast address for the IP address.</P
><P
>For example, to configure the IP address on the <TT
CLASS="FILENAME"
>eth0</TT
> interface, enter
the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>resource A? <TT
CLASS="USERINPUT"
><B
>set devname-group to sys</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>The remainder of this section summarizes the attributes you specify
for the predefined Linux FailSafe resource types with the <TT
CLASS="REPLACEABLE"
><I
>set
key to value</I
></TT
> command of the Cluster Manager CLI.</P
><P
> When you define an
IP address, you specify the following attributes:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>NetworkMask</TT
></DT
><DD
><P
>The subnet mask of the IP address</P
></DD
><DT
><TT
CLASS="LITERAL"
>interfaces</TT
></DT
><DD
><P
>A comma-separated list of interfaces on which the IP address can be
configured</P
></DD
><DT
><TT
CLASS="LITERAL"
>BroadcastAddress</TT
></DT
><DD
><P
>The broadcast address for the IP address</P
></DD
></DL
></DIV
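><P
>Putting this together, the following is a sketch of defining an IP
address resource. The resource name 192.26.50.1 is the example address
used earlier in this chapter; the cluster name, network mask, broadcast
address, and interface values are hypothetical and must match your own
network:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define resource 192.26.50.1 of resource_type IP_address in cluster test</B
></TT
>
resource 192.26.50.1? <TT
CLASS="USERINPUT"
><B
>set NetworkMask to 255.255.255.0</B
></TT
>
resource 192.26.50.1? <TT
CLASS="USERINPUT"
><B
>set BroadcastAddress to 192.26.50.255</B
></TT
>
resource 192.26.50.1? <TT
CLASS="USERINPUT"
><B
>set interfaces to eth0</B
></TT
>
resource 192.26.50.1? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE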
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINEMACHSPECRESOURCE"
>5.5.3. Defining a Node-Specific Resource</A
></H2
><P
> You can redefine an existing resource with a
resource definition that applies only to a particular node. Only existing
clusterwide resources can be redefined; resources already defined for a specific
cluster node cannot be redefined.</P
><P
>You
use this feature when you configure heterogeneous clusters for an <TT
CLASS="LITERAL"
>IP_address</TT
> resource. For example, <TT
CLASS="LITERAL"
>IP_address</TT
>
192.26.50.2 can be configured on et0 on an SGI Challenge node and on eth0
on all other Linux servers. The clusterwide resource definition for 192.26.50.2 will have
the <TT
CLASS="LITERAL"
>interfaces</TT
> field set to eth0 and the node-specific
definition for the Challenge node will have et0 as the <TT
CLASS="LITERAL"
>interfaces</TT
> field.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3778"
>5.5.3.1. Defining a Node-Specific Resource with the Cluster Manager GUI</A
></H3
><P
>Using the Cluster Manager GUI, you can take an existing clusterwide
resource definition and redefine it for use on a specific node in the cluster:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Redefine
a Resource For a Specific Node&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="LE39339-PARENT"
>5.5.3.2. Defining a Node-Specific Resource with the Cluster
Manager CLI</A
></H3
><P
>You can use the Cluster Manager CLI to redefine a clusterwide resource
to be specific to a node just as you define a clusterwide resource, except
that you specify a node on the <B
CLASS="COMMAND"
>define resource</B
> command.</P
><P
>Use the following CLI command to define a node-specific resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define resource</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
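><P
>For example, the clusterwide definition of the <TT
CLASS="LITERAL"
>IP_address</TT
> resource 192.26.50.2 described above could be redefined for the SGI
Challenge node (shown here with the hypothetical node name <TT
CLASS="LITERAL"
>challenge1</TT
> and cluster name <TT
CLASS="LITERAL"
>test</TT
>) so that its <TT
CLASS="LITERAL"
>interfaces</TT
> field is et0:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define resource 192.26.50.2 of resource_type IP_address on node challenge1 in cluster test</B
></TT
>
resource 192.26.50.2? <TT
CLASS="USERINPUT"
><B
>set interfaces to et0</B
></TT
>
resource 192.26.50.2? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE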
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-MODIFYDELRESOURCE"
>5.5.4. Modifying and Deleting Resources</A
></H2
><P
> After you have defined resources,
you can modify and delete them.</P
><P
>You can modify only the type-specific attributes for a resource. You
cannot rename a resource once it has been defined.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>There are some resource attributes whose modification does not take
effect until the resource group containing that resource is brought online
again. For example, if you modify the export options of a resource of type
NFS, the modifications do not take effect immediately; they take effect when
the resource is brought online.</P
></BLOCKQUOTE
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3819"
>5.5.4.1. Modifying and Deleting Resources with the Cluster Manager GUI</A
></H3
><P
>To modify a resource with the Cluster Manager GUI, perform the following
procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Modify
a Resource Definition&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
><P
>To delete a resource with the Cluster Manager GUI, perform the following
procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Delete
a Resource&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3845"
>5.5.4.2. Modifying and Deleting Resources with the Cluster Manager CLI</A
></H3
><P
>Use the following CLI command to modify a resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify resource</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>Entering this command specifies the name and resource type of the resource
you are modifying within a specified cluster. If you have specified a default
cluster, you do not need to specify a cluster in this command and the CLI
will use the default.</P
><P
>You modify a resource using the same commands you use to define a resource.</P
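><P
>For example, to change the export options of the NFS resource <TT
CLASS="FILENAME"
>/haathi</TT
> from the configuration example in this guide (the new value shown here
is hypothetical), you might enter the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify resource /haathi of resource_type NFS in cluster failsafe-cluster</B
></TT
>
resource /haathi? <TT
CLASS="USERINPUT"
><B
>set export-info to rw,sync</B
></TT
>
resource /haathi? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE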
><P
>You can use the following command to delete a resource definition:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>delete resource</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
>]</PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN3865"
>5.5.5. Displaying Resources</A
></H2
><P
>You can display resources in various ways. You can
display the attributes of a particular defined resource, you can display all
of the defined resources in a specified resource group, or you can display
all the defined resources of a specified resource type.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3871"
>5.5.5.1. Displaying Resources with the Cluster Manager GUI</A
></H3
><P
>The Cluster Manager GUI provides a convenient display of resources through
the FailSafe Cluster View. You can launch the FailSafe Cluster View directly,
or you can bring it up at any time by clicking on the &#8220;FailSafe Cluster
View&#8221; button at the bottom of the &#8220;FailSafe Manager&#8221; display.</P
><P
>From the View menu of the FailSafe Cluster View, select Resources to
see all defined resources. The status of these resources will be shown in
the icon (green indicates online, grey indicates offline). Alternately, you
can select &#8220;Resources of Type&#8221; from the View menu to see resources
organized by resource type, or you can select &#8220;Resources by Group&#8221;
to see resources organized by resource group.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3875"
>5.5.5.2. Displaying Resources with the Cluster Manager CLI</A
></H3
><P
>Use the following command to view the parameters of a defined resource:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;of resource_type </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Use the following command to view all of the defined resources in a
resource group:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resources in resource_group</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
><P
>Use the following command to view all of the defined resources of a
particular resource type in a specified cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resources of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
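><P
>For example, to list all of the NFS resources defined in the cluster
from the configuration example in this guide, you might enter the
following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resources of resource_type NFS in cluster failsafe-cluster</B
></TT
></PRE
></TD
></TR
></TABLE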
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINERESOURCETYPE"
>5.5.6. Defining a Resource Type</A
></H2
><P
>The Linux FailSafe software includes many
predefined resource types. If these types fit the application you want to
make into a highly available service, you can reuse them. If none fits, you
can define additional resource types.</P
><P
>Complete information on defining resource types is provided in the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
>. This manual provides a summary
of that information.</P
><P
>To define a new resource type, you must have the following information:</P
><P
></P
><UL
><LI
><P
>Name of the resource type, with a maximum length of 255 characters.</P
></LI
><LI
><P
>Name of the cluster to which the resource type will apply.</P
></LI
><LI
><P
>Node on which the resource type will apply, if the resource
type is to be restricted to a specific node.</P
></LI
><LI
><P
>Order of performing the action scripts for resources of this
type in relation to resources of other types:</P
><P
></P
><UL
><LI
><P
>Resources are started in the increasing order of this value</P
></LI
><LI
><P
>Resources are stopped in the decreasing order of this value</P
><P
>See the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
> for
a full description of the order ranges available.</P
></LI
></UL
></LI
><LI
><P
>Restart mode, which can be one of the following values:</P
><P
></P
><UL
><LI
><P
>0 = Do not restart on monitoring failures</P
></LI
><LI
><P
>1 = Restart a fixed number of times</P
></LI
></UL
></LI
><LI
><P
>Number of local restarts (when restart mode is 1).</P
></LI
><LI
><P
>Location of the executable script. This is always  <TT
CLASS="FILENAME"
>/usr/lib/failsafe/resources_types/</TT
><TT
CLASS="REPLACEABLE"
><I
>rtname</I
></TT
>,
where <TT
CLASS="REPLACEABLE"
><I
>rtname</I
></TT
> is the resource type name.</P
></LI
><LI
><P
>Monitoring interval, which is the time period (in milliseconds)
between successive executions of the <B
CLASS="COMMAND"
>monitor</B
> action script;
this is only valid for the <B
CLASS="COMMAND"
>monitor</B
> action script.</P
></LI
><LI
><P
>Starting time for monitoring. When the resource group is brought
online on a cluster node, Linux FailSafe will start monitoring the resources
after the specified time period (in milliseconds).</P
></LI
><LI
><P
>Action scripts to be defined for this resource type. You must
specify scripts for <B
CLASS="COMMAND"
>start</B
>, <B
CLASS="COMMAND"
>stop</B
>, <B
CLASS="COMMAND"
>exclusive</B
>, and <B
CLASS="COMMAND"
>monitor</B
>, although the <B
CLASS="COMMAND"
>monitor</B
> script may contain only a return-success function if you
wish. If you specify 1 for the restart mode, you must specify a <B
CLASS="COMMAND"
>restart</B
> script. </P
></LI
><LI
><P
>Type-specific attributes to be defined for this resource type.
The action scripts use this information to start, stop, and monitor a resource
of this resource type. For example, NFS requires the following resource keys:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="FILENAME"
>export-point</TT
>, which takes a value that
defines the export disk name. This name is used as input to the <B
CLASS="COMMAND"
>exportfs</B
> command. For example:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>export-point = /this_disk</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
><TT
CLASS="FILENAME"
>export-info</TT
>, which takes a value that
defines the export options for the filesystem. These options are used in the <B
CLASS="COMMAND"
>exportfs</B
> command. For example:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>export-info = rw,sync,no_root_squash</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
><TT
CLASS="FILENAME"
>filesystem</TT
>, which takes a value that
defines the raw filesystem. This name is used as input to the <B
CLASS="COMMAND"
>mount</B
> command. For example:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>filesystem = /dev/sda1</PRE
></TD
></TR
></TABLE
></LI
></UL
></LI
></UL
><P
>To define a new resource type, you use the Cluster Manager GUI or the
Cluster Manager CLI.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3969"
>5.5.6.1. Defining a Resource Type with the Cluster Manager GUI</A
></H3
><P
>To define a resource type with the Cluster Manager GUI, perform the
following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Define
a Resource Type&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3983"
>5.5.6.2. Defining a Resource Type with the Cluster Manager CLI</A
></H3
><P
>The following steps show the use of <B
CLASS="COMMAND"
>cluster_mgr</B
> interactively
to define a resource type called <TT
CLASS="LITERAL"
>test_rt</TT
>.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Log in as <B
CLASS="COMMAND"
>root</B
>.</P
></LI
><LI
><P
>Execute the <B
CLASS="COMMAND"
>cluster_mgr </B
>command using the <B
CLASS="COMMAND"
>-p</B
> option to prompt you for information (the command name can be
abbreviated to <B
CLASS="COMMAND"
>cmgr</B
>):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/cluster_mgr -p</B
></TT
>
Welcome to Linux FailSafe Cluster Manager Command-Line Interface

cmgr&#62;</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Use the <B
CLASS="COMMAND"
>set</B
> subcommand to specify the default
cluster used for <B
CLASS="COMMAND"
>cluster_mgr</B
> operations. In this example,
we use a cluster named <TT
CLASS="LITERAL"
>test</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set cluster test</B
></TT
></PRE
></TD
></TR
></TABLE
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>If you prefer, you can specify the cluster name as needed with each
subcommand.</P
></BLOCKQUOTE
></DIV
></LI
><LI
><P
>Use the <B
CLASS="COMMAND"
>define resource_type</B
> subcommand.
By default, the resource type will apply across the cluster; if you wish to
limit the resource_type to a specific node, enter the node name when prompted.
If you wish to enable restart mode, enter 1 when prompted.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>&#8194;The following example only shows the prompts and answers for two
action scripts (<B
CLASS="COMMAND"
>start</B
> and <B
CLASS="COMMAND"
>stop</B
>) for
a new resource type named <B
CLASS="COMMAND"
>test_rt</B
>.</P
></BLOCKQUOTE
></DIV
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define resource_type test_rt</B
></TT
>

(Enter "cancel" at any time to abort)

Node[optional]?
Order ? <TT
CLASS="USERINPUT"
><B
>300</B
></TT
>
Restart Mode ? (0)

DEFINE RESOURCE TYPE OPTIONS

&#8194;       0) Modify Action Script.
&#8194;       1) Add Action Script.
&#8194;       2) Remove Action Script.
&#8194;       3) Add Type Specific Attribute.
&#8194;       4) Remove Type Specific Attribute.
&#8194;       5) Add Dependency.
&#8194;       6) Remove Dependency.
&#8194;       7) Show Current Information.
&#8194;       8) Cancel. (Aborts command)
&#8194;       9) Done. (Exits and runs command)

Enter option:<TT
CLASS="USERINPUT"
><B
>1</B
></TT
>

No current resource type actions

Action name ? <TT
CLASS="USERINPUT"
><B
>start</B
></TT
>
Executable Time? <TT
CLASS="USERINPUT"
><B
>40000</B
></TT
>
Monitoring Interval? <TT
CLASS="USERINPUT"
><B
>0</B
></TT
>
Start Monitoring Time? <TT
CLASS="USERINPUT"
><B
>0</B
></TT
>

&#8194;       0) Modify Action Script.
&#8194;       1) Add Action Script.
&#8194;       2) Remove Action Script.
&#8194;       3) Add Type Specific Attribute.
&#8194;       4) Remove Type Specific Attribute.
&#8194;       5) Add Dependency.
&#8194;       6) Remove Dependency.
&#8194;       7) Show Current Information.
&#8194;       8) Cancel. (Aborts command)
&#8194;       9) Done. (Exits and runs command)

Enter option:<TT
CLASS="USERINPUT"
><B
>1</B
></TT
>

Current resource type actions:
&#8194;       Action - 1: start

Action name ? <TT
CLASS="USERINPUT"
><B
>stop</B
></TT
>
Executable Time? <TT
CLASS="USERINPUT"
><B
>40000</B
></TT
>
Monitoring Interval? <TT
CLASS="USERINPUT"
><B
>0</B
></TT
>
Start Monitoring Time? <TT
CLASS="USERINPUT"
><B
>0</B
></TT
>&#8194;

&#8194;       0) Modify Action Script.
&#8194;       1) Add Action Script.
&#8194;       2) Remove Action Script.
&#8194;       3) Add Type Specific Attribute.
&#8194;       4) Remove Type Specific Attribute.
&#8194;       5) Add Dependency.
&#8194;       6) Remove Dependency.
&#8194;       7) Show Current Information.
&#8194;       8) Cancel. (Aborts command)
&#8194;       9) Done. (Exits and runs command)

Enter option:<TT
CLASS="USERINPUT"
><B
>3</B
></TT
>

No current type specific attributes

Type Specific Attribute ? <TT
CLASS="USERINPUT"
><B
>integer-att</B
></TT
>
Datatype ? <TT
CLASS="USERINPUT"
><B
>integer</B
></TT
>
Default value[optional] ? <TT
CLASS="USERINPUT"
><B
>33</B
></TT
>

&#8194;       0) Modify Action Script.
&#8194;       1) Add Action Script.
&#8194;       2) Remove Action Script.
&#8194;       3) Add Type Specific Attribute.
&#8194;       4) Remove Type Specific Attribute.
&#8194;       5) Add Dependency.
&#8194;       6) Remove Dependency.
&#8194;       7) Show Current Information.
&#8194;       8) Cancel. (Aborts command)
&#8194;       9) Done. (Exits and runs command)

Enter option:<TT
CLASS="USERINPUT"
><B
>3</B
></TT
>

Current type specific attributes:
&#8194;       Type Specific Attribute - 1: integer-att

Type Specific Attribute ? <TT
CLASS="USERINPUT"
><B
>string-att</B
></TT
>
Datatype ? <TT
CLASS="USERINPUT"
><B
>string</B
></TT
>
Default value[optional] ? <TT
CLASS="USERINPUT"
><B
>rw</B
></TT
>

&#8194;       0) Modify Action Script.
&#8194;       1) Add Action Script.
&#8194;       2) Remove Action Script.
&#8194;       3) Add Type Specific Attribute.
&#8194;       4) Remove Type Specific Attribute.
&#8194;       5) Add Dependency.
&#8194;       6) Remove Dependency.
&#8194;       7) Show Current Information.
&#8194;       8) Cancel. (Aborts command)
&#8194;       9) Done. (Exits and runs command)

Enter option:<TT
CLASS="USERINPUT"
><B
>5</B
></TT
>

No current resource type dependencies

Dependency name ? <TT
CLASS="USERINPUT"
><B
>filesystem</B
></TT
>

&#8194;       0) Modify Action Script.
&#8194;       1) Add Action Script.
&#8194;       2) Remove Action Script.
&#8194;       3) Add Type Specific Attribute.
&#8194;       4) Remove Type Specific Attribute.
&#8194;       5) Add Dependency.
&#8194;       6) Remove Dependency.
&#8194;       7) Show Current Information.
&#8194;       8) Cancel. (Aborts command)
&#8194;       9) Done. (Exits and runs command)

Enter option:<TT
CLASS="USERINPUT"
><B
>7</B
></TT
>

Current resource type actions:
&#8194;       Action - 1: start
&#8194;       Action - 2: stop

Current type specific attributes:
&#8194;       Type Specific Attribute - 1: integer-att
&#8194;       Type Specific Attribute - 2: string-att

No current resource type dependencies

Resource dependencies to be added:
&#8194;       Resource dependency - 1: filesystem

&#8194;       0) Modify Action Script.
&#8194;       1) Add Action Script.
&#8194;       2) Remove Action Script.
&#8194;       3) Add Type Specific Attribute.
&#8194;       4) Remove Type Specific Attribute.
&#8194;       5) Add Dependency.
&#8194;       6) Remove Dependency.
&#8194;       7) Show Current Information.
&#8194;       8) Cancel. (Aborts command)
&#8194;       9) Done. (Exits and runs command)

Enter option:<TT
CLASS="USERINPUT"
><B
>9</B
></TT
>
Successfully created resource_type test_rt

cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_types</B
></TT
>

NFS
template
Netscape_web
test_rt
statd
Oracle_DB
MAC_address
IP_address
INFORMIX_DB
filesystem
volume

cmgr&#62; <TT
CLASS="USERINPUT"
><B
>exit</B
></TT
>
#</PRE
></TD
></TR
></TABLE
></LI
></OL
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINEMACHSPECRESTYPE"
>5.5.7. Defining a Node-Specific Resource Type</A
></H2
><P
> You can redefine an existing
resource type with a definition that applies only to a particular
node. Only existing clusterwide resource types can be redefined; resource
types already defined for a specific cluster node cannot be redefined.</P
><P
>A resource type that is defined for a node overrides a
cluster-wide resource type definition with the same name; this allows an individual
node to override global settings from a clusterwide resource type definition.
You can use this feature if you want to have different script timeouts for
a node or you want to restart a resource on only one node in the cluster.</P
><P
>For example, the <TT
CLASS="LITERAL"
>IP_address</TT
> resource has local restart
enabled by default. If you would like to have an IP address type without local
restart for a particular node, you can make a copy of the <TT
CLASS="LITERAL"
>IP_address</TT
> clusterwide resource type with all of the parameters the same except
for restart mode, which you set to 0.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4055"
>5.5.7.1. Defining a Node-Specific Resource Type with the Cluster Manager GUI</A
></H3
><P
>Using the Cluster Manager GUI, you can take an existing clusterwide
resource type definition and redefine it for use on a specific node in the
cluster. Perform the following tasks:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Redefine
a Resource Type For a Specific Node&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4069"
>5.5.7.2. Defining a Node-Specific Resource Type with the Cluster Manager CLI</A
></H3
><P
>With the Cluster Manager CLI, you redefine a node-specific resource
type just as you define a cluster-wide resource type, except that you specify
a node on the <B
CLASS="COMMAND"
>define resource_type</B
> command.</P
><P
>Use the following CLI command to define a node-specific resource type:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>on node</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
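><P
>For example, assuming the cluster test used earlier and a node named
venus, the following command begins redefining the IP_address resource
type for that node only:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; define resource_type IP_address on node venus in cluster test</PRE
></TD
></TR
></TABLE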
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-ADDDEPTORESTYPE"
>5.5.8. Adding Dependencies to a Resource Type</A
></H2
><P
>Like resources, a resource type can be
dependent on one or more other resource types. If such a dependency exists,
at least one instance of each of the dependent resource types must be defined.
For example, a resource type named <B
CLASS="COMMAND"
>Netscape_web</B
> might have
resource type dependencies on a resource type named <B
CLASS="COMMAND"
>IP_address</B
>
and <B
CLASS="COMMAND"
>volume</B
>. If a resource named <B
CLASS="COMMAND"
>ws1</B
>
is defined with the <B
CLASS="COMMAND"
>Netscape_web</B
> resource type, then the
resource group containing <B
CLASS="COMMAND"
>ws1</B
> must also contain at least
one resource of the type <B
CLASS="COMMAND"
>IP_address</B
> and one resource of
the type <B
CLASS="COMMAND"
>volume</B
>.</P
><P
>When using the Cluster Manager GUI, you add or remove dependencies for
a resource type by selecting the &#8220;Add/Remove Dependencies for a Resource
Type&#8221; from the &#8220;Resources &#38; Resource Types&#8221; display
and providing the indicated input. When using the Cluster Manager CLI, you
add or remove dependencies when you define or modify the resource type.</P
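><P
>For example, a minimal cmgr sketch that adds a dependency on the
IP_address resource type to the test_rt resource type defined earlier
(menus and other prompts elided) might look like the following:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; modify resource_type test_rt in cluster test
...
Enter option:5

Dependency name ? IP_address
...
Enter option:9</PRE
></TD
></TR
></TABLE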
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-MODIFYRESTYPE"
>5.5.9. Modifying and Deleting Resource Types</A
></H2
><P
> After you have defined
resource types, you can modify and delete them.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4106"
>5.5.9.1. Modifying and Deleting Resource Types with the Cluster Manager GUI</A
></H3
><P
>To modify a resource type with the Cluster Manager GUI, perform the
following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Modify
a Resource Type Definition&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
><P
>To delete a resource type with the Cluster Manager GUI, perform the
following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Resources
&#38; Resource Types&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Delete
a Resource Type&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4132"
>5.5.9.2. Modifying and Deleting Resource Types with the Cluster Manager CLI</A
></H3
><P
>Use the following CLI command to modify a resource type:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>Entering this command specifies the resource type you are modifying
within a specified cluster. If you have specified a default cluster, you do
not need to specify a cluster in this command and the CLI will use the default.</P
><P
>You modify a resource type using the same commands you use to define
a resource type.</P
><P
>You can use the following command to delete a resource type:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>delete resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
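><P
>For example, to delete the test_rt resource type created earlier from
the cluster test:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; delete resource_type test_rt in cluster test</PRE
></TD
></TR
></TABLE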
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-LOADRESOURCETYPE"
>5.5.10. Installing (Loading) a Resource Type on a Cluster</A
></H2
><P
> When you define a cluster, Linux
FailSafe installs a set of resource type definitions, with default values,
that you can use. If you need to install additional standard Silicon
Graphics-supplied resource type definitions on the cluster, or if you delete
a standard resource type definition and wish to reinstall it, you can load
that resource type definition on the cluster.</P
><P
>The resource type definition you are installing must not already exist
on the cluster.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4157"
>5.5.10.1. Installing a Resource Type with the Cluster Manager GUI</A
></H3
><P
>To install a resource type using the GUI, select the &#8220;Load a Resource&#8221;
task from the &#8220;Resources &#38; Resource Types&#8221; task page and enter
the resource type to load.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4160"
>5.5.10.2. Installing a Resource Type with the Cluster Manager CLI</A
></H3
><P
>Use the following CLI command to install a resource type on a cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>install resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
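><P
>For example, to reinstall the standard NFS resource type definition on
the cluster test:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; install resource_type NFS in cluster test</PRE
></TD
></TR
></TABLE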
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4169"
>5.5.11. Displaying Resource Types</A
></H2
><P
>After you have defined resource types,
you can display them.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4175"
>5.5.11.1. Displaying Resource Types with the Cluster Manager GUI</A
></H3
><P
>The Cluster Manager GUI provides a convenient display of resource types
through the FailSafe Cluster View. You can launch the FailSafe Cluster View
directly, or you can bring it up at any time by clicking on the &#8220;FailSafe
Cluster View&#8221; prompt at the bottom of the &#8220;FailSafe Manager&#8221;
display.</P
><P
>From the View menu of the FailSafe Cluster View, select Types to see
all defined resource types. You can then click on any of the resource type
icons to view the parameters of the resource type.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4179"
>5.5.11.2. Displaying Resource Types with the Cluster Manager CLI</A
></H3
><P
>Use the following command to view the parameters of a defined resource
type in a specified cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_type </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
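><P
>For example, to view the parameters of the NFS resource type defined in
the cluster test:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; show resource_type NFS in cluster test</PRE
></TD
></TR
></TABLE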
><P
>Use the following command to view all of the defined resource types
in a cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_types</B
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
><P
>Use the following command to view all of the defined resource types
that have been installed:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_types</B
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>installed</B
></TT
></PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINEFAILOVER"
>5.5.12. Defining a Failover Policy</A
></H2
><P
>Before you can configure your resources into a resource group, you must
determine which failover policy to apply to the resource group. To define
a failover policy, you provide the following information:</P
><P
></P
><UL
><LI
><P
>The name of the failover policy, with a maximum length of
63 characters, which must be unique within the pool.</P
></LI
><LI
><P
>The name of an existing failover script.</P
></LI
><LI
><P
>The initial failover domain, which is an ordered list of the
nodes on which the resource group may execute. The administrator supplies
the initial failover domain when configuring the failover policy; this is
input to the failover script, which generates the runtime failover domain.</P
></LI
><LI
><P
>The failover attributes, which modify the behavior of the
failover script.</P
></LI
></UL
><P
>Complete information on failover policies and failover scripts, with
an emphasis on writing your own failover policies and scripts, is provided
in the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
>.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4215"
>5.5.12.1. Failover Scripts</A
></H3
><P
> A <I
CLASS="GLOSSTERM"
>failover script</I
>
helps determine the node that is chosen for a failed resource group. The failover
script takes the initial failover domain and transforms it into the runtime
failover domain. Depending upon the contents of the script, the initial and
the runtime domains may be identical.</P
><P
>The <TT
CLASS="FILENAME"
>ordered</TT
> failover script is provided with the
Linux FailSafe release. The <TT
CLASS="FILENAME"
>ordered</TT
> script never changes
the initial domain; when using this script, the initial and runtime domains
are equivalent.</P
><P
>The <TT
CLASS="FILENAME"
>round-robin</TT
> failover script is also provided
with the Linux FailSafe release. The <TT
CLASS="FILENAME"
>round-robin</TT
> script
selects the resource group owner in a round-robin (circular) fashion. This
policy can be used for resource groups that can be run in any node in the
cluster.</P
><P
>Failover scripts are stored in the  <B
CLASS="COMMAND"
>/usr/lib/failsafe/policies</B
> directory. If the  <B
CLASS="COMMAND"
>ordered</B
> script does not meet
your needs, you can define a new failover script and place it in the  <TT
CLASS="FILENAME"
>/usr/lib/failsafe/policies</TT
> directory. When you are using the FailSafe
GUI, the GUI automatically detects your script and presents it to you as a
choice for you to use. You can configure the Linux FailSafe database to use
your new failover script for the required resource groups. For information
on defining failover scripts, see the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's
Guide</I
>.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4235"
>5.5.12.2. Failover Domain</A
></H3
><P
>A <I
CLASS="GLOSSTERM"
>failover domain</I
> is the ordered list of nodes
on which a given resource group can be allocated. The nodes listed in the
failover domain must be within the same cluster; however, the failover domain
does not have to include every node in the cluster. The failover domain can
be used to statically load balance the resource groups in a cluster.</P
><P
>Examples:</P
><P
></P
><UL
><LI
><P
>In a four-node cluster, two nodes might share a volume. The
failover domain of the resource group containing the volume will be the two
nodes that share the volume.</P
></LI
><LI
><P
>If you have a cluster of nodes named venus, mercury, and pluto,
you could configure the following initial failover domains for resource groups
RG1 and RG2:  </P
><P
></P
><UL
><LI
><P
>venus, mercury, pluto for RG1</P
></LI
><LI
><P
>pluto, mercury for RG2</P
></LI
></UL
></LI
></UL
><P
>When you define a failover policy, you specify the <I
CLASS="GLOSSTERM"
>initial
failover domain</I
>. The initial failover domain is used when a cluster
is first booted. The ordered list specified by the initial failover domain
is transformed into a <I
CLASS="GLOSSTERM"
>runtime failover domain</I
> by
the failover script. With each failure, the failover script takes the current
run-time failover domain and potentially modifies it; the initial failover
domain is never used again. Depending on the run-time conditions and contents
of the failover script, the initial and run-time failover domains may be identical.</P
><P
>Linux FailSafe stores the run-time failover domain and uses it as input
to the next failover script invocation.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4263"
>5.5.12.3. Failover Attributes</A
></H3
><P
> A failover attribute is a value that
is passed to the failover script and used by Linux FailSafe for the purpose
of modifying the run-time failover domain used for a specific resource group.
You can specify a failover attribute of <TT
CLASS="FILENAME"
>Auto_Failback</TT
>, <TT
CLASS="FILENAME"
>Controlled_Failback</TT
>, <TT
CLASS="FILENAME"
>Auto_Recovery</TT
>, or <TT
CLASS="FILENAME"
>InPlace_Recovery</TT
>. <TT
CLASS="FILENAME"
>Auto_Failback</TT
> and <TT
CLASS="FILENAME"
>Controlled_Failback</TT
> are
mutually exclusive, but you must specify one or the other. <TT
CLASS="FILENAME"
>Auto_Recovery</TT
> and <TT
CLASS="FILENAME"
>InPlace_Recovery</TT
> are mutually exclusive,
but specifying either one of them is optional.</P
><P
>A failover attribute of <TT
CLASS="FILENAME"
>Auto_Failback</TT
> specifies
that the resource group will be run on the first available node in the runtime
failover domain. If the first node fails, the next available node will be
used; when the first node reboots, the resource group will return to it. This
attribute is best used when some type of load balancing is required.</P
><P
>A failover attribute of <TT
CLASS="FILENAME"
>Controlled_Failback</TT
> specifies
that the resource group will be run on the first available node in the runtime
failover domain, and will remain running on that node until it fails. If the
first node fails, the next available node will be used; the resource group
will remain on this new node even after the first node reboots. This attribute
is best used when client/server applications have expensive recovery mechanisms,
such as databases or any application that uses <B
CLASS="COMMAND"
>tcp</B
> to communicate.</P
><P
>The recovery attributes <TT
CLASS="FILENAME"
>Auto_Recovery</TT
> and <TT
CLASS="FILENAME"
>InPlace_Recovery</TT
> determine the node on which a resource group will
be allocated when its state changes to online and a member of the group is
already allocated (such as when volumes are present). <TT
CLASS="FILENAME"
>Auto_Recovery</TT
> specifies that the failover policy will be used to allocate the
resource group; this is the default recovery attribute if you have specified
the <TT
CLASS="FILENAME"
>Auto_Failback</TT
> attribute. <TT
CLASS="FILENAME"
>InPlace_Recovery</TT
> specifies that the resource group will be allocated on the node
that already contains part of the resource group; this is the default recovery
attribute if you have specified the <TT
CLASS="FILENAME"
>Controlled_Failback</TT
>
attribute.</P
><P
>See the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
> for
a full discussion of example failover policies.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4291"
>5.5.12.4. Defining a Failover Policy with the Cluster Manager GUI</A
></H3
><P
>To define a failover policy using the GUI, perform the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Define
a Failover Policy&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4305"
>5.5.12.5. Defining a Failover Policy with the Cluster Manager CLI</A
></H3
><P
>To define a failover policy, enter the following command at the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt to specify the name of the failover policy:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define failover_policy </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>The following prompt appears:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>failover_policy <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>?</PRE
></TD
></TR
></TABLE
><P
>When this prompt appears you can use the following commands to specify
the components of a failover policy:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>failover_policy <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set attribute to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>
failover_policy <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set script to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>
failover_policy <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set domain to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
>
failover_policy <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? </PRE
></TD
></TR
></TABLE
><P
>When you define a failover policy, you can set as many attributes and
domains as your setup requires by executing the <B
CLASS="COMMAND"
>set attribute</B
>
and <B
CLASS="COMMAND"
>set domain</B
> commands with different values. The CLI
also allows you to specify multiple domains in one command of the following
format:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>failover_policy <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set domain to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A B C</I
></TT
> ..<TT
CLASS="REPLACEABLE"
><I
>.</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>The components of a failover policy are described in detail in the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
> and in summary in <A
HREF="le53159-parent.html#FS-DEFINEFAILOVER"
>Section 5.5.12</A
>.</P
><P
>When you are finished defining the failover policy, enter <TT
CLASS="FILENAME"
>done</TT
> to return to the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt.</P
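><P
>Putting these commands together, the following is a minimal sketch of
defining a hypothetical failover policy named fp1 that uses the ordered
failover script and the example nodes venus, mercury, and pluto:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; define failover_policy fp1
failover_policy fp1? set attribute to Auto_Failback
failover_policy fp1? set script to ordered
failover_policy fp1? set domain to venus mercury pluto
failover_policy fp1? done

cmgr&#62;</PRE
></TD
></TR
></TABLE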
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-MODIFYDELFAILOVERPOLICY"
>5.5.13. Modifying and Deleting Failover Policies</A
></H2
><P
>After you have defined a failover policy, you can modify or delete it.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4344"
>5.5.13.1. Modifying and Deleting Failover Policies with the Cluster Manager GUI</A
></H3
><P
>To modify a failover policy with the Cluster Manager GUI, perform the
following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Modify
a Failover Policy Definition&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
><P
>To delete a failover policy with the Cluster Manager GUI, perform the
following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Delete
a Failover Policy&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4370"
>5.5.13.2. Modifying and Deleting Failover Policies with the Cluster Manager CLI</A
></H3
><P
>Use the following CLI command to modify a failover policy:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify failover_policy</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>&#8194;</PRE
></TD
></TR
></TABLE
><P
>You modify a failover policy using the same commands you use to define
a failover policy.</P
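><P
>For example, to reorder the failover domain of the hypothetical fp1
policy sketched in the previous section:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; modify failover_policy fp1
failover_policy fp1? set domain to pluto mercury venus
failover_policy fp1? done</PRE
></TD
></TR
></TABLE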
><P
>You can use the following command to delete a failover policy definition:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>delete failover_policy</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4381"
>5.5.14. Displaying Failover Policies</A
></H2
><P
>You can use Linux FailSafe to display any of the following:</P
><P
></P
><UL
><LI
><P
>The components of a specified failover policy</P
></LI
><LI
><P
>All of the failover policies that have been defined</P
></LI
><LI
><P
>All of the failover policy attributes that have been defined</P
></LI
><LI
><P
>All of the failover policy scripts that have been defined</P
></LI
></UL
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4393"
>5.5.14.1. Displaying Failover Policies with the Cluster Manager GUI</A
></H3
><P
>The Cluster Manager GUI provides a convenient display of failover policies
through the FailSafe Cluster View. You can launch the FailSafe Cluster View
directly, or you can bring it up at any time by clicking on the &#8220;FailSafe
Cluster View&#8221; prompt at the bottom of the &#8220;FailSafe Manager&#8221;
display.</P
><P
>From the View menu of the FailSafe Cluster View, select Failover Policies
to see all defined failover policies.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4397"
>5.5.14.2. Displaying Failover Policies with the Cluster Manager CLI</A
></H3
><P
>Use the following command to view the parameters of a defined failover
policy:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show failover_policy </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Use the following command to view all of the defined failover policies:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show failover_policies</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Use the following command to view all of the defined failover policy
attributes:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show failover_policy attributes</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Use the following command to view all of the defined failover policy
scripts:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show failover_policy scripts</B
></TT
></PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINERESGROUP"
>5.5.15. Defining Resource Groups</A
></H2
><P
>Resources are configured together into <I
CLASS="FIRSTTERM"
>resource group</I
>s. A resource group is a collection of interdependent
resources. If any individual resource in a resource group becomes unavailable
for its intended use, then the entire resource group is considered unavailable.
Therefore, a resource group is the unit of failover for Linux FailSafe.</P
><P
>For example, a resource group could contain all of the resources that
are required for the operation of a web server, such as the web server itself,
the IP address with which it communicates to the outside world, and the disk
volumes containing the content that it serves.</P
><P
>When you define a resource group, you specify a <I
CLASS="FIRSTTERM"
>failover
policy</I
>. A failover policy controls the behavior of a resource
group in failure situations.</P
><P
>To define a resource group, you provide the following information:</P
><P
></P
><UL
><LI
><P
>The name of the resource group, with a maximum length of 63
characters.</P
></LI
><LI
><P
>The name of the cluster to which the resource group is available</P
></LI
><LI
><P
>The resources to include in the resource group, and their
resource types</P
></LI
><LI
><P
>The name of the failover policy that determines which node
will take over the services of the resource group on failure</P
></LI
></UL
><P
>Linux FailSafe does not allow resource groups that do not contain any
resources to be brought online.</P
><P
>You can define up to 100 resources configured in any number of resource
groups.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="FS-ADDRESOURCESTORESGROUP"
>5.5.15.1. Defining a Resource Group with the Cluster Manager GUI</A
></H3
><P
>To define a resource group with the Cluster Manager GUI, perform the
following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on &#8220;Guided Configuration&#8221;.</P
></LI
><LI
><P
>On the right side of the display click on &#8220;Set Up Highly
Available Resource Groups&#8221; to launch the task link.</P
></LI
><LI
><P
>In the resulting window, click each task link in turn, as
it becomes available. Enter the selected inputs for each task.</P
></LI
><LI
><P
>When finished, click &#8220;OK&#8221; to close the taskset
window.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4448"
>5.5.15.2. Defining a Resource Group with the Cluster Manager CLI</A
></H3
><P
>To configure a resource group, enter the following command at the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt to specify the name of a resource group and the cluster
to which the resource group is available:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>Entering this command specifies the name of the resource group you are
defining within a specified cluster. If you have specified a default cluster,
you do not need to specify a cluster in this command and the CLI will use
the default.</P
><P
>The following prompt appears:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Enter commands, when finished enter either "done" or "cancel"
resource_group <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>?</PRE
></TD
></TR
></TABLE
><P
>When this prompt appears you can use the following commands to specify
the resources to include in the resource group and the failover policy to
apply to the resource group:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>resource_group <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>add resource</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>
resource_group <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set failover_policy to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>After you have set the failover policy and you have finished adding
resources to the resource group, enter <TT
CLASS="FILENAME"
>done</TT
> to return
to the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt.</P
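><P
>For example, the following minimal sketch creates a hypothetical resource
group named web_rg in the cluster test, adds a hypothetical filesystem
resource named /sharedA, and applies the hypothetical failover policy fp1
sketched earlier:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; define resource_group web_rg in cluster test
Enter commands, when finished enter either "done" or "cancel"
resource_group web_rg? add resource /sharedA of resource_type filesystem
resource_group web_rg? set failover_policy to fp1
resource_group web_rg? done</PRE
></TD
></TR
></TABLE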
><P
>For a full example of resource group creation using the Cluster Manager
CLI, see <A
HREF="le40511-parent.html"
>Section 5.7</A
>.</P
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-MODIFYRESGROUP"
>5.5.16. Modifying and Deleting Resource Groups</A
></H2
><P
> After you have defined
resource groups, you can modify and delete the resource groups. You can change
the failover policy of a resource group by specifying a new failover policy
associated with that resource group, and you can add or delete resources to
the existing resource group. Note, however, that since you cannot have a resource
group online that does not contain any resources, Linux FailSafe does not
allow you to delete all resources from a resource group once the resource
group is online. Likewise, Linux FailSafe does not allow you to bring a resource
group online if it has no resources. Also, resources must be added and deleted
in atomic units; this means that resources which are interdependent must be
added and deleted together.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4485"
>5.5.16.1. Modifying and Deleting Resource Groups with the Cluster Manager GUI</A
></H3
><P
>To modify a resource group with the Cluster Manager GUI, perform the
following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Modify
a Resource Group Definition&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
><P
>To add or remove resources in a resource group definition with the Cluster
Manager GUI, perform the following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Add/Remove
Resources in Resource Group&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
><P
>To delete a resource group with the Cluster Manager GUI, perform the
following procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Failover
Policies &#38; Resource Groups&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Delete
a Resource Group&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4523"
>5.5.16.2. Modifying and Deleting Resource Groups with the Cluster Manager CLI</A
></H3
><P
>Use the following CLI command to modify a resource group:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default. You modify a resource
group using the same commands you use to define a resource group:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>resource_group <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>add resource</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>&#8194;<TT
CLASS="USERINPUT"
><B
>of resource_type</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>
resource_group <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set failover_policy to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>You can use the following command to delete a resource group definition:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>delete resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN4548"
>5.5.17. Displaying Resource Groups</A
></H2
><P
>You can display the parameters of a defined
resource group, and you can display all of the resource groups defined for
a cluster.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4554"
>5.5.17.1. Displaying Resource Groups with the Cluster Manager GUI</A
></H3
><P
>The Cluster Manager GUI provides a convenient display of resource groups
through the FailSafe Cluster View. You can launch the FailSafe Cluster View
directly, or you can bring it up at any time by clicking on the &#8220;FailSafe
Cluster View&#8221; prompt at the bottom of the &#8220;FailSafe Manager&#8221;
display.</P
><P
>From the View menu of the FailSafe Cluster View, select Groups to see
all defined resource groups.</P
><P
>To display which nodes are currently running which groups, select &#8220;Groups
owned by Nodes.&#8221; To display which groups are running which failover
policies, select &#8220;Groups by Failover Policies.&#8221;</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN4559"
>5.5.17.2. Displaying Resource Groups with the Cluster Manager CLI</A
></H3
><P
>Use the following command to view the parameters of a defined resource
group:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_group </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
>[<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster in this command and the CLI will use the default.</P
><P
>Use the following command to view all of the defined resource groups:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show resource_groups</B
></TT
> [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>]</PRE
></TD
></TR
></TABLE
></DIV
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="z957104627glen.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="fs-setlogparams.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Cluster Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe System Log Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Filesystem Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Planning Linux FailSafe Configuration"
HREF="le88622-parent.html"><LINK
REL="PREVIOUS"
TITLE="Logical Volume Configuration"
HREF="le96329-parent.html"><LINK
REL="NEXT"
TITLE="IP Address Configuration"
HREF="le84104-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le96329-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 2. Planning Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le84104-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE53947-PARENT"
>2.4. Filesystem Configuration</A
></H1
><P
> The first subsection below describes filesystem issues that must
be considered when planning a Linux FailSafe system. The second subsection
gives an example of an XFS filesystem configuration on a Linux FailSafe system.
The third subsection explains the aspects of the configuration that must be
specified for a Linux FailSafe system.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE24179-PARENT"
>2.4.1. Planning Filesystems</A
></H2
><P
>The Linux FailSafe software supports the automatic failover of filesystems,
including XFS, ext2fs, and reiserfs, on shared disks. Shared disks must be
either mirrored or RAID storage systems that are shared between the nodes
in the two-node Linux FailSafe cluster.</P
><P
>The following are special issues that you need to be aware of when you
are working with filesystems on shared disks in a Linux FailSafe cluster:</P
><P
></P
><UL
><LI
><P
>All filesystems to be failed over must be supported by FailSafe.</P
></LI
><LI
><P
>For availability, filesystems to be failed over in a Linux
FailSafe cluster should be created either on mirrored disks or using a system
that supports hardware mirroring of the data such as a RAID storage system.</P
></LI
><LI
><P
>Create the mount points for the filesystems on all nodes in
the failover domain, as shown in the example following this list.</P
></LI
><LI
><P
>When you set up the various highly available filesystems
on each node, make sure that each filesystem uses a different mount point.</P
></LI
><LI
><P
>Do not simultaneously mount filesystems on shared disks on
more than one node. Doing so causes data corruption. Normally, Linux FailSafe
performs all mounts of filesystems on shared disks. If you manually mount
a filesystem on a shared disk, make sure that it is not being used by another
node.</P
></LI
><LI
><P
>Do not place filesystems on shared disks in the <TT
CLASS="FILENAME"
>/etc/fstab</TT
> file. Linux FailSafe mounts these filesystems only after
making sure that another node does not have these filesystems mounted.</P
></LI
></UL
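><P
>For example, assuming the /sharedA and /sharedB mount points used in the
example configuration below, you would create the mount points with a
command such as the following on every node in the failover domain:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># mkdir -p /sharedA /sharedB</PRE
></TD
></TR
></TABLE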
><P
>The resource name of a resource of the <TT
CLASS="LITERAL"
>filesystem</TT
>
resource type is the mount point of the filesystem.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>When clients are actively writing to a Linux FailSafe NFS filesystem
during failover of filesystems, data corruption can occur unless filesystems
are exported with the mode <TT
CLASS="LITERAL"
>sync</TT
>. This mode requires that
local mounts of the filesystems use the <TT
CLASS="LITERAL"
>sync</TT
> mount mode
as well. Using <TT
CLASS="LITERAL"
>sync</TT
> affects performance considerably.</P
></BLOCKQUOTE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN1588"
>2.4.2. Example Filesystem Configuration</A
></H2
><P
>Continuing with the example configuration from <A
HREF="le34382-parent.html#LE10802-PARENT"
>Section 2.2.2</A
>,
say that volumes A and B have XFS filesystems on them:</P
><P
></P
><UL
><LI
><P
>The filesystem on volume A is mounted at <TT
CLASS="FILENAME"
>/sharedA</TT
> with modes <TT
CLASS="LITERAL"
>rw</TT
> and <TT
CLASS="LITERAL"
>noauto</TT
>.
Call it filesystem A.</P
></LI
><LI
><P
>The filesystem on volume B is mounted at <TT
CLASS="FILENAME"
>/sharedB</TT
> with modes <TT
CLASS="LITERAL"
>rw</TT
>, <TT
CLASS="LITERAL"
>noauto</TT
>, and <TT
CLASS="LITERAL"
>wsync</TT
>.  Call it filesystem B.</P
></LI
></UL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN1604"
>2.4.3. Configuration Parameters for Filesystems</A
></H2
><P
> <A
HREF="le53947-parent.html#LE31422-PARENT"
>Table 2-2</A
> lists a label and configuration
parameters for each filesystem.</P
><DIV
CLASS="TABLE"
><A
NAME="LE31422-PARENT"
></A
><P
><B
>Table 2-2. Filesystem Configuration Parameters</B
></P
><TABLE
BORDER="1"
WIDTH="100%"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="21%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Resource Attribute</P
></TH
><TH
WIDTH="16%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
><TT
CLASS="LITERAL"
>/sharedA</TT
></P
></TH
><TH
WIDTH="23%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
><TT
CLASS="LITERAL"
>/sharedB</TT
></P
></TH
><TH
WIDTH="39%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Comments</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="21%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>monitoring-level</TT
></P
></TD
><TD
WIDTH="16%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>2</TT
></P
></TD
><TD
WIDTH="23%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>2</TT
></P
></TD
><TD
WIDTH="39%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>There are two types of monitoring:</P
><P
>1 &#8211; checks the<TT
CLASS="LITERAL"
> /etc/mtab</TT
> file</P
><P
>2 &#8211; checks if the filesystem is mounted using the <B
CLASS="COMMAND"
>stat</B
>
command</P
></TD
></TR
><TR
><TD
WIDTH="21%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>volume-name</TT
></P
></TD
><TD
WIDTH="16%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>volA</TT
></P
></TD
><TD
WIDTH="23%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>volB</TT
></P
></TD
><TD
WIDTH="39%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>The label of the logical volume on
which the filesystem was created.</P
></TD
></TR
><TR
><TD
WIDTH="21%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>mode</TT
></P
></TD
><TD
WIDTH="16%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>rw,noauto</TT
></P
></TD
><TD
WIDTH="23%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>rw,noauto,wsync</TT
></P
></TD
><TD
WIDTH="39%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>The mount options used to mount the
filesystem. These are specified in the same way as the options for the mount
command or for filesystems listed in <TT
CLASS="FILENAME"
>/etc/fstab</TT
>. </P
></TD
></TR
></TBODY
></TABLE
></DIV
><P
>See <A
HREF="le39637-parent.html"
>Section 3.5</A
>, for information about creating
XFS filesystems.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le96329-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le84104-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Logical Volume Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le88622-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>IP Address Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Upgrading and Maintaining Active Clusters</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Recovery Procedures"
HREF="le26593-parent.html"><LINK
REL="NEXT"
TITLE="Adding a Node to an Active Cluster"
HREF="le40594-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le26593-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le40594-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE55630-PARENT"
>Chapter 10. Upgrading and Maintaining Active Clusters</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>10.1. <A
HREF="le40594-parent.html"
>Adding a Node to an Active Cluster</A
></DT
><DT
>10.2. <A
HREF="le15663-parent.html"
>Deleting a Node from an Active Cluster</A
></DT
><DT
>10.3. <A
HREF="x6931.html"
>Changing Control Networks in a Cluster</A
></DT
><DT
>10.4. <A
HREF="le26765-parent.html"
>Upgrading OS Software in an Active Cluster</A
></DT
><DT
>10.5. <A
HREF="le31814-parent.html"
>Upgrading FailSafe Software in an Active Cluster</A
></DT
><DT
>10.6. <A
HREF="le18685-parent.html"
>Adding New Resource Groups or Resources in an Active
Cluster</A
></DT
><DT
>10.7. <A
HREF="le32198-parent.html"
>Adding a New Hardware Device in an Active Cluster</A
></DT
></DL
></DIV
><P
>When a Linux FailSafe system is running, you may need to perform various
administration procedures without shutting down the entire cluster. This chapter
provides instructions for performing upgrade and maintenance procedures on
active clusters. It includes the following procedures:</P
><P
></P
><UL
><LI
><P
><A
HREF="le40594-parent.html"
>Section 10.1</A
></P
></LI
><LI
><P
><A
HREF="le15663-parent.html"
>Section 10.2</A
></P
></LI
><LI
><P
><A
HREF="le26765-parent.html"
>Section 10.4</A
></P
></LI
><LI
><P
><A
HREF="le31814-parent.html"
>Section 10.5</A
></P
></LI
><LI
><P
><A
HREF="le18685-parent.html"
>Section 10.6</A
></P
></LI
><LI
><P
><A
HREF="le32198-parent.html"
>Section 10.7</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le26593-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le40594-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Recovery Procedures</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Adding a Node to an Active Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Testing Linux FailSafe Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Backing Up and Restoring Configuration With Cluster
Manager CLI"
HREF="le37674-parent.html"><LINK
REL="NEXT"
TITLE="Overview of FailSafe Diagnostic Commands"
HREF="le67057-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le37674-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le67057-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE56830-PARENT"
>Chapter 8. Testing Linux FailSafe Configuration</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>8.1. <A
HREF="le67057-parent.html"
>Overview of FailSafe Diagnostic Commands</A
></DT
><DT
>8.2. <A
HREF="le42786-parent.html"
>Performing Diagnostic Tasks with the Cluster
Manager GUI</A
></DT
><DD
><DL
><DT
>8.2.1. <A
HREF="le42786-parent.html#FS-TESTCONNECTIVITY"
>Testing Connectivity with the Cluster Manager GUI</A
></DT
><DT
>8.2.2. <A
HREF="le42786-parent.html#FS-TESTRESOURCES"
>Testing Resources with the Cluster Manager GUI</A
></DT
><DT
>8.2.3. <A
HREF="le42786-parent.html#FS-TESTFAILOVERPOL"
>Testing Failover Policies with the Cluster Manager GUI</A
></DT
></DL
></DD
><DT
>8.3. <A
HREF="le37273-parent.html"
>Performing Diagnostic Tasks with the Cluster
Manager CLI</A
></DT
><DD
><DL
><DT
>8.3.1. <A
HREF="le37273-parent.html#AEN6044"
>Testing the Serial Connections with the Cluster Manager CLI</A
></DT
><DT
>8.3.2. <A
HREF="le37273-parent.html#LE11186-PARENT"
>Testing Network Connectivity with the Cluster
Manager CLI</A
></DT
><DT
>8.3.3. <A
HREF="le37273-parent.html#LE28447-PARENT"
>Testing Resources with the Cluster Manager CLI</A
></DT
><DT
>8.3.4. <A
HREF="le37273-parent.html#LE29671-PARENT"
>Testing Failover Policies with the Cluster Manager
CLI</A
></DT
></DL
></DD
></DL
></DIV
><P
>This chapter explains how to test the Linux FailSafe system configuration
using the Cluster Manager GUI and the Cluster Manager CLI. For general
information on using the Cluster Manager GUI and the Cluster Manager CLI,
see <A
HREF="le73346-parent.html"
>Chapter 4</A
>.</P
><P
>The sections in this chapter are as follows:</P
><P
></P
><UL
><LI
><P
><A
HREF="le67057-parent.html"
>Section 8.1</A
></P
></LI
><LI
><P
><A
HREF="le42786-parent.html"
>Section 8.2</A
></P
></LI
><LI
><P
><A
HREF="le37273-parent.html"
>Section 8.3</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le37674-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le67057-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Backing Up and Restoring Configuration With Cluster
Manager CLI</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Overview of FailSafe Diagnostic Commands</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Introduction to Configuration Planning</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Planning Linux FailSafe Configuration"
HREF="le88622-parent.html"><LINK
REL="PREVIOUS"
TITLE="Planning Linux FailSafe Configuration"
HREF="le88622-parent.html"><LINK
REL="NEXT"
TITLE="Disk Configuration"
HREF="le34382-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le88622-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 2. Planning Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le34382-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE57040-PARENT"
>2.1. Introduction to Configuration Planning</A
></H1
><P
>Configuration planning involves
making decisions about how you plan to use the Linux FailSafe cluster, and
based on that, how the disks and interfaces must be set up to meet the needs
of the highly available services you want the cluster to provide. Questions
you must answer during the planning process are:</P
><P
></P
><UL
><LI
><P
>What do you plan to use the nodes for?</P
><P
>Your answers might include uses such as offering home directories for
users, running particular applications, supporting an Oracle database, providing
Netscape World Wide Web service, and providing file service.</P
></LI
><LI
><P
>Which of these uses will be provided as a highly available
service?</P
><P
> To offer an application as a highly available service when it is not already
available as a Linux FailSafe software option, you must develop a set of
application monitoring shell scripts that provide switchover and switchback
functionality. Developing these scripts is described in the <I
CLASS="CITETITLE"
>Linux
FailSafe Programmer's Guide</I
>. If you need assistance in this regard,
contact SGI Global Services, which offers custom Linux FailSafe agent development
and HA integration services.</P
></LI
><LI
><P
>Which node will be the primary node for each highly available
service?</P
><P
>The primary node is the node that provides the service (exports the
filesystem, is a Netscape server, provides the database, and so on) when the
node is in an UP state.</P
></LI
><LI
><P
>For each highly available service, how will the software and
data be distributed on shared and non-shared disks?</P
><P
>Each application has requirements and choices for placing its software
on disks that are failed over (shared) or not failed over (non-shared).</P
></LI
><LI
><P
>Are the shared disks going to be part of a RAID storage system
or are they going to be disks in SCSI/Fibre channel disk storage that has
mirroring such as the Linux Raid Tools implemented on them?</P
><P
>For reliability, shared disks must be part of a RAID storage system
or in SCSI/Fibre channel disk storage with mirroring on them.</P
></LI
><LI
><P
>Will the shared disks be used as raw devices/volumes or as
volumes with filesystems on them?</P
><P
>Logical volumes, filesystems, and raw partitions are all supported by
Linux FailSafe. The choice of volumes, filesystems, or raw devices depends
on the application that is going to use the disk space.</P
></LI
><LI
><P
>Which IP addresses will be used by clients of highly available
services?</P
><P
>Multiple interfaces may be required on each node because a node could
be connected to more than one network or because there could be more than
one interface to a single network.</P
></LI
><LI
><P
>Which resources will be part of a resource group?</P
><P
>All resources that are dependent on each other have to be in the resource
group.</P
></LI
><LI
><P
>What will be the failover domain of the resource group?</P
><P
>The failover domain determines the list of nodes in the cluster where
the resource group can reside. For example, a volume resource that is part
of a resource group can reside only in nodes from which the disks composing
the volume can be accessed.</P
></LI
><LI
><P
>How many highly available IP addresses
on each network interface will be available to clients of the highly available
services?</P
><P
>At least one highly available IP address must be available for each
interface on each node that is used by clients of highly available services.</P
></LI
><LI
><P
>Which IP addresses on primary nodes are going to be available
to clients of the highly available services?</P
></LI
><LI
><P
>For each highly available IP address that is available on
a primary node to clients of highly available services, which interface on
the other nodes will be assigned that IP address after a failover?</P
><P
>Every highly available IP address used by a highly available service
must be mapped to at least one interface in each node that can take over the
resource group service. The highly available IP addresses are failed over
from the interface in the primary node of the resource group to the interface
in the replacement node.</P
></LI
></UL
><P
>As an example of the configuration planning process, say that you have
a two-node Linux FailSafe cluster that is a departmental server. You want
to make four XFS filesystems available for NFS mounting and have two Netscape
FastTrack servers, each serving a different set of documents. These applications
will be highly available services.</P
><P
>You decide to distribute the services across two nodes, so each node
will be the primary node for two filesystems and one Netscape server. The
filesystems and the document roots for the Netscape servers (on XFS filesystems)
are each on their own striped LVM logical volume. The logical volumes are
created from disks in a RAID storage system connected to both nodes.</P
><P
>There are four resource groups: NFSgroup1 and NFSgroup2 are the NFS
resource groups, and Webgroup1 and Webgroup2 are the Web resource groups.
NFSgroup1 and Webgroup1 will have one node as the primary node. NFSgroup2
and Webgroup2 will have the other node as the primary node.</P
><P
>Two networks are available on each node, eth0 and eth1. The eth0 interfaces
in each node are connected to each other to form a private network.</P
><P
>The following sections help you answer the configuration questions above,
make additional configuration decisions required by Linux FailSafe, and collect
the information you need to perform the configuration tasks described in <A
HREF="le32854-parent.html"
>Chapter 3</A
>, and <A
HREF="le94219-parent.html"
>Chapter 5</A
>.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le88622-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le34382-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Planning Linux FailSafe Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le88622-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Disk Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Setting Configuration Defaults</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="NEXT"
TITLE="Name Restrictions"
HREF="le28499-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le94219-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le28499-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE59477-PARENT"
>5.1. Setting Configuration Defaults</A
></H1
><P
> Before
you configure the components of a FailSafe system, you can set default values
that Linux FailSafe will use when you define some of those components.</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
>Default cluster</DT
><DD
><P
>Certain cluster manager commands require you to specify a cluster. You
can specify a default cluster to use as the default if you do not specify
a cluster explicitly.</P
></DD
><DT
>Default node</DT
><DD
><P
>Certain cluster manager commands require you to specify a node. With
the Cluster Manager CLI, you can specify a default node to use as the default
if you do not specify a node explicitly.</P
></DD
><DT
>Default resource type</DT
><DD
><P
>Certain cluster manager commands require you to specify a resource type.
With the Cluster Manager CLI, you can specify a default resource type to use
as the default if you do not specify a resource type explicitly.</P
></DD
></DL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN3015"
>5.1.1. Setting Default Cluster with the Cluster Manager GUI</A
></H2
><P
>The GUI prompts you to enter the name of the default cluster when you
have not specified one. Alternately, you can set the default cluster by clicking
the &#8220;Select Cluster...&#8221; button at the bottom of the FailSafe Manager
window.</P
><P
>When using the GUI, there is no need to set a default node or resource
type.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN3019"
>5.1.2. Setting and Viewing Configuration Defaults with the Cluster Manager
CLI</A
></H2
><P
>When you are using the Cluster Manager CLI, you can use the following
commands to specify default values. The default values are in effect only
for the current session of the Cluster Manager CLI.</P
><P
>Use the following command to specify a default cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Use the following command to specify a default node:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Use the following command to specify a default resource type:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set resource_type </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>You can view the current default configuration values of the Cluster
Manager CLI with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show set defaults</B
></TT
></PRE
></TD
></TR
></TABLE
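><P
>For example, the following hypothetical session sets all three defaults
and then displays them (the cluster, node, and resource type names shown
here are placeholders only):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; set cluster test-cluster
cmgr&#62; set node node1
cmgr&#62; set resource_type IP_address
cmgr&#62; show set defaults</PRE
></TD
></TR
></TABLE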
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le28499-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Name Restrictions</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Overview of FailSafe Diagnostic Commands</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Testing Linux FailSafe Configuration"
HREF="le56830-parent.html"><LINK
REL="PREVIOUS"
TITLE="Testing Linux FailSafe Configuration"
HREF="le56830-parent.html"><LINK
REL="NEXT"
TITLE="Performing Diagnostic Tasks with the Cluster
Manager GUI"
HREF="le42786-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le56830-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 8. Testing Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le42786-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE67057-PARENT"
>8.1. Overview of FailSafe Diagnostic Commands</A
></H1
><P
>  <A
HREF="le67057-parent.html#LE10721-PARENT"
>Table 8-1</A
>
shows the tests you can perform with Linux FailSafe diagnostic commands:</P
><DIV
CLASS="TABLE"
><A
NAME="LE10721-PARENT"
></A
><P
><B
>Table 8-1. FailSafe Diagnostic Test Summary</B
></P
><TABLE
BORDER="1"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="29%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Diagnostic Test</P
></TH
><TH
WIDTH="71%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Checks Performed</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="29%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>resource</P
></TD
><TD
WIDTH="71%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Checks that the resource type parameters
are set</P
><P
>Checks that the parameters are syntactically correct</P
><P
>Validates that the parameters exist</P
></TD
></TR
><TR
><TD
WIDTH="29%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>resource group</P
></TD
><TD
WIDTH="71%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Tests all resources defined in
the resource group</P
></TD
></TR
><TR
><TD
WIDTH="29%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>failover policy</P
></TD
><TD
WIDTH="71%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Checks that the failover policy
exists</P
><P
>Checks that the failover domain contains a valid list
of hosts</P
></TD
></TR
><TR
><TD
WIDTH="29%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>network connectivity</P
></TD
><TD
WIDTH="71%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Checks that the control interfaces
are on the same network</P
><P
>Checks that the nodes can communicate
with each other</P
></TD
></TR
><TR
><TD
WIDTH="29%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>serial connection</P
></TD
><TD
WIDTH="71%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>Checks that the nodes can reset
each other</P
></TD
></TR
></TBODY
></TABLE
></DIV
><P
>All transactions are logged to the diagnostics file <TT
CLASS="FILENAME"
>diags_</TT
><I
CLASS="EMPHASIS"
>nodename</I
> in the log directory.</P
><P
>You should test resource groups before starting FailSafe HA services
or starting a resource group. These tests are designed to check for resource
inconsistencies that could prevent the resource group from starting successfully.</P
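><P
>For example, you might check network connectivity from the Cluster Manager
CLI before starting HA services. The following is a sketch only; the exact
command forms are described in <A
HREF="le37273-parent.html"
>Section 8.3</A
>, and the cluster and node names here are placeholders:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; test connectivity in cluster test-cluster on node node1</PRE
></TD
></TR
></TABLE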
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le56830-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le42786-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Testing Linux FailSafe Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le56830-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Performing Diagnostic Tasks with the Cluster
Manager GUI</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Linux FailSafe Administration Tools</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Configuration for Reset"
HREF="le90681-parent.html"><LINK
REL="NEXT"
TITLE="The Linux FailSafe Cluster Manager Tools"
HREF="le74378-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le90681-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le74378-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE73346-PARENT"
>Chapter 4. Linux FailSafe Administration Tools</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>4.1. <A
HREF="le74378-parent.html"
>The Linux FailSafe Cluster Manager Tools</A
></DT
><DT
>4.2. <A
HREF="fs-guioverview.html"
>Using the Linux FailSafe Cluster Manager GUI</A
></DT
><DD
><DL
><DT
>4.2.1. <A
HREF="fs-guioverview.html#FS-CLUSTERVIEWOVERVIEW"
>The FailSafe Cluster View</A
></DT
><DT
>4.2.2. <A
HREF="fs-guioverview.html#FS-TASKMANOVERVIEW"
>The FailSafe Manager</A
></DT
><DT
>4.2.3. <A
HREF="fs-guioverview.html#LE35303-PARENT"
>Starting the FailSafe Manager GUI</A
></DT
><DT
>4.2.4. <A
HREF="fs-guioverview.html#LE49425-PARENT"
>Opening the FailSafe Cluster View window</A
></DT
><DT
>4.2.5. <A
HREF="fs-guioverview.html#AEN2636"
>Viewing Cluster Item Details</A
></DT
><DT
>4.2.6. <A
HREF="fs-guioverview.html#AEN2645"
>Performing Tasks</A
></DT
><DT
>4.2.7. <A
HREF="fs-guioverview.html#AEN2668"
>Using the Linux FailSafe Tasksets</A
></DT
></DL
></DD
><DT
>4.3. <A
HREF="le15969-parent.html"
>Using the FailSafe Cluster Manager CLI</A
></DT
><DD
><DL
><DT
>4.3.1. <A
HREF="le15969-parent.html#AEN2713"
>Entering CLI Commands Directly</A
></DT
><DT
>4.3.2. <A
HREF="le15969-parent.html#AEN2741"
>Invoking the Cluster Manager CLI in Prompt Mode</A
></DT
><DT
>4.3.3. <A
HREF="le15969-parent.html#AEN2798"
>Using Input Files of CLI Commands</A
></DT
><DT
>4.3.4. <A
HREF="le15969-parent.html#LE41514-PARENT"
>CLI Command Scripts</A
></DT
><DT
>4.3.5. <A
HREF="le15969-parent.html#LE10673-PARENT"
>CLI Template Scripts</A
></DT
><DT
>4.3.6. <A
HREF="le15969-parent.html#AEN2957"
>Invoking a Shell from within CLI</A
></DT
></DL
></DD
></DL
></DIV
><P
>This chapter describes Linux FailSafe administration tools and their
operation. The major sections in this chapter are as follows:</P
><P
></P
><UL
><LI
><P
><A
HREF="le74378-parent.html"
>Section 4.1</A
></P
></LI
><LI
><P
><A
HREF="fs-guioverview.html"
>Section 4.2</A
></P
></LI
><LI
><P
><A
HREF="le15969-parent.html"
>Section 4.3</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le90681-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le74378-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Configuration for Reset</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>The Linux FailSafe Cluster Manager Tools</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Overview of the Linux FailSafe System</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Conventions Used in This Guide"
HREF="x149.html"><LINK
REL="NEXT"
TITLE="High Availability and Linux FailSafe"
HREF="le27299-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="x149.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le27299-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE73529-PARENT"
>Chapter 1. Overview of the Linux FailSafe System</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>1.1. <A
HREF="le27299-parent.html"
>High Availability and Linux FailSafe</A
></DT
><DT
>1.2. <A
HREF="le89728-parent.html"
>Concepts</A
></DT
><DD
><DL
><DT
>1.2.1. <A
HREF="le89728-parent.html#AEN271"
>Cluster Node (or Node)</A
></DT
><DT
>1.2.2. <A
HREF="le89728-parent.html#AEN280"
>Pool</A
></DT
><DT
>1.2.3. <A
HREF="le89728-parent.html#AEN286"
>Cluster</A
></DT
><DT
>1.2.4. <A
HREF="le89728-parent.html#AEN292"
>Node Membership</A
></DT
><DT
>1.2.5. <A
HREF="le89728-parent.html#AEN300"
>Process Membership</A
></DT
><DT
>1.2.6. <A
HREF="le89728-parent.html#AEN307"
>Resource</A
></DT
><DT
>1.2.7. <A
HREF="le89728-parent.html#AEN315"
>Resource Type</A
></DT
><DT
>1.2.8. <A
HREF="le89728-parent.html#AEN334"
>Resource Name</A
></DT
><DT
>1.2.9. <A
HREF="le89728-parent.html#AEN341"
>Resource Group</A
></DT
><DT
>1.2.10. <A
HREF="le89728-parent.html#AEN392"
>Resource Dependency List</A
></DT
><DT
>1.2.11. <A
HREF="le89728-parent.html#AEN396"
>Resource Type Dependency List</A
></DT
><DT
>1.2.12. <A
HREF="le89728-parent.html#AEN419"
>Failover</A
></DT
><DT
>1.2.13. <A
HREF="le89728-parent.html#AEN425"
>Failover Policy</A
></DT
><DT
>1.2.14. <A
HREF="le89728-parent.html#AEN440"
>Failover Domain</A
></DT
><DT
>1.2.15. <A
HREF="le89728-parent.html#AEN452"
>Failover Attribute</A
></DT
><DT
>1.2.16. <A
HREF="le89728-parent.html#AEN460"
>Failover Scripts</A
></DT
><DT
>1.2.17. <A
HREF="le89728-parent.html#AEN477"
>Action Scripts</A
></DT
></DL
></DD
><DT
>1.3. <A
HREF="le94860-parent.html"
>Additional Linux FailSafe Features</A
></DT
><DD
><DL
><DT
>1.3.1. <A
HREF="le94860-parent.html#AEN516"
>Dynamic Management</A
></DT
><DT
>1.3.2. <A
HREF="le94860-parent.html#AEN529"
>Fine Grain Failover</A
></DT
><DT
>1.3.3. <A
HREF="le94860-parent.html#AEN533"
>Local Restarts</A
></DT
></DL
></DD
><DT
>1.4. <A
HREF="le20463-parent.html"
>Linux FailSafe Administration</A
></DT
><DT
>1.5. <A
HREF="le32900-parent.html"
>Hardware Components of a Linux FailSafe Cluster</A
></DT
><DT
>1.6. <A
HREF="le45765-parent.html"
>Linux FailSafe Disk Connections</A
></DT
><DT
>1.7. <A
HREF="le79484-parent.html"
>Linux FailSafe Supported Configurations</A
></DT
><DT
>1.8. <A
HREF="le85141-parent.html"
>Highly Available Resources</A
></DT
><DD
><DL
><DT
>1.8.1. <A
HREF="le85141-parent.html#AEN633"
>Nodes</A
></DT
><DT
>1.8.2. <A
HREF="le85141-parent.html#LE80214-PARENT"
>Network Interfaces and IP Addresses</A
></DT
><DT
>1.8.3. <A
HREF="le85141-parent.html#AEN665"
>Disks</A
></DT
></DL
></DD
><DT
>1.9. <A
HREF="le19101-parent.html"
>Highly Available Applications</A
></DT
><DT
>1.10. <A
HREF="le19267-parent.html"
>Failover and Recovery Processes</A
></DT
><DT
>1.11. <A
HREF="le24477-parent.html"
>Overview of Configuring and Testing a New Linux
FailSafe Cluster</A
></DT
><DT
>1.12. <A
HREF="le15726-parent.html"
>Linux FailSafe System Software</A
></DT
><DD
><DL
><DT
>1.12.1. <A
HREF="le15726-parent.html#AEN750"
>Layers</A
></DT
><DT
>1.12.2. <A
HREF="le15726-parent.html#AEN950"
>Communication Paths</A
></DT
><DT
>1.12.3. <A
HREF="le15726-parent.html#AEN973"
>Conditions Under Which Action Scripts are Executed</A
></DT
><DT
>1.12.4. <A
HREF="le15726-parent.html#AEN993"
>When Does FailSafe Execute Action and Failover Scripts</A
></DT
><DT
>1.12.5. <A
HREF="le15726-parent.html#AEN1064"
>Components</A
></DT
></DL
></DD
></DL
></DIV
><P
>This chapter provides an overview of the components and operation of
the Linux FailSafe system. It contains these major sections:</P
><P
></P
><UL
><LI
><P
><A
HREF="le27299-parent.html"
>Section 1.1</A
></P
></LI
><LI
><P
><A
HREF="le89728-parent.html"
>Section 1.2</A
></P
></LI
><LI
><P
><A
HREF="le94860-parent.html"
>Section 1.3</A
></P
></LI
><LI
><P
><A
HREF="le20463-parent.html"
>Section 1.4</A
></P
></LI
><LI
><P
><A
HREF="le32900-parent.html"
>Section 1.5</A
></P
></LI
><LI
><P
><A
HREF="le45765-parent.html"
>Section 1.6</A
></P
></LI
><LI
><P
><A
HREF="le79484-parent.html"
>Section 1.7</A
></P
></LI
><LI
><P
><A
HREF="le85141-parent.html"
>Section 1.8</A
></P
></LI
><LI
><P
><A
HREF="le19101-parent.html"
>Section 1.9</A
></P
></LI
><LI
><P
><A
HREF="le19267-parent.html"
>Section 1.10</A
></P
></LI
><LI
><P
><A
HREF="le24477-parent.html"
>Section 1.11</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="x149.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le27299-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Conventions Used in This Guide</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>High Availability and Linux FailSafe</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>The Linux FailSafe Cluster Manager Tools</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Administration Tools"
HREF="le73346-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Administration Tools"
HREF="le73346-parent.html"><LINK
REL="NEXT"
TITLE="Using the Linux FailSafe Cluster Manager GUI"
HREF="fs-guioverview.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le73346-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 4. Linux FailSafe Administration Tools</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="fs-guioverview.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE74378-PARENT"
>4.1. The Linux FailSafe Cluster Manager Tools</A
></H1
><P
> You
can perform the Linux FailSafe administrative tasks using either of the following
tools:</P
><P
></P
><UL
><LI
><P
>The Linux FailSafe Cluster Manager Graphical User Interface
(GUI)</P
></LI
><LI
><P
>The Linux FailSafe Cluster Manager Command Line Interface
(CLI)</P
></LI
></UL
><P
>Although these tools use the same underlying software to configure and
monitor a Linux FailSafe system, the GUI provides the following additional
features, which are particularly important in a production system:</P
><P
></P
><UL
><LI
><P
>Online help is provided with the <SPAN
CLASS="INTERFACE"
>Help</SPAN
> button. You can also click any blue text to get more information
about that concept or input field.</P
></LI
><LI
><P
>The cluster state is shown visually for instant recognition
of status, problems, and failovers.</P
></LI
><LI
><P
>The state is updated dynamically for continuous system monitoring.</P
></LI
><LI
><P
>All inputs are checked for correct syntax before attempting
to change the cluster database information. In every task, the cluster configuration
will not update until you click <SPAN
CLASS="INTERFACE"
>OK</SPAN
>.</P
></LI
><LI
><P
>Tasks and tasksets take you step-by-step through configuration
and management operations, making actual changes to the cluster database as
you perform a task.</P
></LI
><LI
><P
>The graphical tools can be run securely and remotely on any
computer that has a Java virtual machine, including Windows computers
and laptops.</P
></LI
></UL
><P
>The Linux FailSafe Cluster Manager CLI, on the other hand, is more limited
in its functions. It enables you to configure and administer a Linux FailSafe
system using a command-line interface only on a Linux system. It provides
a minimum of help or formatted output and does not provide dynamic status
except when queried. An experienced Linux FailSafe administrator may find
the Cluster Manager CLI to be convenient when performing basic Linux FailSafe
configuration tasks, isolated single tasks in a production environment, or
when running scripts to automate some cluster administration tasks.</P
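><P
>As a sketch of such scripted use, assuming the CLI is invoked as <B
CLASS="COMMAND"
>cmgr</B
> and accepts an input file as described in <A
HREF="le15969-parent.html#AEN2798"
>Section 4.3.3</A
> (the file name, cluster name, and commands shown are illustrative only):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># cat /tmp/show-config
show clusters
show nodes in cluster test-cluster
quit
# cmgr -f /tmp/show-config</PRE
></TD
></TR
></TABLE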
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le73346-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="fs-guioverview.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Administration Tools</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73346-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Using the Linux FailSafe Cluster Manager GUI</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Linux FailSafe Supported Configurations</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Disk Connections"
HREF="le45765-parent.html"><LINK
REL="NEXT"
TITLE="Highly Available Resources"
HREF="le85141-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le45765-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le85141-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE79484-PARENT"
>1.7. Linux FailSafe Supported Configurations</A
></H1
><P
>Linux FailSafe supports the following highly available configurations:</P
><P
></P
><UL
><LI
><P
>Basic two-node configuration</P
></LI
><LI
><P
>Star configuration of multiple primary nodes and one backup node</P
></LI
><LI
><P
>Ring configuration</P
></LI
></UL
><P
>You can use the following reset models when configuring a Linux FailSafe
system:</P
><P
></P
><UL
><LI
><P
>Server-to-server. Each server is directly connected to another
for reset. May be unidirectional.</P
></LI
><LI
><P
>Network. Each server can reset any other by sending a signal
over the control network to a multiplexer.</P
></LI
></UL
><P
>The following sections provide descriptions of the different Linux FailSafe
configurations.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN622"
>1.7.1. Basic Two-Node Configuration</A
></H2
><P
>In a basic two-node configuration, the following arrangements are possible:</P
><P
></P
><UL
><LI
><P
>All highly available services run on one node. The other node
is the backup node. After failover, the services run on the backup node. In
this case, the backup node is a hot standby for failover purposes only. The
backup node can run other applications that are not highly available services.</P
></LI
><LI
><P
>Highly available services run concurrently on both nodes.
For each service, the other node serves as a backup node. For example, both
nodes can be exporting different NFS filesystems. If a failover occurs, one
node then exports all of the NFS filesystems.</P
></LI
></UL
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le45765-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le85141-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Disk Connections</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Highly Available Resources</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>isk Connections"
HREF="le45765-parent.html"><LINK
REL="NEXT"
TITLE="Highly Available Resources"
HREF="le85141-parent.html"></HEAD
><BODY
CLASS="html/le84104-parent.html010064400016050000001000000260610717757324000155210ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>IP Address Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Planning Linux FailSafe Configuration"
HREF="le88622-parent.html"><LINK
REL="PREVIOUS"
TITLE="Filesystem Configuration"
HREF="le53947-parent.html"><LINK
REL="NEXT"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le53947-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 2. Planning Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le32854-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE84104-PARENT"
>2.5. IP Address Configuration</A
></H1
><P
>  The first subsection below describes network interface
and IP address issues that must be considered when planning a Linux FailSafe
system. The second subsection gives an example of the configuration of network
interfaces and IP addresses on a Linux FailSafe system. The third subsection
explains the aspects of the configuration that must be specified for a Linux
FailSafe configuration.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE93615-PARENT"
>2.5.1. Planning Network Interface and IP Address Configuration</A
></H2
><P
>Follow these guidelines when planning the configuration of the interfaces
to the private network between the nodes in a cluster, which can be used as
a control network (this information is used when you define the nodes):</P
><P
></P
><UL
><LI
><P
>Each interface has one IP address.</P
></LI
><LI
><P
>The IP addresses used on each node for the interfaces to the
private network are on a different subnet from the IP addresses used for public
networks.</P
></LI
><LI
><P
>An IP name can be specified for each IP address in <TT
CLASS="FILENAME"
>/etc/hosts</TT
>.</P
></LI
><LI
><P
>Choosing a naming convention for these IP addresses that identifies
them with the private network can be helpful. For example, precede the hostname
with <TT
CLASS="LITERAL"
>priv-</TT
> (for <I
CLASS="FIRSTTERM"
>private</I
>), as in <TT
CLASS="LITERAL"
>priv-xfs-ha1</TT
> and <TT
CLASS="LITERAL"
>priv-xfs-ha2</TT
>. A sketch of the corresponding <TT
CLASS="FILENAME"
>/etc/hosts</TT
> entries follows this list.</P
></LI
></UL
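><P
>For illustration, a minimal <TT
CLASS="FILENAME"
>/etc/hosts</TT
> sketch that follows the <TT
CLASS="LITERAL"
>priv-</TT
> naming convention above; the addresses are hypothetical and need only be
on a different subnet from the public IP addresses:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>10.0.0.1   priv-xfs-ha1
10.0.0.2   priv-xfs-ha2</PRE
></TD
></TR
></TABLE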
><P
>Follow these guidelines when planning the configuration of the node
interfaces in a cluster to one or more public networks:</P
><P
></P
><UL
><LI
><P
>If
re-MACing is required, each interface to be failed over requires a dedicated
backup interface on the other node (an interface that does not have a highly
available IP address). Thus, for each IP address on an interface that requires
re-MACing, there should be one dedicated backup interface in each node in the
failover domain.</P
></LI
><LI
><P
>Each interface has a primary IP address. The primary IP address
does not fail over.</P
></LI
><LI
><P
>The hostname of a node cannot be a highly available IP address.</P
></LI
><LI
><P
>All IP addresses used by clients to access highly available
services must be part of the resource group to which the HA service belongs.</P
></LI
><LI
><P
>If re-MACing is required, all of the highly available IP addresses
must have the same backup interface.</P
></LI
><LI
><P
>Making good choices for highly available IP addresses is important;
these are the &#8220;hostnames&#8221; that will be used by users of the highly
available services, not the true hostnames of the nodes.</P
></LI
><LI
><P
>Make a plan for publicizing the highly available IP addresses
to the user community, since users of highly available services must use highly
available IP addresses instead of the output of the <B
CLASS="COMMAND"
>hostname</B
>
command.</P
></LI
><LI
><P
>Do not configure highly available IP addresses in static Linux
configuration files.</P
></LI
></UL
><P
>Follow the procedure below
to determine whether re-MACing is required (see  <A
HREF="le85141-parent.html#LE80214-PARENT"
>Section 1.8.2</A
>
for information about re-MACing). It requires the use of three nodes: <TT
CLASS="REPLACEABLE"
><I
>node1</I
></TT
>, <TT
CLASS="REPLACEABLE"
><I
>node2</I
></TT
>, and <TT
CLASS="REPLACEABLE"
><I
>node3</I
></TT
>. <TT
CLASS="REPLACEABLE"
><I
>node1</I
></TT
> and <TT
CLASS="REPLACEABLE"
><I
>node2</I
></TT
>
can be nodes of a Linux FailSafe cluster, but they need not be. They must
be on the same subnet. <TT
CLASS="REPLACEABLE"
><I
>node3</I
></TT
> is a third node. If
you need to verify that a router accepts gratuitous ARP packets (which means
that re-MACing is not required), <TT
CLASS="REPLACEABLE"
><I
>node3</I
></TT
> must be
on the other side of the router from <TT
CLASS="REPLACEABLE"
><I
>node1</I
></TT
> and <TT
CLASS="REPLACEABLE"
><I
>node2</I
></TT
>.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Configure an IP address on one of the interfaces of <TT
CLASS="REPLACEABLE"
><I
>node1</I
></TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/sbin/ifconfig </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>interface</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;inet </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>ip_address</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;netmask </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>netmask</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;up</B
></TT
></PRE
></TD
></TR
></TABLE
><P
><TT
CLASS="REPLACEABLE"
><I
>interface</I
></TT
> is the interface to be used access
the node. <TT
CLASS="REPLACEABLE"
><I
>ip_address</I
></TT
> is an IP address for <TT
CLASS="REPLACEABLE"
><I
>node1</I
></TT
>. This IP address is used throughout this procedure. <TT
CLASS="REPLACEABLE"
><I
>netmask</I
></TT
> is the netmask of the IP address.</P
></LI
><LI
><P
>From <TT
CLASS="REPLACEABLE"
><I
>node3</I
></TT
>, <B
CLASS="COMMAND"
>ping</B
>
the IP address used in Step 1:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>ping -c 2 </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>ip_address</I
></TT
>
PING 190.0.2.1 (190.0.2.1): 56 data bytes
64 bytes from 190.0.2.1: icmp_seq=0 ttl=255 time=29 ms
64 bytes from 190.0.2.1: icmp_seq=1 ttl=255 time=1 ms

----190.0.2.1 PING Statistics----
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 1/1/1 ms</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Enter this command on <TT
CLASS="REPLACEABLE"
><I
>node1</I
></TT
> to
shut down the interface you configured in Step 1:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/sbin/ifconfig </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>interface</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;down</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>On <TT
CLASS="REPLACEABLE"
><I
>node2</I
></TT
>, enter this command to
move the IP address to <TT
CLASS="REPLACEABLE"
><I
>node2</I
></TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/sbin/ifconfig </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>interface</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;inet </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>ip_address</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;netmask </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>netmask</I
></TT
><TT
CLASS="USERINPUT"
><B
>&#8194;up</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>From <TT
CLASS="REPLACEABLE"
><I
>node3</I
></TT
>, <B
CLASS="COMMAND"
>ping</B
>
the IP address:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>ping -c 2 </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>ip_address</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>If the <B
CLASS="COMMAND"
>ping</B
> command fails, gratuitous ARP packets
are not being accepted and re-MACing is needed to fail over the IP address.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE15769-PARENT"
>2.5.2. Example IP Address Configuration</A
></H2
><P
>For this example, you are configuring an IP address of 192.26.50.1.
This address has a network mask of 255.255.255.0, a broadcast address of 192.26.50.255,
and it is configured on interface eth0.</P
><P
>In this example, you are also configuring an IP address of 192.26.50.2.
This address also has a network mask of 255.255.255.0, a broadcast address
of 192.26.50.255, and it is configured on interface eth1.</P
><P
> <A
HREF="le84104-parent.html#LE73415-PARENT"
>Table 2-3</A
>
shows the Linux FailSafe configuration parameters you specify for these IP
addresses.</P
><DIV
CLASS="TABLE"
><A
NAME="LE73415-PARENT"
></A
><P
><B
>Table 2-3. IP Address Configuration Parameters</B
></P
><TABLE
BORDER="1"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="37%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Resource Attribute</P
></TH
><TH
WIDTH="31%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Resource Name: 192.26.50.1</P
></TH
><TH
WIDTH="31%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Resource Name: 192.26.50.2</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>network mask</P
></TD
><TD
WIDTH="31%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>255.255.255.0</P
></TD
><TD
WIDTH="31%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>255.255.255.0</P
></TD
></TR
><TR
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>broadcast address</P
></TD
><TD
WIDTH="31%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>192.26.50.255</P
></TD
><TD
WIDTH="31%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>192.26.50.255</P
></TD
></TR
><TR
><TD
WIDTH="37%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>interface</P
></TD
><TD
WIDTH="31%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>eth0</P
></TD
><TD
WIDTH="31%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>eth1</P
></TD
></TR
></TBODY
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN1841"
>2.5.3. Local Failover of IP Addresses</A
></H2
><P
>You can configure your system so that an IP address will fail over to
a second interface within the same host, for example from eth0 to eth1 on
a single node. A configuration example that shows the steps you must follow
for this configuration is provided in <A
HREF="localfailover-of-ip.html"
>Section 6.3</A
>.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le53947-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Filesystem Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le88622-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Installing Linux FailSafe Software and Preparing
the System</TD
></TR
></TABLE
></DIV
></BODY
></HTML
><HTML
><HEAD
><TITLE
>Highly Available Resources</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Supported Configurations"
HREF="le79484-parent.html"><LINK
REL="NEXT"
TITLE="Highly Available Applications"
HREF="le19101-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le79484-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le19101-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE85141-PARENT"
>1.8. Highly Available Resources</A
></H1
><P
>This section discusses the highly available resources that are provided
on a Linux FailSafe system.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN633"
>1.8.1. Nodes</A
></H2
><P
>If a node crashes or hangs (for example, due to a parity error or bus
error), the Linux FailSafe software detects this. A different node, determined
by the failover policy, takes over the failed node's services after resetting
the failed node.</P
><P
>If a node fails, the interfaces, access to storage, and services also
become unavailable. See the succeeding sections for descriptions of how the
Linux FailSafe system handles or eliminates these points of failure.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE80214-PARENT"
>1.8.2. Network Interfaces and IP Addresses</A
></H2
><P
> Clients access the highly available services provided by the Linux
FailSafe cluster using IP addresses. Each highly available service can use
multiple IP addresses. The IP addresses are not tied to a particular highly
available service; they can be shared by all the highly available services
in the cluster.</P
><P
>Linux FailSafe uses the IP aliasing mechanism to support multiple IP
addresses on a single network interface. Clients can use a highly available
service that uses multiple IP addresses even when there is only one network
interface in the server node.</P
><P
>The IP aliasing mechanism allows a Linux FailSafe configuration that
has a node with multiple network interfaces to be backed up by a node with
a single network interface. IP addresses configured on multiple network interfaces
are moved to the single interface on the other node in case of a failure.</P
><P
>Linux FailSafe requires that each network interface in a cluster have
an IP address that does not fail over. These IP addresses, called <I
CLASS="FIRSTTERM"
>fixed IP addresses</I
>, are used to monitor network interfaces. Each
fixed IP address must be configured to a network interface at system boot
up time. All other IP addresses in the cluster are configured as <I
CLASS="FIRSTTERM"
>highly available IP addresses</I
>.</P
><P
>Highly available IP addresses are configured on a network interface.
During failover and recovery processes they are moved to another network interface
in the other node by Linux FailSafe. Highly available IP addresses are specified
when you configure the Linux FailSafe system. Linux FailSafe uses the <B
CLASS="COMMAND"
>ifconfig</B
> command to configure an IP address on a network interface
and to move IP addresses from one interface to another.</P
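><P
>As an illustration of the IP aliasing mechanism, the following sketch shows
an additional (highly available) address being brought up as an alias on an
interface and later taken down, for example before it is moved to another
interface. The addresses and the <TT
CLASS="LITERAL"
>eth0:1</TT
> alias label are examples only; Linux FailSafe issues the equivalent
commands itself during failover and recovery:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># /sbin/ifconfig eth0:1 inet 192.26.50.1 netmask 255.255.255.0 up
# /sbin/ifconfig eth0:1 down</PRE
></TD
></TR
></TABLE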
><P
>In some networking implementations, IP addresses cannot be moved from
one interface to another by using only the <B
CLASS="COMMAND"
>ifconfig</B
> command.
Linux FailSafe uses <I
CLASS="FIRSTTERM"
>re-MACing</I
> (<I
CLASS="FIRSTTERM"
>MAC address
impersonation</I
>) to support these networking implementations. Re-MACing
moves the physical (MAC) address of a network interface to another interface.
 It is done by using the <B
CLASS="COMMAND"
>macconfig</B
> command. Re-MACing is
done in addition to the standard <B
CLASS="COMMAND"
>ifconfig</B
> process that
Linux FailSafe uses to move IP addresses. To do re-MACing in Linux FailSafe,
a resource of type MAC_Address is used.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Re-MACing can be used only on Ethernet networks. It cannot be used on
FDDI networks.</P
></BLOCKQUOTE
></DIV
><P
>Re-MACing is required when packets called gratuitous ARP packets are
not passed through the network. These packets are generated automatically
when an IP address is added to an interface (as in a failover process). They
announce a new mapping of an IP address to MAC address. This tells clients
on the local subnet that a particular interface now has a particular IP address.
Clients then update their internal ARP caches with the new MAC address for
the IP address. (The IP address just moved from interface to interface.) When
gratuitous ARP packets are not passed through the network, the internal ARP
caches of subnet clients cannot be updated. In these cases, re-MACing is used.
This moves the MAC address of the original interface to the new interface.
Thus, both the IP address and the MAC address are moved to the new interface
and the internal ARP caches of clients do not need updating.</P
><P
>Re-MACing is not done by default; you must specify that it be done for
each pair of primary and secondary interfaces that requires it. A procedure
in <A
HREF="le84104-parent.html#LE93615-PARENT"
>Section 2.5.1</A
> describes how you can determine
whether re-MACing is required. In general, routers and PC/NFS clients may
require re-MACing interfaces.</P
><P
>A side effect of re-MACing is that the original MAC address of an interface
that has received a new MAC address is no longer available for use. Because
of this, each network interface has to be backed up by a dedicated backup
interface. This backup interface cannot be used by clients as a primary interface.
 (After a failover to this interface, packets sent to the original MAC address
are ignored by every node on the network.) Each backup interface backs up
only one network interface.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN665"
>1.8.3. Disks</A
></H2
><P
>The Linux FailSafe cluster can include shared SCSI-based storage in
the form of individual disks, RAID systems, or Fibre Channel storage systems.</P
><P
> With mirrored volumes on
the disks in a RAID or Fibre Channel system, the storage system itself provides
redundancy; no participation of the Linux FailSafe system software is required
to recover from a disk failure. If a disk controller fails, however, the Linux
FailSafe system software initiates the failover process.</P
><P
><A
HREF="le85141-parent.html#LE77061-PARENT"
>Figure 1-2</A
> shows disk storage takeover on
a two-node system. The surviving node takes over the shared disks and recovers
the logical volumes and filesystems on the disks. This process is expedited
by a filesystem such as ReiserFS or XFS, because of journaling technology
that does not require the use of the <B
CLASS="COMMAND"
>fsck</B
> command for filesystem
consistency checking.</P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE77061-PARENT"
></A
><P
><B
>Figure 1-2. Disk Storage Failover on a Two-Node System</B
></P
><P
><IMG
SRC="figures/a1-6.disk.storage.takeover.gif"></P
></DIV
></P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le79484-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le19101-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Supported Configurations</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Highly Available Applications</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>ECT1"
><A
NAME="LE85141-PARENT"
>1.8. Highly Available Resources</A
></H1
><P
>This section discusses the highly available resources that are provided
on a Linux FailSafe system.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN633"
>1.8.1. Nodes</A
></H2
><P
>If a node crashes or hangs (for example, due to a parity error or bus
error), the Linux FailSafe software detects html/le85448-parent.html010064400016050000001000000062560717757361400155460ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Setting System Operation Defaults</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="NEXT"
TITLE="System Operation Considerations"
HREF="le36400-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le99367-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le36400-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE85448-PARENT"
>7.1. Setting System Operation Defaults</A
></H1
><P
> Several
commands that you perform on a running system allow you the option of specifying
a node or cluster. You can set a default node or cluster to be used
whenever you do not specify one explicitly.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5166"
>7.1.1. Setting Default Cluster with Cluster Manager GUI</A
></H2
><P
>The Cluster Manager GUI prompts you to enter the name of the default
cluster when you have not specified one. Alternately, you can set the default
cluster by clicking the &#8220;Select Cluster...&#8221; button at the bottom
of the FailSafe Manager window.</P
><P
>When using the Cluster Manager GUI, there is no need to set a default
node.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5170"
>7.1.2. Setting Defaults with Cluster Manager CLI</A
></H2
><P
>When you are using the Cluster Manager CLI, you can use the following
commands to specify default values. Use either of the following commands to
specify a default cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>set node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le36400-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe System Operation</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>System Operation Considerations</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Planning Linux FailSafe Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe System Software "
HREF="le15726-parent.html"><LINK
REL="NEXT"
TITLE="Introduction to Configuration Planning"
HREF="le57040-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le15726-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le57040-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE88622-PARENT"
>Chapter 2. Planning Linux FailSafe Configuration</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>2.1. <A
HREF="le57040-parent.html"
>Introduction to Configuration Planning</A
></DT
><DT
>2.2. <A
HREF="le34382-parent.html"
>Disk Configuration</A
></DT
><DD
><DL
><DT
>2.2.1. <A
HREF="le34382-parent.html#AEN1343"
>Planning Disk Configuration</A
></DT
><DT
>2.2.2. <A
HREF="le34382-parent.html#LE10802-PARENT"
>Configuration Parameters for Disks</A
></DT
></DL
></DD
><DT
>2.3. <A
HREF="le96329-parent.html"
>Logical Volume Configuration</A
></DT
><DT
>2.4. <A
HREF="le53947-parent.html"
>Filesystem Configuration</A
></DT
><DD
><DL
><DT
>2.4.1. <A
HREF="le53947-parent.html#LE24179-PARENT"
>Planning Filesystems</A
></DT
><DT
>2.4.2. <A
HREF="le53947-parent.html#AEN1588"
>Example Filesystem Configuration</A
></DT
><DT
>2.4.3. <A
HREF="le53947-parent.html#AEN1604"
>Configuration Parameters for Filesystems</A
></DT
></DL
></DD
><DT
>2.5. <A
HREF="le84104-parent.html"
>IP Address Configuration</A
></DT
><DD
><DL
><DT
>2.5.1. <A
HREF="le84104-parent.html#LE93615-PARENT"
>Planning Network Interface and IP Address Configuration</A
></DT
><DT
>2.5.2. <A
HREF="le84104-parent.html#LE15769-PARENT"
>Example IP Address Configuration</A
></DT
><DT
>2.5.3. <A
HREF="le84104-parent.html#AEN1841"
>Local Failover of IP Addresses</A
></DT
></DL
></DD
></DL
></DIV
><P
>This chapter explains how to plan the configuration of highly available
services on your Linux FailSafe cluster. The major sections of this chapter
are as follows:</P
><P
></P
><UL
><LI
><P
><A
HREF="le57040-parent.html"
>Section 2.1</A
></P
></LI
><LI
><P
><A
HREF="le34382-parent.html"
>Section 2.2</A
></P
></LI
><LI
><P
><A
HREF="le96329-parent.html"
>Section 2.3</A
></P
></LI
><LI
><P
><A
HREF="le53947-parent.html"
>Section 2.4</A
></P
></LI
><LI
><P
><A
HREF="le84104-parent.html"
>Section 2.5</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le15726-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le57040-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe System Software</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Introduction to Configuration Planning</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Concepts</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="High Availability and Linux FailSafe"
HREF="le27299-parent.html"><LINK
REL="NEXT"
TITLE="Additional Linux FailSafe Features"
HREF="le94860-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le27299-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le94860-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE89728-PARENT"
>1.2. Concepts</A
></H1
><P
>In order to use Linux FailSafe, you must understand the concepts in
this section.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN271"
>1.2.1. Cluster Node (or Node)</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>cluster node</I
> is a single Linux execution environment.
In other words, a single physical or virtual machine.  In current Linux environments
this will always be an individual computer. The term <I
CLASS="FIRSTTERM"
>node</I
>
is used to indicate this meaning in this guide for brevity, as opposed to
any meaning such as a network node.  </P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN280"
>1.2.2. Pool</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>pool</I
> is the entire set of nodes having  membership
in a group of clusters. The clusters are usually close together and should
always serve a common purpose. A replicated cluster configuration database
is stored on each node in the pool. </P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN286"
>1.2.3. Cluster</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>cluster</I
> is a collection of one or more nodes
coupled to each other by networks or other similar interconnections. A cluster
belongs to one pool and only one pool.  A cluster is identified by a simple
name; this name must be unique within the pool.  A particular node may be
a member of only one cluster. All nodes in a cluster are also in the pool;
however, not all nodes in the pool are necessarily in the cluster.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN292"
>1.2.4. Node Membership</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>node membership</I
> is the list of nodes in a cluster
on which Linux FailSafe can allocate resource groups.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN300"
>1.2.5. Process Membership</A
></H2
><P
>A  <I
CLASS="FIRSTTERM"
>process membership</I
>
is the list of process instances in a cluster that form a process group. There
can be multiple process groups per node.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN307"
>1.2.6. Resource</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>resource</I
> is a single physical or logical entity
that provides a service to clients or other resources. For example, a resource
can be a single disk volume, a particular network address, or an application
such as a web server. A resource is generally available for use over time
on two or more nodes in a cluster, although it can only be allocated to one
node at any given time. </P
><P
>Resources are identified by a resource name and a resource type. One
resource can be dependent on one or more other resources; if so, it will not
be able to start (that is, be made available for use) unless the dependent
resources are also started. Dependent resources must be part of the same resource
group and are identified in a resource dependency list.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN315"
>1.2.7. Resource Type</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>resource type</I
> is a particular class of resource.
All of the resources in a particular resource type can be handled in the same
way for the purposes of failover. Every resource is an instance of exactly
one resource type.</P
><P
>A resource type is identified by a simple name; this name should be
unique within the cluster. A resource type can be defined for a specific node,
or it can be defined for an entire cluster. A resource type definition for
a specific node overrides a clusterwide resource type definition with the
same name; this allows an individual node to override global settings from
a clusterwide resource type definition.</P
><P
>Like resources, a resource type can be dependent on one or more other
resource types. If such a dependency exists, at least one instance of each
of the dependent resource types must be defined. For example, a resource type
named <TT
CLASS="LITERAL"
>Netscape_web</TT
> might have resource type dependencies
on resource types named <TT
CLASS="LITERAL"
>IP_address</TT
> and <TT
CLASS="LITERAL"
>volume</TT
>. If a resource named <TT
CLASS="LITERAL"
>web1</TT
> is defined with the <TT
CLASS="LITERAL"
>Netscape_web</TT
> resource type, then the resource group containing <TT
CLASS="LITERAL"
>web1</TT
> must also contain at least one resource of the type <TT
CLASS="LITERAL"
>IP_address</TT
> and one resource of the type <TT
CLASS="LITERAL"
>volume</TT
>.</P
><P
>The Linux FailSafe software includes some predefined resource types.
If these types fit the application you want to make highly available, you
can reuse them. If none fit, you can create additional resource types by using
the instructions in the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
>.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN334"
>1.2.8. Resource Name</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>resource name</I
> identifies a specific instance
of a resource type. A resource name must be unique for a given resource type.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN341"
>1.2.9. Resource Group</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>resource group</I
> is a collection of interdependent
resources. A resource group is identified by a simple name; this name must
be unique within a cluster.  <A
HREF="le89728-parent.html#LE99232-PARENT"
>Table 1-1</A
> shows an example
of the resources and their corresponding resource types for a resource group
named <TT
CLASS="LITERAL"
>WebGroup</TT
>.</P
><DIV
CLASS="TABLE"
><A
NAME="LE99232-PARENT"
></A
><P
><B
>Table 1-1. Example Resource Group</B
></P
><TABLE
BORDER="1"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="50%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Resource</P
></TH
><TH
WIDTH="50%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Resource Type</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>10.10.48.22</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>IP_address</TT
></P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>/fs1</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>filesystem</TT
></P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>vol1</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>volume</TT
></P
></TD
></TR
><TR
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>web1</TT
></P
></TD
><TD
WIDTH="50%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>Netscape_web</TT
></P
></TD
></TR
></TBODY
></TABLE
></DIV
><P
>If any individual resource in a resource group becomes unavailable for
its intended use, then the entire resource group is considered unavailable.
Therefore, a resource group is the unit of failover.</P
><P
>Resource groups cannot overlap; that is, two resource groups cannot
contain the same resource.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN392"
>1.2.10. Resource Dependency List</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>resource dependency list</I
> is a list of resources
upon which a resource depends. Each resource instance must have resource dependencies
that satisfy its resource type dependencies before it can be added to a resource
group.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN396"
>1.2.11. Resource Type Dependency List</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>resource type dependency list</I
> is a list of
resource types upon which a resource type depends. For example, the <TT
CLASS="LITERAL"
>filesystem</TT
> resource type depends upon the <TT
CLASS="LITERAL"
>volume</TT
>
resource type, and the <TT
CLASS="LITERAL"
>Netscape_web</TT
> resource type depends
upon the <TT
CLASS="LITERAL"
>filesystem</TT
> and <TT
CLASS="LITERAL"
>IP_address</TT
> resource
types. </P
><P
>For example, suppose a file system instance <TT
CLASS="LITERAL"
>fs1</TT
> is
mounted on volume <TT
CLASS="LITERAL"
>vol1</TT
>. Before <TT
CLASS="LITERAL"
>fs1</TT
> can
be added to a resource group, <TT
CLASS="LITERAL"
>fs1</TT
> must be defined to depend
on <TT
CLASS="LITERAL"
>vol1</TT
>. Linux FailSafe only knows that a file system instance
must have one volume instance in its dependency list. This requirement is
inferred from the resource type dependency list. </P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN419"
>1.2.12. Failover</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>failover</I
> is the process of allocating a resource
group (or application) to another node, according to a failover policy. A
failover may be triggered by the failure of a resource, a change in the node
membership (such as when a node fails or starts), or a manual request by the
administrator.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN425"
>1.2.13. Failover Policy</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>failover policy</I
> is the method used by Linux
FailSafe to determine the destination node of a failover. A failover policy
consists of the following:</P
><P
></P
><UL
><LI
><P
>Failover domain</P
></LI
><LI
><P
>Failover attributes</P
></LI
><LI
><P
>Failover script</P
></LI
></UL
><P
>Linux FailSafe uses the failover domain output from a failover script
along with failover attributes to determine on which node a resource group
should reside.</P
><P
>The administrator must configure a failover policy for each resource
group. A failover policy name must be unique within the pool. Linux FailSafe
includes predefined failover policies, but you can define your own failover
algorithms as well. </P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN440"
>1.2.14. Failover Domain</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>failover domain</I
> is the ordered list of nodes
on which a given resource group can be allocated. The nodes listed in the
failover domain must be within the same cluster; however, the failover domain
does not have to include every node in the cluster.</P
><P
>The administrator defines the initial failover domain when creating
a failover policy. This list is transformed into a run-time failover domain
by the failover script; Linux FailSafe uses the run-time failover domain along
with failover attributes and the node membership to determine the node on
which a resource group should reside. Linux FailSafe stores the run-time failover
domain and uses it as input to the next failover script invocation. Depending
on the run-time conditions and contents of the failover script, the initial
and run-time failover domains may be identical.</P
><P
>In general, Linux FailSafe allocates a given resource group to the first
node listed in the run-time failover domain that is also in the node membership;
the point at which this allocation takes place is affected by the failover
attributes.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN452"
>1.2.15. Failover Attribute</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>failover attribute</I
> is a string that affects
the allocation of a resource group in a cluster. The administrator must specify
system attributes (such as <TT
CLASS="LITERAL"
>Auto_Failback</TT
> or <TT
CLASS="LITERAL"
>Controlled_Failback</TT
>), and can optionally supply site-specific attributes.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN460"
>1.2.16. Failover Scripts</A
></H2
><P
>A <I
CLASS="FIRSTTERM"
>failover script</I
> is a shell script that generates
a run-time failover domain and returns it to the Linux FailSafe process. The
Linux FailSafe process <TT
CLASS="LITERAL"
>ha_fsd</TT
> applies the failover attributes
and then selects the first node in the returned failover domain that is also
in the current node membership.</P
><P
>The following failover scripts are provided with the Linux FailSafe
release:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="FILENAME"
>ordered</TT
>, which never changes the initial
failover domain. When using this script, the initial and run-time failover
domains are equivalent.</P
></LI
><LI
><P
><TT
CLASS="FILENAME"
>round-robin</TT
>, which selects the resource
group owner in a round-robin (circular) fashion. This policy can be used for
resource groups that can be run in any node in the cluster.</P
></LI
></UL
><P
>If these scripts do not meet your needs, you can create a new failover
script using the information in this guide.</P
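><P
>The following sketch is purely illustrative; the actual calling interface
for failover scripts is defined in the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
>. It assumes that the initial failover domain arrives as command-line
arguments and that the run-time failover domain is written to standard output,
which an <TT
CLASS="FILENAME"
>ordered</TT
>-style policy would return unchanged:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>#!/bin/sh
# Illustrative failover script sketch.  Assumed interface: initial
# failover domain as arguments, run-time domain on standard output.
# An ordered-style policy returns the domain unchanged.
echo "$@"</PRE
></TD
></TR
></TABLE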
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN477"
>1.2.17. Action Scripts</A
></H2
><P
>The <I
CLASS="FIRSTTERM"
>action scripts</I
> are the set of scripts that
determine how a resource is started, monitored, and stopped. There must be
a set of action scripts specified for each resource type.</P
><P
>The following is the complete set of action scripts that can be specified
for each resource type:</P
><P
></P
><UL
><LI
><P
><TT
CLASS="LITERAL"
>exclusive</TT
>, which verifies that a resource
is not already running</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>start</TT
>, which starts a resource</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>stop</TT
>, which stops a resource</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>monitor</TT
>, which monitors a resource</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>restart</TT
>, which restarts a resource on the
same server after a monitoring failure occurs</P
></LI
></UL
><P
>The release includes action scripts for predefined resource types. If
these scripts fit the resource type that you want to make highly available,
you can reuse them by copying them and modifying them as needed. If none fits,
you can create additional action scripts by using the instructions in the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
>.</P
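><P
>As a sketch only (the argument conventions and status codes for action
scripts are defined in the <I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
>), a minimal <TT
CLASS="LITERAL"
>start</TT
> script might look like the following; the daemon name is hypothetical,
and exit status 0 is assumed to report success:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>#!/bin/sh
# Hypothetical "start" action script sketch.  Assumes exit status 0
# reports success to Linux FailSafe; the daemon name is illustrative.
if /usr/local/bin/mydaemon; then   # hypothetical application launcher
    exit 0                         # resource started
else
    exit 1                         # resource failed to start
fi</PRE
></TD
></TR
></TABLE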
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le27299-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le94860-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>High Availability and Linux FailSafe</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Additional Linux FailSafe Features</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Configuration for Reset</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="PREVIOUS"
TITLE="Configuring Network Interfaces"
HREF="le97738-parent.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Administration Tools"
HREF="le73346-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le97738-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le73346-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE90681-PARENT"
>3.7. Configuration for Reset</A
></H1
><P
>You can use one of the following methods for reset:<P
></P
><UL
><LI
><P
>EMP (Emergency Management Port), which requires the following:<P
></P
><UL
><LI
><P
>Verify that the <B
CLASS="COMMAND"
>getty</B
> processes for serial
ports <TT
CLASS="LITERAL"
>/dev/ttyS0</TT
> and <TT
CLASS="LITERAL"
>/dev/ttyS1</TT
> are
turned off (this is normally the default)</P
></LI
><LI
><P
>Configure the BIOS</P
></LI
></UL
></P
></LI
><LI
><P
>A serial port PCI board to supply additional serial ports</P
></LI
><LI
><P
>A USB serial port adapter  to supply additional serial ports</P
></LI
><LI
><P
>A STONITH network-attached power switch, which requires that
you enable a <TT
CLASS="LITERAL"
>getty</TT
> on <TT
CLASS="LITERAL"
>/dev/ttyS0</TT
>. </P
></LI
></UL
></P
><P
></P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2461"
>3.7.1. Changing the getty Process</A
></H2
><P
>The <B
CLASS="COMMAND"
>getty</B
> process for serial ports <TT
CLASS="LITERAL"
>/dev/ttyS0</TT
> and <TT
CLASS="LITERAL"
>/dev/ttyS1</TT
> should be off if you are using
the EMP port for reset. The <B
CLASS="COMMAND"
>getty</B
> process for serial port <TT
CLASS="LITERAL"
>/dev/ttyS0</TT
>  should be on if you are using STONITH.</P
><P
>To change the setting, perform these steps on each node:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Open the file <TT
CLASS="FILENAME"
>/etc/inittab</TT
> for editing.</P
></LI
><LI
><P
>Find the line for the port by looking at the comments on the
right for the port number.</P
></LI
><LI
><P
>Change the third field of this line to <TT
CLASS="OPTION"
>off</TT
>
or <TT
CLASS="LITERAL"
>on</TT
>, as required. For example:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>s0:23:off:/sbin/getty ttyS0 9600          # serial port 0</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Save the file.</P
></LI
><LI
><P
>Enter these commands to make the change take effect:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>killall getty</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>init q</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN2490"
>3.7.2. Configuring the BIOS</A
></H2
><P
>To use the EMP for reset, you must enable the EMP port in the BIOS (server
systems shipped by SGI have it enabled by default). If you are comfortable
not having a serial console available, then the remaining serial port can
be used for reset purposes. This involves going into the BIOS and disabling
the console redirection option. </P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le97738-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le73346-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Configuring Network Interfaces</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Administration Tools</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Linux FailSafe Cluster Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Using the FailSafe Cluster Manager CLI"
HREF="le15969-parent.html"><LINK
REL="NEXT"
TITLE="Setting Configuration Defaults"
HREF="le59477-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le15969-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le59477-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE94219-PARENT"
>Chapter 5. Linux FailSafe Cluster Configuration</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>5.1. <A
HREF="le59477-parent.html"
>Setting Configuration Defaults</A
></DT
><DD
><DL
><DT
>5.1.1. <A
HREF="le59477-parent.html#AEN3015"
>Setting Default Cluster with the Cluster Manager GUI</A
></DT
><DT
>5.1.2. <A
HREF="le59477-parent.html#AEN3019"
>Setting and Viewing Configuration Defaults with the Cluster Manager
CLI</A
></DT
></DL
></DD
><DT
>5.2. <A
HREF="le28499-parent.html"
>Name Restrictions</A
></DT
><DT
>5.3. <A
HREF="tv.html"
>Configuring Timeout Values and Monitoring Intervals</A
></DT
><DT
>5.4. <A
HREF="z957104627glen.html"
>Cluster Configuration</A
></DT
><DD
><DL
><DT
>5.4.1. <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Defining Cluster Nodes</A
></DT
><DT
>5.4.2. <A
HREF="z957104627glen.html#FS-MODIFYDELMACHINE"
>Modifying and Deleting Cluster Nodes</A
></DT
><DT
>5.4.3. <A
HREF="z957104627glen.html#AEN3333"
>Displaying Cluster Nodes</A
></DT
><DT
>5.4.4. <A
HREF="z957104627glen.html#FS-SETFSPARAMETERS"
>Linux FailSafe HA Parameters</A
></DT
><DT
>5.4.5. <A
HREF="z957104627glen.html#FS-DEFINECLUSTER"
>Defining a Cluster</A
></DT
><DT
>5.4.6. <A
HREF="z957104627glen.html#FS-MODIFYDELCLUSTER"
>Modifying and Deleting Clusters</A
></DT
><DT
>5.4.7. <A
HREF="z957104627glen.html#AEN3555"
>Displaying Clusters</A
></DT
><DT
>5.4.8. <A
HREF="z957104627glen.html#AEN3562"
>Displaying a Cluster with the Cluster Manager CLI</A
></DT
></DL
></DD
><DT
>5.5. <A
HREF="le53159-parent.html"
>Resource Configuration</A
></DT
><DD
><DL
><DT
>5.5.1. <A
HREF="le53159-parent.html#FS-DEFINERESOURCE"
>Defining Resources</A
></DT
><DT
>5.5.2. <A
HREF="le53159-parent.html#FS-ADDDEPTORESOURCE"
>Adding Dependency to a Resource</A
></DT
><DT
>5.5.3. <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESOURCE"
>Defining a Node-Specific Resource</A
></DT
><DT
>5.5.4. <A
HREF="le53159-parent.html#FS-MODIFYDELRESOURCE"
>Modifying and Deleting Resources</A
></DT
><DT
>5.5.5. <A
HREF="le53159-parent.html#AEN3865"
>Displaying Resources</A
></DT
><DT
>5.5.6. <A
HREF="le53159-parent.html#FS-DEFINERESOURCETYPE"
>Defining a Resource Type</A
></DT
><DT
>5.5.7. <A
HREF="le53159-parent.html#FS-DEFINEMACHSPECRESTYPE"
>Defining a Node-Specific Resource Type</A
></DT
><DT
>5.5.8. <A
HREF="le53159-parent.html#FS-ADDDEPTORESTYPE"
>Adding Dependencies to a Resource Type</A
></DT
><DT
>5.5.9. <A
HREF="le53159-parent.html#FS-MODIFYRESTYPE"
>Modifying and Deleting Resource Types</A
></DT
><DT
>5.5.10. <A
HREF="le53159-parent.html#FS-LOADRESOURCETYPE"
>Installing (Loading) a Resource Type on a Cluster</A
></DT
><DT
>5.5.11. <A
HREF="le53159-parent.html#AEN4169"
>Displaying Resource Types</A
></DT
><DT
>5.5.12. <A
HREF="le53159-parent.html#FS-DEFINEFAILOVER"
>Defining a Failover Policy</A
></DT
><DT
>5.5.13. <A
HREF="le53159-parent.html#FS-MODIFYDELFAILOVERPOLICY"
>Modifying and Deleting Failover Policies</A
></DT
><DT
>5.5.14. <A
HREF="le53159-parent.html#AEN4381"
>Displaying Failover Policies</A
></DT
><DT
>5.5.15. <A
HREF="le53159-parent.html#FS-DEFINERESGROUP"
>Defining Resource Groups</A
></DT
><DT
>5.5.16. <A
HREF="le53159-parent.html#FS-MODIFYRESGROUP"
>Modifying and Deleting Resource Groups</A
></DT
><DT
>5.5.17. <A
HREF="le53159-parent.html#AEN4548"
>Displaying Resource Groups</A
></DT
></DL
></DD
><DT
>5.6. <A
HREF="fs-setlogparams.html"
>Linux FailSafe System Log Configuration</A
></DT
><DD
><DL
><DT
>5.6.1. <A
HREF="fs-setlogparams.html#AEN4817"
>Configuring Log Groups with the Cluster Manager GUI</A
></DT
><DT
>5.6.2. <A
HREF="fs-setlogparams.html#AEN4831"
>Configuring Log Groups with the Cluster Manager CLI</A
></DT
><DT
>5.6.3. <A
HREF="fs-setlogparams.html#AEN4859"
>Modifying Log Groups with the Cluster Manager CLI</A
></DT
><DT
>5.6.4. <A
HREF="fs-setlogparams.html#AEN4871"
>Displaying Log Group Definitions with the Cluster Manager GUI</A
></DT
><DT
>5.6.5. <A
HREF="fs-setlogparams.html#AEN4874"
>Displaying Log Group Definitions with the Cluster Manager CLI</A
></DT
></DL
></DD
><DT
>5.7. <A
HREF="le40511-parent.html"
>Resource Group Creation Example</A
></DT
><DT
>5.8. <A
HREF="le40790-parent.html"
>Linux FailSafe Configuration Example CLI Script</A
></DT
></DL
></DIV
><P
>This chapter describes administrative tasks you perform to configure
the components of a Linux FailSafe system. It describes how to perform tasks
using the FailSafe Cluster Manager Graphical User Interface (GUI) and the
FailSafe Cluster Manager Command Line Interface (CLI). The major sections
in this chapter are as follows:</P
><P
></P
><UL
><LI
><P
><A
HREF="le59477-parent.html"
>Section 5.1</A
></P
></LI
><LI
><P
><A
HREF="le28499-parent.html"
>Section 5.2</A
></P
></LI
><LI
><P
><A
HREF="tv.html"
>Section 5.3</A
></P
></LI
><LI
><P
><A
HREF="z957104627glen.html"
>Section 5.4</A
></P
></LI
><LI
><P
><A
HREF="le53159-parent.html"
>Section 5.5</A
></P
></LI
><LI
><P
><A
HREF="fs-setlogparams.html"
>Section 5.6</A
></P
></LI
><LI
><P
><A
HREF="le40511-parent.html"
>Section 5.7</A
></P
></LI
><LI
><P
><A
HREF="le40790-parent.html"
>Section 5.8</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le15969-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le59477-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Using the FailSafe Cluster Manager CLI</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Setting Configuration Defaults</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Additional Linux FailSafe Features</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"><LINK
REL="PREVIOUS"
TITLE="Concepts"
HREF="le89728-parent.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe Administration"
HREF="le20463-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le89728-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 1. Overview of the Linux FailSafe System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le20463-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE94860-PARENT"
>1.3. Additional Linux FailSafe Features</A
></H1
><P
>Linux FailSafe provides the following features to increase the
flexibility and ease of operation of a highly available system:</P
><P
></P
><UL
><LI
><P
>Dynamic management</P
></LI
><LI
><P
>Fine grain failover</P
></LI
><LI
><P
>Local restarts</P
></LI
></UL
><P
>These features are summarized in the following sections.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN516"
>1.3.1. Dynamic Management</A
></H2
><P
>Linux FailSafe allows you to perform a variety of administrative tasks
while the system is running:</P
><P
></P
><UL
><LI
><P
>Dynamically managed application monitoring</P
><P
>Linux FailSafe allows you to turn monitoring of an application on and
off while other highly available applications continue to run. This allows
you to perform online application upgrades without bringing down the Linux
FailSafe system.</P
></LI
><LI
><P
>Dynamically managed Linux FailSafe resources</P
><P
>Linux FailSafe allows you to add resources while the Linux FailSafe
system is online.</P
></LI
><LI
><P
>Dynamically managed Linux FailSafe upgrades</P
><P
>Linux FailSafe allows you to upgrade Linux FailSafe software on one
node at a time without taking down the entire Linux FailSafe cluster.</P
></LI
></UL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN529"
>1.3.2. Fine Grain Failover</A
></H2
><P
>Using Linux FailSafe, you can specify <I
CLASS="FIRSTTERM"
>fine-grain failover</I
>. Fine-grain failover is a process in which a specific resource
group is failed over from one node to another node while other resource groups
continue to run on the first node, where possible. Fine-grain failover is
possible in Linux FailSafe because the unit of failover is the resource group,
and not the entire node.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN533"
>1.3.3. Local Restarts</A
></H2
><P
>Linux FailSafe allows you to fail over a resource group onto the same
node. This feature enables you to configure a single-node system, where backup
for a particular application is provided on the same machine, if possible.
It also enables you to indicate that a specified number of local restarts
be attempted before the resource group fails over to a different node.</P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le89728-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le20463-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Concepts</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe Administration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Logical Volume Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Planning Linux FailSafe Configuration"
HREF="le88622-parent.html"><LINK
REL="PREVIOUS"
TITLE="Disk Configuration"
HREF="le34382-parent.html"><LINK
REL="NEXT"
TITLE="Filesystem Configuration"
HREF="le53947-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le34382-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 2. Planning Linux FailSafe Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le53947-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE96329-PARENT"
>2.3. Logical Volume Configuration</A
></H1
><P
> The first subsection below describes logical volume issues that
must be considered when planning a Linux FailSafe system.  The second subsection
explains the aspects of the configuration that must be specified for a Linux
FailSafe system.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="LE13082-PARENT"
>2.3.1. Configuration Parameters for Logical Volumes</A
></H2
><P
> The configuration
parameters for logical volumes, with their default values, are as follows:</P
><P
></P
><UL
><LI
><P
>Owner of device filename (default value: <TT
CLASS="LITERAL"
>root</TT
>)</P
></LI
><LI
><P
>Group of device filename (default value: <TT
CLASS="LITERAL"
>sys</TT
>)</P
></LI
><LI
><P
>Mode of device filename (default value: <TT
CLASS="LITERAL"
>600</TT
>)</P
></LI
></UL
><P
><A
HREF="le96329-parent.html#LE33754-PARENT"
>Table 2-1</A
> lists these parameters for three example
logical volumes.</P
><DIV
CLASS="TABLE"
><A
NAME="LE33754-PARENT"
></A
><P
><B
>Table 2-1. Logical Volume Configuration Parameters</B
></P
><TABLE
BORDER="1"
CLASS="CALSTABLE"
><THEAD
><TR
><TH
WIDTH="25%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Resource Attribute</P
></TH
><TH
WIDTH="12%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
><TT
CLASS="LITERAL"
>volA</TT
></P
></TH
><TH
WIDTH="13%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
><TT
CLASS="LITERAL"
>volB</TT
></P
></TH
><TH
WIDTH="11%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
><TT
CLASS="LITERAL"
>volC</TT
></P
></TH
><TH
WIDTH="39%"
ALIGN="LEFT"
VALIGN="BOTTOM"
><P
>Comments</P
></TH
></TR
></THEAD
><TBODY
><TR
><TD
WIDTH="25%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>devname-owner</TT
></P
></TD
><TD
WIDTH="12%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>root</TT
></P
></TD
><TD
WIDTH="13%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>root</TT
></P
></TD
><TD
WIDTH="11%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>root</TT
></P
></TD
><TD
WIDTH="39%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>The owner of the device name.</P
></TD
></TR
><TR
><TD
WIDTH="25%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>devname-group</TT
></P
></TD
><TD
WIDTH="12%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>sys</TT
></P
></TD
><TD
WIDTH="13%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>sys</TT
></P
></TD
><TD
WIDTH="11%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>root</TT
></P
></TD
><TD
WIDTH="39%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>The group of the device name.</P
></TD
></TR
><TR
><TD
WIDTH="25%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>devname-mode</TT
></P
></TD
><TD
WIDTH="12%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>0600</TT
></P
></TD
><TD
WIDTH="13%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>0600</TT
></P
></TD
><TD
WIDTH="11%"
ALIGN="LEFT"
VALIGN="TOP"
><P
><TT
CLASS="LITERAL"
>0600</TT
></P
></TD
><TD
WIDTH="39%"
ALIGN="LEFT"
VALIGN="TOP"
><P
>The mode of the device name.</P
></TD
></TR
></TBODY
></TABLE
></DIV
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le34382-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le53947-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Disk Configuration</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le88622-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Filesystem Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Configuring Network Interfaces</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="PREVIOUS"
TITLE="Choosing and Configuring devices and Filesystems"
HREF="le39637-parent.html"><LINK
REL="NEXT"
TITLE="Configuration for Reset"
HREF="le90681-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le39637-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le90681-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE97738-PARENT"
>3.6. Configuring Network Interfaces</A
></H1
><P
>The procedure in this section
describes how to configure the network interfaces on the nodes in a Linux
FailSafe cluster. The example shown in <A
HREF="le97738-parent.html#LE47532-PARENT"
>Figure 3-1</A
> is
used in the procedure.</P
><P
><DIV
CLASS="FIGURE"
><A
NAME="LE47532-PARENT"
></A
><P
><B
>Figure 3-1. Example Interface Configuration</B
></P
><P
><IMG
SRC="figures/a2-1.examp.interface.config.gif"></P
></DIV
> <P
></P
><OL
TYPE="1"
><LI
><P
>If possible, add every IP address, IP name, and IP alias for
the nodes to <TT
CLASS="FILENAME"
>/etc/hosts</TT
> on one node.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>190.0.2.1 xfs-ha1.company.com xfs-ha1
190.0.2.3 stocks
190.0.3.1 priv-xfs-ha1
190.0.2.2 xfs-ha2.company.com xfs-ha2
190.0.2.4 bonds
190.0.3.2 priv-xfs-ha2</PRE
></TD
></TR
></TABLE
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>IP aliases that are used exclusively by highly available services  should
not be added to system configuration files.  These aliases will be added and
removed by Linux FailSafe.</P
></BLOCKQUOTE
></DIV
></LI
><LI
><P
>Add all of the IP addresses from Step 1
to <TT
CLASS="FILENAME"
>/etc/hosts</TT
> on the other nodes in the cluster.</P
></LI
><LI
><P
>If there are IP addresses, IP names, or IP aliases that you
did not add to <TT
CLASS="FILENAME"
>/etc/hosts</TT
> in Steps 1 and 2, verify that
NIS is configured on all nodes in the cluster.</P
><P
>If the <TT
CLASS="LITERAL"
>ypbind</TT
> is <TT
CLASS="OPTION"
>off</TT
>, you must start
NIS. See your distribution's documentation for details.</P
></LI
><LI
><P
>For IP addresses, IP names, and IP aliases that you did not
add to <TT
CLASS="FILENAME"
>/etc/hosts</TT
> on the nodes in Steps 1 and 2, verify
that they are in the NIS database by entering this command for each address:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>ypmatch <TT
CLASS="REPLACEABLE"
><I
>address mapname</I
></TT
></B
></TT
>
190.0.2.1 xfs-ha1.company.com xfs-ha1</PRE
></TD
></TR
></TABLE
><P
><TT
CLASS="REPLACEABLE"
><I
>address</I
></TT
> is an IP address, IP name, or IP
alias. <TT
CLASS="REPLACEABLE"
><I
>mapname</I
></TT
> is <TT
CLASS="LITERAL"
>hosts.byaddr</TT
>
if address is an IP address; otherwise, it is <TT
CLASS="LITERAL"
>hosts</TT
>. If <B
CLASS="COMMAND"
>ypmatch</B
> reports that <TT
CLASS="REPLACEABLE"
><I
>address</I
></TT
> doesn't
match, it must be added to the NIS database. See your distribution's documentation
for details.</P
></LI
><LI
><P
>On one node, statically configure that node's interface and
IP address with the provided distribution tools. </P
><P
>For the example in <A
HREF="le97738-parent.html#LE47532-PARENT"
>Figure 3-1</A
>, on a SuSE system,
the public interface name and IP address lines are configured into <TT
CLASS="FILENAME"
>/etc/rc.config</TT
> in the following variables.  Please note that YaST
is the preferred method for modifying these variables.  In any event, you
should refer to the documentation of your distribution for help here:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>NETDEV_0=eth0
IPADDR_0=$HOSTNAME</PRE
></TD
></TR
></TABLE
><P
><TT
CLASS="LITERAL"
>$HOSTNAME</TT
> is an alias for an IP address that appears
in <TT
CLASS="FILENAME"
>/etc/hosts</TT
>.</P
><P
>If there are additional public interfaces, their interface names and
IP addresses appear on lines like these:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>NETDEV_1=
IPADDR_1=</PRE
></TD
></TR
></TABLE
><P
>In the example, the control network name and IP address are</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>NETDEV_2=eth3
IPADDR_2=priv-$HOSTNAME</PRE
></TD
></TR
></TABLE
><P
>The control network IP address in this example, <TT
CLASS="LITERAL"
>priv-$HOSTNAME</TT
>, is an alias for an IP address that appears in <TT
CLASS="FILENAME"
>/etc/hosts</TT
>.</P
></LI
><LI
><P
>Repeat Steps 5 and 6 on the other nodes.</P
></LI
><LI
><P
>Verify that Linux FailSafe is <B
CLASS="COMMAND"
><TT
CLASS="OPTION"
>off</TT
></B
>
on each node:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/fsconfig failsafe</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>if [ $? -eq 1 ]; then echo off; else echo on; fi</B
></TT
>&#13;</PRE
></TD
></TR
></TABLE
><P
>If <TT
CLASS="LITERAL"
>failsafe</TT
> is <TT
CLASS="OPTION"
>on</TT
> on any node, enter
this command on that node:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
># <TT
CLASS="USERINPUT"
><B
>/usr/lib/failsafe/bin/fsconfig failsafe off</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>Configure an e-mail alias on each node that sends the Linux
FailSafe e-mail notifications of cluster transitions to a user outside the
Linux FailSafe cluster and to a user on the other nodes in the cluster. For
example, if there are two nodes called <TT
CLASS="LITERAL"
>xfs-ha1</TT
> and <TT
CLASS="LITERAL"
>xfs-ha2</TT
>, in <TT
CLASS="FILENAME"
>/etc/aliases</TT
> on <TT
CLASS="LITERAL"
>xfs-ha1</TT
>, add</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>fsafe_admin:operations@console.xyz.com,admin_user@xfs-ha2.xyz.com </PRE
></TD
></TR
></TABLE
><P
>On xfs-ha2, add this line to <TT
CLASS="FILENAME"
>/etc/aliases</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>fsafe_admin:operations@console.xyz.com,admin_user@xfs-ha1.xyz.com </PRE
></TD
></TR
></TABLE
><P
>The alias you choose, <TT
CLASS="LITERAL"
>fsafe_admin</TT
> in this case, is
the value you will use for the mail destination address when you configure
your system. In this example, <TT
CLASS="LITERAL"
>operations</TT
> is the user outside
the cluster and <TT
CLASS="LITERAL"
> admin_user</TT
> is a user on each node.</P
></LI
><LI
><P
>If the nodes use NIS (<TT
CLASS="LITERAL"
>ypbind</TT
> is enabled
to start at boot time) or the BIND domain name server (DNS), switching to
local name resolution is recommended. Additionally, you should modify the <TT
CLASS="FILENAME"
>/etc/nsswitch.conf</TT
> file so that it reads as follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>hosts:                  files nis dns </PRE
></TD
></TR
></TABLE
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Exclusive use of NIS or DNS for IP address lookup for the cluster nodes
has been shown to reduce availability in situations where the NIS service
becomes unreliable.</P
></BLOCKQUOTE
></DIV
></LI
><LI
><P
>Reboot both nodes to put the new network configuration into
effect.</P
></LI
></OL
></P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le39637-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le90681-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Choosing and Configuring devices and Filesystems</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Configuration for Reset</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/le97755-parent.html010064400016050000001000000126360717757326500155530ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Installing Required Software</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Installing Linux FailSafe Software and Preparing
the System"
HREF="le32854-parent.html"><LINK
REL="PREVIOUS"
TITLE="Overview of Configuring Nodes for Linux FailSafe"
HREF="le29006-parent.html"><LINK
REL="NEXT"
TITLE="Configuring System Files"
HREF="le23103-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le29006-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 3. Installing Linux FailSafe Software and Preparing
the System</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le23103-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LE97755-PARENT"
>3.2. Installing Required Software</A
></H1
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Installing the software from the Linux FailSafe base CD requires about 25 MB of disk space.</P
></BLOCKQUOTE
></DIV
><P
> To install the software,
follow these steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Make sure all servers in the cluster are running a supported
release of Linux.</P
></LI
><LI
><P
>Depending on the servers and storage in the configuration
and the Linux revision level, install the latest patches that are
required for the platform and applications.</P
></LI
><LI
><P
>On each system in the pool, install the version of the multiplexer
driver that is appropriate to the operating system. Use the CD that accompanies
the multiplexer. Reboot the system after installation.</P
></LI
><LI
><P
>On each node that is part of the pool, install the following
software, in order (see the example after this procedure):<P
></P
><OL
TYPE="a"
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_base-tcpmux</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_base-lib</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_base-server</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>cluster_admin</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>cluster_services</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>failsafe</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_failsafe-server</TT
></P
></LI
></OL
></P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>You must install <TT
CLASS="LITERAL"
>sysadm_base-tcpmux</TT
>, <TT
CLASS="LITERAL"
>sysadm_base-server</TT
>, and <TT
CLASS="LITERAL"
>sysadm_failsafe</TT
> packages on those nodes
from which you want to run the FailSafe GUI. If you do not want to run the
GUI on a specific node, you do not need to install these software packages
on that node.</P
></BLOCKQUOTE
></DIV
></LI
><LI
><P
>If the pool nodes are to be administered by a Web-based version
of the Linux FailSafe Cluster Manager GUI, install the following subsystems,
in order: <P
></P
><OL
TYPE="a"
><LI
><P
><TT
CLASS="LITERAL"
>IBMJava118-JRE</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_base-client</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_failsafe-web</TT
></P
><P
>If the workstation launches the GUI client from a Web browser that supports
Java&#8482;, install <TT
CLASS="FILENAME"
>java_plugin</TT
> from the Linux FailSafe
CD.</P
><P
>If the Java plug-in is not installed when the Linux FailSafe Manager
GUI is run from a browser, the browser is redirected to <TT
CLASS="FILENAME"
>http://java.sun.com/products/plugin/1.1/plugin-install.html</TT
></P
><P
>After installing the Java plug-in, you must close all browser windows
and restart the browser.</P
><P
>For a non-Linux workstation, download the Java Plug-in from <TT
CLASS="FILENAME"
>http://java.sun.com/products/plugin/1.1/plugin-install.html</TT
></P
><P
>If the Java plug-in is not installed when the Linux FailSafe Manager
GUI is run from a browser, the browser is redirected to this site.</P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_failsafe-client</TT
></P
></LI
></OL
></P
></LI
><LI
><P
>Install software on the administrative workstation (GUI client).</P
><P
>If the workstation runs the GUI client from a Linux desktop, install
these subsystems:</P
><P
></P
><OL
TYPE="a"
><LI
><P
><TT
CLASS="LITERAL"
>IBMJava118-JRE</TT
></P
></LI
><LI
><P
><TT
CLASS="LITERAL"
>sysadm_base-client</TT
></P
></LI
></OL
></LI
><LI
><P
>On the appropriate servers, install other optional software,
such as storage management or network board software.</P
></LI
><LI
><P
>Install patches that are required for the platform and applications.</P
></LI
></OL
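><P
>As an illustration of step 4 (a sketch only; the exact package file
names and versions are assumptions that depend on your distribution and
release), the packages might be installed on an RPM-based system as
follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>rpm -Uvh sysadm_base-tcpmux-*.rpm sysadm_base-lib-*.rpm \
       sysadm_base-server-*.rpm cluster_admin-*.rpm \
       cluster_services-*.rpm failsafe-*.rpm \
       sysadm_failsafe-server-*.rpm</B
></TT
></PRE
></TD
></TR
></TABLE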
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le29006-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le23103-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Overview of Configuring Nodes for Linux FailSafe</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le32854-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Configuring System Files</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/le99367-parent.html010064400016050000001000000131010717757367700155470ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Linux FailSafe System Operation</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="PREVIOUS"
TITLE="Local Failover of an IP Address"
HREF="localfailover-of-ip.html"><LINK
REL="NEXT"
TITLE="Setting System Operation Defaults"
HREF="le85448-parent.html"></HEAD
><BODY
CLASS="CHAPTER"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="localfailover-of-ip.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le85448-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><H1
><A
NAME="LE99367-PARENT"
>Chapter 7. Linux FailSafe System Operation</A
></H1
><DIV
CLASS="TOC"
><DL
><DT
><B
>Table of Contents</B
></DT
><DT
>7.1. <A
HREF="le85448-parent.html"
>Setting System Operation Defaults</A
></DT
><DD
><DL
><DT
>7.1.1. <A
HREF="le85448-parent.html#AEN5166"
>Setting Default Cluster with Cluster Manager GUI</A
></DT
><DT
>7.1.2. <A
HREF="le85448-parent.html#AEN5170"
>Setting Defaults with Cluster Manager CLI</A
></DT
></DL
></DD
><DT
>7.2. <A
HREF="le36400-parent.html"
>System Operation Considerations</A
></DT
><DT
>7.3. <A
HREF="fs-activatehaservices.html"
>Activating (Starting) Linux FailSafe</A
></DT
><DD
><DL
><DT
>7.3.1. <A
HREF="fs-activatehaservices.html#AEN5209"
>Activating Linux FailSafe with the Cluster Manager GUI</A
></DT
><DT
>7.3.2. <A
HREF="fs-activatehaservices.html#AEN5221"
>Activating Linux FailSafe with the Cluster Manager CLI</A
></DT
></DL
></DD
><DT
>7.4. <A
HREF="le16877-parent.html"
>System Status</A
></DT
><DD
><DL
><DT
>7.4.1. <A
HREF="le16877-parent.html#AEN5247"
>Monitoring System Status with the Cluster Manager GUI</A
></DT
><DT
>7.4.2. <A
HREF="le16877-parent.html#AEN5278"
>Monitoring Resource and Reset Serial Line with the Cluster Manager
CLI</A
></DT
><DT
>7.4.3. <A
HREF="le16877-parent.html#LE29367-PARENT"
>Resource Group Status</A
></DT
><DT
>7.4.4. <A
HREF="le16877-parent.html#AEN5493"
>Node Status</A
></DT
><DT
>7.4.5. <A
HREF="le16877-parent.html#AEN5565"
>Cluster Status</A
></DT
><DT
>7.4.6. <A
HREF="le16877-parent.html#LE28488-PARENT"
>Viewing System Status with the haStatus CLI Script</A
></DT
></DL
></DD
><DT
>7.5. <A
HREF="le41282-parent.html"
>Resource Group Failover</A
></DT
><DD
><DL
><DT
>7.5.1. <A
HREF="le41282-parent.html#FS-BRINGRESGROUPONLINE"
>Bringing a Resource Group Online</A
></DT
><DT
>7.5.2. <A
HREF="le41282-parent.html#FS-TAKERESGROUPOFFLINE"
>Taking a Resource Group Offline</A
></DT
><DT
>7.5.3. <A
HREF="le41282-parent.html#FS-MOVERESGROUP"
>Moving a Resource Group</A
></DT
><DT
>7.5.4. <A
HREF="le41282-parent.html#FS-STOPMONITORINGRESGROUP"
>Stop Monitoring of a Resource Group (Maintenance
Mode)</A
></DT
></DL
></DD
><DT
>7.6. <A
HREF="z957117933glen.html"
>Deactivating (Stopping) Linux FailSafe</A
></DT
><DD
><DL
><DT
>7.6.1. <A
HREF="z957117933glen.html#AEN5830"
>Deactivating HA Services on a Node</A
></DT
><DT
>7.6.2. <A
HREF="z957117933glen.html#AEN5838"
>Deactivating HA Services in a Cluster</A
></DT
><DT
>7.6.3. <A
HREF="z957117933glen.html#AEN5843"
>Deactivating Linux FailSafe with the Cluster Manager GUI</A
></DT
><DT
>7.6.4. <A
HREF="z957117933glen.html#AEN5855"
>Deactivating Linux FailSafe with the Cluster Manager CLI</A
></DT
></DL
></DD
><DT
>7.7. <A
HREF="fs-resetmachine.html"
>Resetting Nodes</A
></DT
><DD
><DL
><DT
>7.7.1. <A
HREF="fs-resetmachine.html#AEN5874"
>Resetting a Node with the Cluster Manager GUI</A
></DT
><DT
>7.7.2. <A
HREF="fs-resetmachine.html#AEN5886"
>Resetting a Node with the Cluster Manager CLI</A
></DT
></DL
></DD
><DT
>7.8. <A
HREF="le37674-parent.html"
>Backing Up and Restoring Configuration With Cluster
Manager CLI</A
></DT
></DL
></DIV
><P
>This chapter describes administrative tasks you perform to operate and
monitor a Linux FailSafe system. It describes how to perform tasks using the
FailSafe Cluster Manager Graphical User Interface (GUI) and the FailSafe Cluster
Manager Command Line Interface (CLI). The major sections in this chapter are
as follows:</P
><P
></P
><UL
><LI
><P
><A
HREF="le85448-parent.html"
>Section 7.1</A
></P
></LI
><LI
><P
><A
HREF="le36400-parent.html"
>Section 7.2</A
></P
></LI
><LI
><P
><A
HREF="fs-activatehaservices.html"
>Section 7.3</A
></P
></LI
><LI
><P
><A
HREF="le16877-parent.html"
>Section 7.4</A
></P
></LI
><LI
><P
><A
HREF="le41282-parent.html"
>Section 7.5</A
></P
></LI
><LI
><P
><A
HREF="z957117933glen.html"
>Section 7.6</A
></P
></LI
><LI
><P
><A
HREF="fs-resetmachine.html"
>Section 7.7</A
></P
></LI
><LI
><P
><A
HREF="le37674-parent.html"
>Section 7.8</A
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="localfailover-of-ip.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le85448-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Local Failover of an IP Address</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Setting System Operation Defaults</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>PADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="localfailover-of-ip.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
></TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le85448-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="CHAPTER"
><Hhtml/ln15.html010064400016050000001000000105320717757276300140150ustar00gfergother00002640000003<HTML
><HEAD
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="NEXT"
TITLE="About This Guide"
HREF="f42.html"></HEAD
><BODY
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="LEGALNOTICE"
><A
NAME="AEN15"
></A
><P
></P
><P
>&copy; 2000 Silicon Graphics, Inc. &#8212; All rights reserved</P
><P
></P
></DIV
><DIV
CLASS="LEGALNOTICE"
><A
NAME="AEN17"
></A
><P
><B
>NOTICE</B
></P
><P
>This documentation, in electronic format, is provided as is without
any warranty or condition of any kind, either express, implied, or statutory,
including, but not limited to, any warranty or condition that it constitutes
specifications to which any related software will conform, any implied warranties
or conditions, on the documentation and related software, of merchantability,
satisfactory quality, fitness for a particular purpose, and freedom from infringement,
and any warranty or condition that the related software will be error free.
In no event shall SGI or its suppliers be liable for any damages, including,
but not limited to direct, indirect, special or consequential damages, arising
out of, resulting from, or in any way connected with this documentation and
related software, whether or not based upon warranty, contract, tort or otherwise,
whether or not injury was sustained by persons or property or otherwise, and
whether or not loss was sustained from, or arose out of the results of, or
use of, the documentation and related software. </P
><P
>Silicon Graphics, Inc. grants the user permission to reproduce, distribute,
and create derivative works from the documentation, provided that: (1) the
user reproduces this entire notice within both source and binary format redistributions
in printed or electronic format; and (2) no further license or permission
may be inferred or deemed or construed to exist with regard to the sample
code or the code base of which it forms a part.  </P
><P
>Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
Mountain View, CA  94043, or: </P
><P
><TT
CLASS="LITERAL"
>http://www.sgi.com</TT
></P
></DIV
><DIV
CLASS="LEGALNOTICE"
><A
NAME="AEN24"
></A
><P
><B
>LIMITED RIGHTS LEGEND</B
></P
><P
>The electronic (software) version of this document was developed at
private expense; if acquired under an agreement with the USA government or
any contractor thereto, it is acquired as "commercial computer software" subject
to the provisions of its applicable license agreement, as specified in (a)
48 CFR 12.212 of the FAR; or, if acquired for Department of Defense units,
(b) 48 CFR 227-7202 of the DoD FAR Supplement; or sections succeeding thereto.
 Contractor/manufacturer is Silicon Graphics, Inc., 1600 Amphitheatre Pkwy
2E, Mountain View, CA 94043-1351.</P
></DIV
><DIV
CLASS="LEGALNOTICE"
><A
NAME="AEN27"
></A
><P
><B
>TRADEMARKS</B
></P
><P
>SGI, the SGI logo, IRIS FailSafe, SGI FailSafe, SGI Linux, and Linux
FailSafe are trademarks of Silicon Graphics, Inc. Linux is a registered trademark
of Linus Torvalds, used with permission by Silicon Graphics, Inc. </P
><P
>SuSE is a trademark of SuSE Inc. Windows is a registered trademark of
Microsoft Corporation. Netscape and Netscape FastTrack Server are registered
trademarks, and Netscape Enterprise Server is a trademark, of Netscape Communications
Corporation in the United States and other countries. NFS is a trademark and
Java and all Java-based trademarks and logos are trademarks or registered
trademarks of Sun Microsystems, Inc., in the U.S. and other countries. UNIX
is a registered trademark in the United States and other countries, licensed
exclusively through X/Open Company, Ltd. All other trademarks mentioned are
the property of their respective owners.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>&nbsp;</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>&nbsp;</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Local Failover of an IP Address</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Configuration Examples"
HREF="configexample.html"><LINK
REL="PREVIOUS"
TITLE="cmgr Script"
HREF="threenode-script.html"><LINK
REL="NEXT"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="threenode-script.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 6. Configuration Examples</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le99367-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="LOCALFAILOVER-OF-IP"
>6.3. Local Failover of an IP Address</A
></H1
><P
>You
can configure a Linux FailSafe system to fail over an IP address to a second
interface within the same host. To do this, specify multiple interfaces for
resources of the <TT
CLASS="LITERAL"
>IP_address</TT
> resource type. You can also specify
different interfaces for supporting a heterogeneous cluster. For information
on specifying  IP address resources, see <A
HREF="le53159-parent.html#IPATTRIBUTES"
>Section 5.5.1.1</A
>.</P
><P
>The following example configures local failover of an IP address. It
uses the configuration illustrated in <A
HREF="threenode-example.html"
>Section 6.1</A
>.</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Define an IP address resource with two interfaces:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>define resource 192.26.50.1 of resource_type IP_address in cluster TEST
          set NetworkMask to 0xffffff00
          set interfaces to eth0,eth1
          set BroadcastAddress to 192.26.50.255
done</PRE
></TD
></TR
></TABLE
><P
>IP address 192.26.50.1 will be locally failed over from interface <TT
CLASS="LITERAL"
>eth0</TT
> to interface <TT
CLASS="LITERAL"
>eth1</TT
> when there is an <TT
CLASS="LITERAL"
>eth0</TT
> interface failure.</P
><P
>In nodes <TT
CLASS="LITERAL"
>N1</TT
>, <TT
CLASS="LITERAL"
>N2</TT
>, and <TT
CLASS="LITERAL"
>N3</TT
>, either <TT
CLASS="LITERAL"
>eth0</TT
> or <TT
CLASS="LITERAL"
>eth1</TT
> should
configure up automatically, when the node boots up. Both <TT
CLASS="LITERAL"
>eth0</TT
>
and <TT
CLASS="LITERAL"
>eth1</TT
> are physically connected to the same subnet 192.26.50.
Only one network interface connected to the same network should be configured
up in a node (see the check following this procedure).</P
></LI
><LI
><P
>Modify the <TT
CLASS="FILENAME"
>/etc/conf/netif.options</TT
> file
to configure the <TT
CLASS="LITERAL"
>eth0</TT
> and <TT
CLASS="LITERAL"
>eth1</TT
> interfaces:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>if1name=eth0 if1addr=192.26.50.10 if2name=eth1 if2addr=192.26.50.11</PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>The <TT
CLASS="FILENAME"
>/etc/init.d/network</TT
> script should
configure the network interface <TT
CLASS="LITERAL"
>eth1</TT
> down in all nodes <TT
CLASS="LITERAL"
>N1</TT
>, <TT
CLASS="LITERAL"
>N2</TT
>, and <TT
CLASS="LITERAL"
>N3</TT
>. Add the following
line to the file:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>ifconfig eth1 down</PRE
></TD
></TR
></TABLE
></LI
></OL
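><P
>To confirm the result (an illustrative check, assuming the interface
names used above), you can list the interfaces and their addresses on a
node and verify that only one interface on subnet 192.26.50 is
configured up:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>ifconfig -a</B
></TT
></PRE
></TD
></TR
></TABLE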
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="threenode-script.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>cmgr Script</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="configexample.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Linux FailSafe System Operation</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>bottom"
><A
HREF="threenode-script.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 6. Configuration Examples</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le99367-parent.html"
>Next</A
></TD
><html/threenode-example.html010064400016050000001000000047700717757355300166510ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Linux FailSafe Example with Three-Node Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Configuration Examples"
HREF="configexample.html"><LINK
REL="PREVIOUS"
TITLE="Configuration Examples"
HREF="configexample.html"><LINK
REL="NEXT"
TITLE="cmgr Script"
HREF="threenode-script.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="configexample.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 6. Configuration Examples</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="threenode-script.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="THREENODE-EXAMPLE"
>6.1. Linux FailSafe Example with Three-Node Cluster</A
></H1
><P
>The
following illustration shows a three-node Linux FailSafe cluster. This configuration
consists of a pool containing nodes <TT
CLASS="LITERAL"
>N1</TT
>, <TT
CLASS="LITERAL"
>N2</TT
>,<TT
CLASS="LITERAL"
> N3</TT
>, and <TT
CLASS="LITERAL"
>N4</TT
>. Nodes <TT
CLASS="LITERAL"
>N1</TT
>, <TT
CLASS="LITERAL"
>N2</TT
>, and <TT
CLASS="LITERAL"
>N3</TT
> make up the Linux FailSafe cluster.
The nodes in this cluster share disks, and are connected to a serial port
multiplexer, which is also connected to the private control network.</P
><DIV
CLASS="FIGURE"
><A
NAME="AEN4977"
></A
><P
><B
>Figure 6-1. Configuration Example</B
></P
><P
><IMG
SRC="figures/n1n4.gif"></P
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="configexample.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="threenode-script.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Configuration Examples</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="configexample.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>cmgr Script</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>cmgr Script</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Configuration Examples"
HREF="configexample.html"><LINK
REL="PREVIOUS"
TITLE="Linux FailSafe Example with Three-Node Cluster"
HREF="threenode-example.html"><LINK
REL="NEXT"
TITLE="Local Failover of an IP Address"
HREF="localfailover-of-ip.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="threenode-example.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 6. Configuration Examples</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="localfailover-of-ip.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="THREENODE-SCRIPT"
>6.2. cmgr Script</A
></H1
><P
>This section provides an example <TT
CLASS="LITERAL"
>cmgr</TT
> script that
defines a Linux FailSafe three-node cluster as shown in <A
HREF="threenode-example.html"
>Section 6.1</A
>.
For general information on CLI scripts, see <A
HREF="le15969-parent.html#LE41514-PARENT"
>Section 4.3.4</A
>.
For information on the CLI template files that you can use to create your
own configuration script, see <A
HREF="le15969-parent.html#LE10673-PARENT"
>Section 4.3.5</A
>.</P
><P
>This cluster has two resource groups, <TT
CLASS="LITERAL"
>RG1</TT
> and <TT
CLASS="LITERAL"
>RG2</TT
>.</P
><P
>Resource group <TT
CLASS="LITERAL"
>RG1</TT
> contains the following resources:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>IP_address</TT
></DT
><DD
><P
>192.26.50.1</P
></DD
><DT
><TT
CLASS="LITERAL"
>filesystem</TT
></DT
><DD
><P
><TT
CLASS="LITERAL"
>/ha1</TT
></P
></DD
><DT
><TT
CLASS="LITERAL"
>volume</TT
></DT
><DD
><P
><TT
CLASS="LITERAL"
>ha1_vol</TT
></P
></DD
><DT
><TT
CLASS="LITERAL"
>NFS</TT
></DT
><DD
><P
><TT
CLASS="LITERAL"
>/ha1/export</TT
></P
></DD
></DL
></DIV
><P
>Resource group <TT
CLASS="LITERAL"
>RG1</TT
> has a failover policy of <TT
CLASS="LITERAL"
>FP1</TT
>. <TT
CLASS="LITERAL"
>FP1</TT
> has the following components:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
>script</DT
><DD
><P
><TT
CLASS="LITERAL"
>ordered</TT
></P
></DD
><DT
>attributes</DT
><DD
><P
><TT
CLASS="LITERAL"
>Auto_Failback</TT
></P
><P
><TT
CLASS="LITERAL"
>Auto_Recovery</TT
></P
></DD
><DT
>failover domain</DT
><DD
><P
><TT
CLASS="LITERAL"
>N1, N2, N3</TT
></P
></DD
></DL
></DIV
><P
>Resource group <TT
CLASS="LITERAL"
>RG2</TT
> contains the following resources:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><TT
CLASS="LITERAL"
>IP_address</TT
></DT
><DD
><P
>192.26.50.2</P
></DD
><DT
><TT
CLASS="LITERAL"
>filesystem</TT
></DT
><DD
><P
><TT
CLASS="LITERAL"
>/ha2</TT
></P
></DD
><DT
><TT
CLASS="LITERAL"
>volume</TT
></DT
><DD
><P
><TT
CLASS="LITERAL"
>ha2_vol</TT
></P
></DD
><DT
><TT
CLASS="LITERAL"
>NFS</TT
></DT
><DD
><P
><TT
CLASS="LITERAL"
>/ha2/export</TT
></P
></DD
></DL
></DIV
><P
>Resource group <TT
CLASS="LITERAL"
>RG2</TT
> has a failover policy of <TT
CLASS="LITERAL"
>FP2</TT
>. <TT
CLASS="LITERAL"
>FP2</TT
> has the following components:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
>script</DT
><DD
><P
><TT
CLASS="LITERAL"
>round-robin</TT
></P
></DD
><DT
>attributes</DT
><DD
><P
><TT
CLASS="LITERAL"
>Controlled_Failback</TT
></P
><P
><TT
CLASS="LITERAL"
>Inplace_Recovery</TT
></P
></DD
><DT
>failover domain</DT
><DD
><P
><TT
CLASS="LITERAL"
>N2, N3</TT
></P
></DD
></DL
></DIV
><P
>The <TT
CLASS="LITERAL"
>cmgr</TT
> script to define this configuration is as
follows:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="PROGRAMLISTING"
>#!/usr/cluster/bin/cluster_mgr -f
define node N1
        set hostname to N1
        set sysctrl_type to msc
        set sysctrl_status to enabled
        set sysctrl_password to none
        set sysctrl_owner to N4
        set sysctrl_device to /dev/ttydn001
        set sysctrl_owner_type to tty
        add nic ef2-N1
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 1
        done
        add nic eth0-N1
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 2
        done
        add nic eth1-N1
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 3
        done
done


define node N2
        set hostname to N2
        set sysctrl_type to msc
        set sysctrl_status to enabled
        set sysctrl_password to none
        set sysctrl_owner to N4
        set sysctrl_device to /dev/ttydn002
        set sysctrl_owner_type to tty
        add nic ef2-N2
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 1
        done
        add nic eth0-N2
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 2
        done
        add nic eth1-N2
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 3
        done
done

define node N3
        set hostname to N3
        set sysctrl_type to msc
        set sysctrl_status to enabled
        set sysctrl_password to none
        set sysctrl_owner to N4
        set sysctrl_device to /dev/ttydn003
        set sysctrl_owner_type to tty
        add nic ef2-N3
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 1
        done
        add nic eth0-N3
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 2
        done
        add nic eth1-N3
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 3
        done
done


define node N4
        set hostname to N4
         add nic ef2-N4
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 1
        done
        add nic eth0-N4
                set heartbeat to true
                set ctrl_msgs to true
                set priority to 2
        done
done
define cluster TEST
        set notify_cmd to /usr/bin/mail
        set notify_addr to failsafe_sysadm@company.com
        add node N1
        add node N2
        add node N3
done

define failover_policy fp1
        set attribute to Auto_Failback
        set attribute to Auto_Recovery
        set script to ordered
        set domain to N1 N2 N3
done


define failover_policy fp2
        set attribute to Controlled_Failback
        set attribute to Inplace_Recovery
        set script to round-robin
        set domain to N2 N3
done

define resource 192.26.50.1 of resource_type IP_address in cluster TEST
        set NetworkMask to 0xffffff00
        set interfaces to eth0,eth1
        set BroadcastAddress to 192.26.50.255
done

define resource ha1_vol of resource_type volume in cluster TEST
        set devname-owner to root
        set devname-group to sys
        set devname-mode to 666
done

define resource /ha1 of resource_type filesystem in cluster TEST
        set volume-name to ha1_vol
        set mount-options to rw,noauto
        set monitoring-level to 2
done

modify resource /ha1 of resource_type filesystem in cluster TEST
        add dependency ha1_vol of type volume
done

define resource /ha1/export of resource_type NFS in cluster TEST
        set export-info to rw,wsync
        set filesystem to /ha1
done

modify resource /ha1/export of resource_type NFS in cluster TEST
        add dependency /ha1 of type filesystem
done
define resource_group RG1 in cluster TEST
        set failover_policy to fp1
        add resource 192.26.50.1 of resource_type IP_address
        add resource ha1_vol of resource_type volume
        add resource /ha1 of resource_type filesystem
        add resource /ha1/export of resource_type NFS
done


define resource 192.26.50.2 of resource_type IP_address in cluster TEST
        set NetworkMask to 0xffffff00
        set interfaces to eth0
        set BroadcastAddress to 192.26.50.255
done

define resource ha2_vol of resource_type volume in cluster TEST
        set devname-owner to root
        set devname-group to sys
        set devname-mode to 666
done

define resource /ha2 of resource_type filesystem in cluster TEST
        set volume-name to ha2_vol
        set mount-options to rw,noauto
        set monitoring-level to 2
done

modify resource /ha2 of resource_type filesystem in cluster TEST
        add dependency ha2_vol of type volume
done

define resource /ha2/export of resource_type NFS in cluster TEST
        set export-info to rw,wsync
        set filesystem to /ha2
done

modify resource /ha2/export of resource_type NFS in cluster TEST
        add dependency /ha2 of type filesystem
done

define resource_group RG2 in cluster TEST
        set failover_policy to fp2
        add resource 192.26.50.2 of resource_type IP_address
        add resource ha2_vol of resource_type volume
        add resource /ha2 of resource_type filesystem
        add resource /ha2/export of resource_type NFS
done


quit</PRE
></TD
></TR
></TABLE
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="threenode-example.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="localfailover-of-ip.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Linux FailSafe Example with Three-Node Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="configexample.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Local Failover of an IP Address</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Configuring Timeout Values and Monitoring Intervals</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Name Restrictions"
HREF="le28499-parent.html"><LINK
REL="NEXT"
TITLE="Cluster Configuration"
HREF="z957104627glen.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le28499-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="z957104627glen.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="TV"
>5.3. Configuring Timeout Values and Monitoring Intervals</A
></H1
><P
>When you configure the components
of a Linux FailSafe system, you configure various timeout values and monitoring
intervals that determine the application downtime of a highly available system
when there is a failure. To determine reasonable values to set for your system,
consider the following equation:</P
><P
CLASS="LITERALLAYOUT"
><TT
CLASS="REPLACEABLE"
><I
>application downtime</I
></TT
>&nbsp;=&nbsp;<TT
CLASS="REPLACEABLE"
><I
>failure detection</I
></TT
>&nbsp;+&nbsp;<TT
CLASS="REPLACEABLE"
><I
>time to handle failure</I
></TT
>&nbsp;+&nbsp;<TT
CLASS="REPLACEABLE"
><I
>failure recovery</I
></TT
></P
><P
>Failure detection depends on the type of failure that is detected:</P
><P
></P
><UL
><LI
><P
>When a node goes down, there will be a node failure detection
after the node timeout; this is an HA parameter that you can modify. All failures
that translate into a node failure (such as heartbeat failure and OS failure)
fall into this failure category. Node timeout has a default value of 15 seconds.
For information on modifying the node timeout value, see <A
HREF="z957104627glen.html#FS-SETFSPARAMETERS"
>Section 5.4.4</A
>.</P
></LI
><LI
><P
>When there is a resource failure, there is a monitor failure
of a resource. The amount of time this will take is determined by the following:</P
><P
></P
><UL
><LI
><P
>The monitoring interval for the resource type</P
></LI
><LI
><P
>The monitor timeout for the resource type</P
></LI
><LI
><P
>The number of restarts defined for the resource type, if the
restart mode is configured on</P
></LI
></UL
><P
>For information on setting values for a resource type, see <A
HREF="le53159-parent.html#FS-DEFINERESOURCETYPE"
>Section 5.5.6</A
>.</P
></LI
></UL
><P
>Reducing these values results in a shorter failover time, but it can
significantly increase Linux FailSafe overhead on system performance and can
also lead to false failovers.</P
><P
>The time to handle a failure is something that the user cannot
control. In general, this should take a few seconds.</P
><P
>The failure recovery time is determined by the total time it takes for
Linux FailSafe to perform the following:</P
><P
></P
><UL
><LI
><P
>Execute the failover policy script (approximately five seconds).</P
></LI
><LI
><P
>Run the stop action script for all resources in the resource
group. This is not required for node failure; the failing node will be reset.</P
></LI
><LI
><P
>Run the start action script for all resources in the resource
group.</P
></LI
></UL
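><P
>As an illustrative estimate only (the timings are assumptions, not
measurements), consider a node failure with the default node timeout of
15 seconds, approximately 3 seconds to handle the failure, the failover
policy script at approximately 5 seconds, and start action scripts that
take approximately 10 seconds. The resulting application downtime would
be roughly 15&nbsp;+&nbsp;3&nbsp;+&nbsp;5&nbsp;+&nbsp;10&nbsp;=&nbsp;33
seconds. The stop action scripts add no time in this case, because the
failing node is reset.</P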
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le28499-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="z957104627glen.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Name Restrictions</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Cluster Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Conventions Used in This Guide</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="About This Guide"
HREF="f42.html"><LINK
REL="PREVIOUS"
TITLE="Related Documentation"
HREF="x81.html"><LINK
REL="NEXT"
TITLE="Overview of the Linux FailSafe System"
HREF="le73529-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="x81.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>About This Guide</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le73529-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="AEN149"
>4. Conventions Used in This Guide</A
></H1
><P
>These type conventions and symbols are used in this guide:</P
><P
></P
><DIV
CLASS="VARIABLELIST"
><DL
><DT
><B
CLASS="COMMAND"
>command</B
></DT
><DD
><P
>Function names, literal command-line arguments (options/flags)</P
></DD
><DT
><TT
CLASS="FILENAME"
>filename</TT
></DT
><DD
><P
>Name of a file or directory</P
></DD
><DT
><TT
CLASS="USERINPUT"
><B
>command -o option</B
></TT
></DT
><DD
><P
>Commands and text that you are to type literally in response to shell
and command prompts</P
></DD
><DT
><I
CLASS="FIRSTTERM"
>term</I
></DT
><DD
><P
>New terms</P
></DD
><DT
><I
CLASS="CITETITLE"
>Book Title</I
></DT
><DD
><P
>Manual or book title</P
></DD
><DT
><TT
CLASS="REPLACEABLE"
><I
>variable</I
></TT
></DT
><DD
><P
>Command-line arguments, filenames, and variables to be supplied by the
user in examples, code, and syntax statements</P
></DD
><DT
><TT
CLASS="LITERAL"
>literal text</TT
></DT
><DD
><P
>Code examples, error messages, prompts, and screen text</P
></DD
><DT
><TT
CLASS="LITERAL"
>#</TT
></DT
><DD
><P
>System shell prompt for the superuser (<TT
CLASS="LITERAL"
>root</TT
>)</P
></DD
></DL
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="x81.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le73529-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Related Documentation</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="f42.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Overview of the Linux FailSafe System</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Audience</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="About This Guide"
HREF="f42.html"><LINK
REL="PREVIOUS"
TITLE="About This Guide"
HREF="f42.html"><LINK
REL="NEXT"
TITLE="Structure of This Guide"
HREF="x50.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="f42.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>About This Guide</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="x50.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="AEN46"
>1. Audience</A
></H1
><P
>The <I
CLASS="CITETITLE"
>Linux FailSafe Administrator's Guide</I
> is written
for the person who administers the Linux FailSafe system. The Linux FailSafe
administrator must be familiar with the operation of the appropriate storage
subsystem configurations, such as the configuration of any raid systems or
fibre channel systems which will be used in the Linux FailSafe configuration.
 Good knowledge of mirroring, the filesystems used, and any volume management
system to be used is also required.</P
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="f42.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="x50.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>About This Guide</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="f42.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Structure of This Guide</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Structure of This Guide</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="About This Guide"
HREF="f42.html"><LINK
REL="PREVIOUS"
TITLE="Audience"
HREF="x46.html"><LINK
REL="NEXT"
TITLE="Related Documentation"
HREF="x81.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="x46.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>About This Guide</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="x81.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="AEN50"
>2. Structure of This Guide</A
></H1
><P
>Linux FailSafe configuration and administration information is presented
in the following chapters and appendices:</P
><P
></P
><UL
><LI
><P
><A
HREF="le73529-parent.html"
>Chapter 1</A
>, introduces the components
of the FailSafe system and explains its hardware and software architecture.</P
></LI
><LI
><P
><A
HREF="le88622-parent.html"
>Chapter 2</A
>, describes how to plan the
configuration of a  FailSafe cluster.</P
></LI
><LI
><P
><A
HREF="le32854-parent.html"
>Chapter 3</A
>, describes several procedures
that must be performed on nodes in a Linux FailSafe cluster to prepare them
for high availability setup.</P
></LI
><LI
><P
><A
HREF="le73346-parent.html"
>Chapter 4</A
>, describes the cluster manager
tools you can use to administer a FailSafe system.</P
></LI
><LI
><P
><A
HREF="le94219-parent.html"
>Chapter 5</A
>, explains how to perform the
administrative tasks to configure a FailSafe system.</P
></LI
><LI
><P
><A
HREF="le99367-parent.html"
>Chapter 7</A
>, explains how to perform the
administrative tasks to operate and monitor a FailSafe system.</P
></LI
><LI
><P
><A
HREF="le56830-parent.html"
>Chapter 8</A
>, describes how to test the
configured FailSafe system.</P
></LI
><LI
><P
><A
HREF="le28716-parent.html"
>Chapter 9</A
>, describes the log files used
by FailSafe and how to evaluate problems in a FailSafe system.</P
></LI
><LI
><P
><A
HREF="le55630-parent.html"
>Chapter 10</A
>, describes some procedures
you may need to perform without shutting down a FailSafe cluster.</P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="x46.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="x81.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Audience</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="f42.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Related Documentation</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="About This Guide"
HREF="f42.html"><LINK
REL="PREVIOUS"
TITLE="Audience"
HREF="x46.html"><LINK
REL="NEXT"
TITLE="Related Documentation"
HREF="x81.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASShtml/x6931.html010064400016050000001000000156700717757404100140270ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Changing Control Networks in a Cluster</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Upgrading and Maintaining Active Clusters"
HREF="le55630-parent.html"><LINK
REL="PREVIOUS"
TITLE="Deleting a Node from an Active Cluster"
HREF="le15663-parent.html"><LINK
REL="NEXT"
TITLE="Upgrading OS Software in an Active Cluster"
HREF="le26765-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le15663-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 10. Upgrading and Maintaining Active Clusters</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le26765-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="AEN6931"
>10.3. Changing Control Networks in a Cluster</A
></H1
><P
>Use the following procedure to change the control
networks in a currently active cluster. This procedure is valid for a two-node
cluster consisting of nodes <TT
CLASS="LITERAL"
>node1</TT
> and <TT
CLASS="LITERAL"
>node2</TT
>.
In this procedure, you must complete each step before proceeding to the next
step.</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
>Do not perform any other administration operations during this procedure.</P
></BLOCKQUOTE
></DIV
><P
></P
><OL
TYPE="1"
><LI
><P
>From any node, stop HA services on the cluster. Make sure
all HA processes have exited on both nodes.</P
></LI
><LI
><P
>From <TT
CLASS="LITERAL"
>node2</TT
>, stop the cluster processes
on <TT
CLASS="LITERAL"
>node2</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/etc/rc.d/init.d/fs_cluster stop</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>killall cdbd</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Make sure the <TT
CLASS="LITERAL"
>cdbd</TT
> process has been killed on <TT
CLASS="LITERAL"
>node2</TT
>.</P
></LI
><LI
><P
>From <TT
CLASS="LITERAL"
>node1</TT
>, modify the <TT
CLASS="LITERAL"
>node1</TT
>
and <TT
CLASS="LITERAL"
>node2</TT
> definition. Use the following <B
CLASS="COMMAND"
>cmgr</B
>
commands:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify node node1</B
></TT
>
Enter commands, when finished enter either "done" or "cancel"
node1?&#62; <TT
CLASS="USERINPUT"
><B
>remove nic <TT
CLASS="REPLACEABLE"
><I
>old nic address</I
></TT
></B
></TT
>
node1&#62; <TT
CLASS="USERINPUT"
><B
>add nic <TT
CLASS="REPLACEABLE"
><I
>new nic address</I
></TT
></B
></TT
>
NIC - <TT
CLASS="REPLACEABLE"
><I
>new nic address</I
></TT
> <TT
CLASS="USERINPUT"
><B
>set heartbeat to ...</B
></TT
>
NIC - <TT
CLASS="REPLACEABLE"
><I
>new nic address</I
></TT
> <TT
CLASS="USERINPUT"
><B
>set ctrl_msgs to ...</B
></TT
>
NIC - <TT
CLASS="REPLACEABLE"
><I
>new nic address</I
></TT
> <TT
CLASS="USERINPUT"
><B
>set priority to ...</B
></TT
>
NIC - <TT
CLASS="REPLACEABLE"
><I
>new nic address</I
></TT
> <TT
CLASS="USERINPUT"
><B
>done</B
></TT
>
node1? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Repeat the same procedure to modify <TT
CLASS="LITERAL"
>node2</TT
>.</P
></LI
><LI
><P
>From <TT
CLASS="LITERAL"
>node1</TT
>, check if the <TT
CLASS="LITERAL"
>node1</TT
> and <TT
CLASS="LITERAL"
>node2</TT
> definitions are correct. Using <B
CLASS="COMMAND"
>cmgr</B
> on <TT
CLASS="LITERAL"
>node1</TT
>, execute the following commands
to view the node definitions:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show node node1</B
></TT
>
cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show node node2</B
></TT
></PRE
></TD
></TR
></TABLE
></LI
><LI
><P
>On both <TT
CLASS="LITERAL"
>node1</TT
> and <TT
CLASS="LITERAL"
>node2</TT
>,
modify the network interface IP addresses in <TT
CLASS="FILENAME"
>/etc/failsafe/config/netif.options</TT
> and execute <B
CLASS="COMMAND"
>ifconfig</B
> to configure the new IP
addresses on <TT
CLASS="LITERAL"
>node1</TT
> and <TT
CLASS="LITERAL"
>node2</TT
>. Verify
that the IP addresses match the node definitions in the CDB.</P
></LI
><LI
><P
>From <TT
CLASS="LITERAL"
>node1</TT
>, stop the cluster process on <TT
CLASS="LITERAL"
>node1</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/etc/rc.d/init.d/fs_cluster stop</B
></TT
>
# <TT
CLASS="USERINPUT"
><B
>killall cdbd</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Make sure the <TT
CLASS="LITERAL"
>cdbd</TT
> process has been killed on <TT
CLASS="LITERAL"
>node1</TT
>.</P
></LI
><LI
><P
>From <TT
CLASS="LITERAL"
>node2</TT
>, execute the following command
to start cluster process on <TT
CLASS="LITERAL"
>node2</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/usr/cluster/bin/cdbreinit /var/cluster/cdb/cdb.db</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>Answer <TT
CLASS="USERINPUT"
><B
>y</B
></TT
> to the prompt that appears.</P
></LI
><LI
><P
>From <TT
CLASS="LITERAL"
>node1</TT
>, start cluster processes on <TT
CLASS="LITERAL"
>node1</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>/etc/rc.d/init.d/fs_cluster start</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>The following messages should appear in the <TT
CLASS="FILENAME"
>SYSLOG</TT
>
on <TT
CLASS="LITERAL"
>node2</TT
>:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Starting to receive CDB sync series from machine <TT
CLASS="REPLACEABLE"
><I
>node1_node_id</I
></TT
>
...
Finished receiving CDB sync series from machine <TT
CLASS="REPLACEABLE"
><I
>node1_node_id</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Wait for approximately sixty seconds for the sync to complete.</P
></LI
><LI
><P
>From any node, start HA services in the cluster.</P
></LI
></OL
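><P
>The following command shows the kind of <B
CLASS="COMMAND"
>ifconfig</B
> invocation step 5 refers to (a sketch with a hypothetical interface
name and address; use the values from your own node definitions):</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="90%"
><TR
><TD
><PRE
CLASS="SCREEN"
># <TT
CLASS="USERINPUT"
><B
>ifconfig eth1 192.26.99.10 netmask 255.255.255.0 up</B
></TT
></PRE
></TD
></TR
></TABLE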
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le15663-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le26765-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Deleting a Node from an Active Cluster</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le55630-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Upgrading OS Software in an Active Cluster</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>
<HTML
><HEAD
><TITLE
>Related Documentation</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="About This Guide"
HREF="f42.html"><LINK
REL="PREVIOUS"
TITLE="Structure of This Guide"
HREF="x50.html"><LINK
REL="NEXT"
TITLE="Conventions Used in This Guide"
HREF="x149.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="x50.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>About This Guide</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="x149.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="AEN81"
>3. Related Documentation</A
></H1
><P
>Besides this guide, other documentation for the Linux FailSafe system
includes the following:</P
><P
></P
><UL
><LI
><P
><I
CLASS="CITETITLE"
>Linux FailSafe Programmer's Guide</I
></P
></LI
></UL
><P
>System man pages for referenced commands are as follows:</P
><P
></P
><UL
><LI
><P
><B
CLASS="COMMAND"
>cbeutil</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>cdbBackup</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>cdbRestore</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>cdbutil</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>cluster_mgr</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>crsd</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>cdbd</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_cilog</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_cmsd</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_exec2</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_fsd</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_gcd</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_ifd</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_ifdadmin</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_macconfig2</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_srmd</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>ha_statd2</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>haStatus</B
></P
></LI
><LI
><P
><B
CLASS="COMMAND"
>failsafe</B
></P
></LI
></UL
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="x50.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="x149.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Structure of This Guide</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="f42.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Conventions Used in This Guide</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>="COMMAND"
>cdbRestore</B
></P
></LI
><LI
><html/z957104627glen.html010064400016050000001000001066030717757344500153110ustar00gfergother00002640000003<HTML
><HEAD
><TITLE
>Cluster Configuration</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe Cluster Configuration"
HREF="le94219-parent.html"><LINK
REL="PREVIOUS"
TITLE="Configuring Timeout Values and Monitoring Intervals"
HREF="tv.html"><LINK
REL="NEXT"
TITLE="Resource Configuration"
HREF="le53159-parent.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="tv.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 5. Linux FailSafe Cluster Configuration</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="le53159-parent.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="Z957104627GLEN"
>5.4. Cluster Configuration</A
></H1
><P
>To set up a Linux FailSafe system, you configure the cluster that will
support the highly available services. This requires the following steps:</P
><P
></P
><UL
><LI
><P
>Defining the local host</P
></LI
><LI
><P
>Defining any additional nodes that are eligible to be included
in the cluster</P
></LI
><LI
><P
>Defining the cluster</P
></LI
></UL
><P
>The following subsections describe these tasks.</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINEMACHINE"
>5.4.1. Defining Cluster Nodes</A
></H2
><P
>  A  <I
CLASS="GLOSSTERM"
>cluster node</I
> is a single Linux image. Usually, a cluster node
is an individual computer. The term <I
CLASS="GLOSSTERM"
>node</I
> is also used
in this guide for brevity.</P
><P
>The <I
CLASS="GLOSSTERM"
>pool</I
> is the entire set of nodes available
for clustering.</P
><P
>The first node you define must be the local host, which is the host
you have logged into to perform cluster administration.</P
><P
>When you are defining multiple nodes, it is advisable to wait for a
minute or so between each node definition. When nodes are added to the configuration
database, the contents of the configuration database are also copied to the
node being added. The node definition operation is completed when the new
node configuration is added to the database, at which point the database configuration
is synchronized. If you define two nodes one after another, the second operation
might fail because the first database synchronization is not complete.</P
><P
>To add a logical node definition to the pool of nodes that are eligible
to be included in a cluster, you must provide the following information about
the node:</P
><P
></P
><UL
><LI
><P
>Logical name: This name can contain letters and numbers but
not spaces or pound signs. The name must be composed of no more than 255 characters.
Any legal hostname is also a legal node name. For example, for a node whose
hostname is &#8220;venus.eng.company.com&#8221; you can use a node name of &#8220;venus&#8221;, &#8220;node1&#8221;,
or whatever is most convenient.</P
></LI
><LI
><P
>Hostname: The fully qualified name of the host, such as &#8220;server1.company.com&#8221;.
Hostnames cannot begin with an underscore, include any whitespace, or be longer
than 255 characters. This hostname should be the same as the output of the
hostname command on the node you are defining. The IP address associated with
this hostname should not be the same as any IP address you define as highly
available when you define a Linux FailSafe IP address resource. Linux FailSafe
will not accept an IP address (such as &#8220;192.0.2.22&#8221;) for this
input.</P
></LI
><LI
><P
>Node ID: This number must be unique for each node in the pool
and be in the range 1 through 32767.</P
></LI
><LI
><P
>System controller information.
If the node has a system controller and you want Linux FailSafe to use the
controller to reset the node, you must provide the following information about
the system controller:</P
><P
></P
><UL
><LI
><P
>Type of system controller: <TT
CLASS="FILENAME"
>chal</TT
>, <TT
CLASS="FILENAME"
>msc</TT
>, <TT
CLASS="FILENAME"
>mmsc</TT
></P
></LI
><LI
><P
>System controller port password (optional)</P
></LI
><LI
><P
>Administrative status, which you can set to determine whether
Linux FailSafe can use the port: <TT
CLASS="FILENAME"
>enabled</TT
>, <TT
CLASS="FILENAME"
>disabled</TT
></P
></LI
><LI
><P
>Logical node name of system controller owner (i.e. the system
that is physically attached to the system controller)</P
></LI
><LI
><P
>Device name of port on owner node that is attached to the
system controller</P
></LI
><LI
><P
>Type of owner device: <TT
CLASS="FILENAME"
>tty</TT
></P
></LI
></UL
></LI
><LI
><P
> A list of control networks, which are the networks used for heartbeats,
reset messages, and other Linux FailSafe messages. For each network, provide
the following:</P
><P
></P
><UL
><LI
><P
>Hostname or IP address. This address must not be the same
as any IP address you define as highly available when you define a Linux FailSafe
IP address resource, and it must be resolved in the <TT
CLASS="FILENAME"
>/etc/hosts</TT
>
file.</P
></LI
><LI
><P
>Flags (<TT
CLASS="FILENAME"
>hb</TT
> for heartbeats, <TT
CLASS="FILENAME"
>ctrl</TT
> for control messages, <TT
CLASS="FILENAME"
>priority</TT
>). At least
two control networks must use heartbeats, and at least one must use control
messages.</P
><P
>Linux FailSafe requires multiple heartbeat networks. Usually a node
sends heartbeat messages to another node on only one network at a time. However,
there are times when a node might send heartbeat messages to another node
on multiple networks simultaneously. This happens when the sender node does
not know which networks are up and which others are down. This is a transient
state and eventually the heartbeat network converges towards the highest priority
network that is up. </P
><P
>Note that at any time different pairs of nodes might be using different
networks for heartbeats.</P
><P
>Although all nodes in the Linux FailSafe cluster should have two control
networks, it is possible to define a node to add to the pool with one control
network.</P
></LI
></UL
></LI
></UL
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3192"
>5.4.1.1. Defining a Node with the Cluster Manager GUI</A
></H3
><P
>To define a node with the Cluster Manager GUI, perform the following
steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Define
a Node&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs on this screen. Click on &#8220;Next&#8221;
at the bottom of the screen and continue entering information on the second
screen.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="LE15937-PARENT"
>5.4.1.2. Defining a Node with the Cluster Manager CLI</A
></H3
><P
>Use the following command to add a logical node definition:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Entering this command specifies the name of the node you are defining
and puts you in a mode that enables you to define the parameters of the node.
These parameters correspond to the items defined in <A
HREF="z957104627glen.html#FS-DEFINEMACHINE"
>Section 5.4.1</A
>.
The following prompts appear:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Enter commands, when finished enter either "done" or "cancel"</PRE
></TD
></TR
></TABLE
><P
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>?</P
><P
>When this node name prompt appears, you enter the node parameters
in the following format:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>set hostname to <TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>
set nodeid to <TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>
set sysctrl_type to <TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
>
set sysctrl_password to <TT
CLASS="REPLACEABLE"
><I
>E</I
></TT
>
set sysctrl_status to <TT
CLASS="REPLACEABLE"
><I
>F</I
></TT
>
set sysctrl_owner to <TT
CLASS="REPLACEABLE"
><I
>G</I
></TT
>
set sysctrl_device to <TT
CLASS="REPLACEABLE"
><I
>H</I
></TT
>
set sysctrl_owner_type to <TT
CLASS="REPLACEABLE"
><I
>I</I
></TT
>
add nic <TT
CLASS="REPLACEABLE"
><I
>J</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>You use the <B
CLASS="COMMAND"
>add nic</B
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>J</I
></TT
>
command to define the network interfaces. You use this command once for each
network interface you want to define. When you enter this command, the following prompt appears:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Enter network interface commands, when finished enter "done" or "cancel"
NIC - <TT
CLASS="REPLACEABLE"
><I
>J</I
></TT
>?</PRE
></TD
></TR
></TABLE
><P
>When this prompt appears, you use the following commands to specify
the flags for the control network:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>set heartbeat to <TT
CLASS="REPLACEABLE"
><I
>K</I
></TT
>
set ctrl_msgs to <TT
CLASS="REPLACEABLE"
><I
>L</I
></TT
>
set priority to <TT
CLASS="REPLACEABLE"
><I
>M</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>After you have defined a network interface, you can use the following
command from the node name prompt to remove it:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>remove nic</B
></TT
>&#8194;<TT
CLASS="REPLACEABLE"
><I
>N</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>When you have finished defining a node, enter <B
CLASS="COMMAND"
>done</B
>.</P
><P
>The following example defines a node called cm1a, with a system controller and one network interface:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define node cm1a</B
></TT
>
Enter commands, when finished enter either "done" or "cancel"</PRE
></TD
></TR
></TABLE
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cm1a? <TT
CLASS="USERINPUT"
><B
>set hostname to cm1a</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>set nodeid to 1</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>set sysctrl_type to msc</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>set sysctrl_password to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>[ ]</I
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>set sysctrl_status to enabled</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>set sysctrl_owner to cm2</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>set sysctrl_device to /dev/ttyd2</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>set sysctrl_owner_type to tty</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>add nic cm1</B
></TT
>
Enter network interface commands, when finished enter &#8220;done&#8221; 
or &#8220;cancel&#8221;

NIC - cm1 &#62; <TT
CLASS="USERINPUT"
><B
>set heartbeat to true</B
></TT
>
NIC - cm1 &#62; <TT
CLASS="USERINPUT"
><B
>set ctrl_msgs to true</B
></TT
>
NIC - cm1 &#62; <TT
CLASS="USERINPUT"
><B
>set priority to 0</B
></TT
>
NIC - cm1 &#62; <TT
CLASS="USERINPUT"
><B
>done</B
></TT
>
cm1a? <TT
CLASS="USERINPUT"
><B
>done</B
></TT
>
cmgr&#62;</PRE
></TD
></TR
></TABLE
><P
>If you have invoked the Cluster Manager CLI with the <B
CLASS="COMMAND"
>-p</B
>
option, or you entered the <B
CLASS="COMMAND"
>set prompting on</B
> command, the
display appears as in the following example:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define node cm1a</B
></TT
>
Enter commands, when finished enter either "done" or "cancel"</PRE
></TD
></TR
></TABLE
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Nodename [optional]? <TT
CLASS="USERINPUT"
><B
>cm1a</B
></TT
></PRE
></TD
></TR
></TABLE
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Node ID? <TT
CLASS="USERINPUT"
><B
>1</B
></TT
>
Do you wish to define system controller info[y/n]:y
Sysctrl Type &#60;null&#62;?<TT
CLASS="USERINPUT"
><B
>&#8194;(null)</B
></TT
>
Sysctrl Password[optional]? ( )
Sysctrl Status &#60;enabled|disabled&#62;? <TT
CLASS="USERINPUT"
><B
>enabled</B
></TT
>
Sysctrl Owner? <TT
CLASS="USERINPUT"
><B
>cm2</B
></TT
>
Sysctrl Device? <TT
CLASS="USERINPUT"
><B
>/dev/ttyd2</B
></TT
>
Sysctrl Owner Type &#60;tty&#62;? (tty) 
Number of Network Interfaces ? (1)
NIC 1 - IP Address? <TT
CLASS="USERINPUT"
><B
>cm1</B
></TT
>
NIC 1 - Heartbeat HB (use network for heartbeats) &#60;true|false&#62;? <TT
CLASS="USERINPUT"
><B
>true</B
></TT
>
NIC 1 - Priority &#60;1,2,...&#62;? <TT
CLASS="USERINPUT"
><B
>0</B
></TT
>
NIC 2 - IP Address? <TT
CLASS="USERINPUT"
><B
>cm2</B
></TT
>
NIC 2 - Heartbeat HB (use network for heartbeats) &#60;true|false&#62;? <TT
CLASS="USERINPUT"
><B
>true</B
></TT
>
NIC 2 - (use network for control messages) &#60;true|false&#62;? <TT
CLASS="USERINPUT"
><B
>false</B
></TT
>
NIC 2 - Priority &#60;1,2,...&#62;? <TT
CLASS="USERINPUT"
><B
>1</B
></TT
></PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-MODIFYDELMACHINE"
>5.4.2. Modifying and Deleting Cluster Nodes</A
></H2
><P
> After you have defined a cluster
node, you can modify or delete that node with the Cluster Manager GUI or
the Cluster Manager CLI. You must remove a node from a cluster before you
can delete the node.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3292"
>5.4.2.1. Modifying a Node with the Cluster Manager GUI</A
></H3
><P
>To modify a node with the Cluster Manager GUI, perform the following
steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Modify
a Node Definition&#8221; task link to launch the task.</P
></LI
><LI
><P
>Modify the node parameters.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3306"
>5.4.2.2. Modifying a Node with the Cluster Manager CLI</A
></H3
><P
>You can use the following command to modify an existing node. After
entering this command, you can execute any of the commands you use to define
a node.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
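><P
>For example, the following session (the node name cm1a and the interface
name cm2 are illustrative) adds a second network interface to an existing
node definition:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; modify node cm1a
Enter commands, when finished enter either "done" or "cancel"

cm1a? add nic cm2
Enter network interface commands, when finished enter "done" or "cancel"

NIC - cm2 &#62; set heartbeat to true
NIC - cm2 &#62; set ctrl_msgs to false
NIC - cm2 &#62; set priority to 1
NIC - cm2 &#62; done
cm1a? done
cmgr&#62;</PRE
></TD
></TR
></TABLE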
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3312"
>5.4.2.3. Deleting a Node with the Cluster Manager GUI</A
></H3
><P
>To delete a node with the Cluster Manager GUI, perform the following
steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Delete
a Node&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the name of the node to delete.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3326"
>5.4.2.4. Deleting a Node with the Cluster Manager CLI</A
></H3
><P
>After defining a node, you can delete it with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>delete node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>You can delete a node only if the node is not currently part of a cluster.
This means that before you can delete the node, you must first modify the
cluster that contains it so that the cluster no longer includes that node.</P
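><P
>For example, the following sequence (the names test-cluster and cm1a are
illustrative) first removes the node from its cluster and then deletes the
node definition:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; modify cluster test-cluster
Enter commands, when finished enter either "done" or "cancel"

cluster test-cluster? remove node cm1a
cluster test-cluster? done
cmgr&#62; delete node cm1a</PRE
></TD
></TR
></TABLE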
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN3333"
>5.4.3. Displaying Cluster Nodes</A
></H2
><P
>After you define cluster nodes, you can perform the
following display tasks:</P
><P
></P
><UL
><LI
><P
>display the attributes of a node</P
></LI
><LI
><P
>display the nodes that are members of a specific cluster</P
></LI
><LI
><P
>display all the nodes that have been defined</P
></LI
></UL
><P
>You can perform any of these tasks with the FailSafe Cluster Manager
GUI or the Linux FailSafe Cluster Manager CLI.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3347"
>5.4.3.1. Displaying Nodes with the Cluster Manager GUI</A
></H3
><P
>The Cluster Manager GUI provides a convenient graphic display of the
defined nodes of a cluster and the attributes of those nodes through the 
FailSafe Cluster View. You can launch the FailSafe Cluster View directly,
or you can bring it up at any time by clicking on &#8220;FailSafe Cluster
View&#8221; at the bottom of the &#8220;FailSafe Manager&#8221; display.</P
><P
>From the View menu of the FailSafe Cluster View, you can select &#8220;Nodes
in Pool&#8221; to view all nodes defined in the Linux FailSafe pool. You can
also select &#8220;Nodes In Cluster&#8221; to view all nodes that belong to
the default cluster. Click any node's name or icon to view detailed status
and configuration information about the node.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3351"
>5.4.3.2. Displaying Nodes with the Cluster Manager CLI</A
></H3
><P
>After you have defined a node, you can display the node's parameters
with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A </I
></TT
></PRE
></TD
></TR
></TABLE
><P
>A <B
CLASS="COMMAND"
>show node</B
> command on node cm1 would yield the
following display:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show node cm1</B
></TT
>
Logical Node Name: cm1
Hostname: cm1
Nodeid: 1
Reset type: reset
System Controller: msc
System Controller status: enabled
System Controller owner: cm2
System Controller owner device: /dev/ttyd2
System Controller owner type: tty
ControlNet Ipaddr: cm1
ControlNet HB: true
ControlNet Control: true
ControlNet Priority: 0</PRE
></TD
></TR
></TABLE
><P
>You can see a list of all of the nodes that have been defined with the
following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show nodes in pool</B
></TT
></PRE
></TD
></TR
></TABLE
><P
>You can see a list of all of the nodes that have been defined for a specified
cluster with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show nodes </B
></TT
>[<TT
CLASS="USERINPUT"
><B
>in cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default cluster, you do not need to specify
a cluster when you use this command and it will display the nodes defined
in the default cluster.</P
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-SETFSPARAMETERS"
>5.4.4. Linux FailSafe HA Parameters</A
></H2
><P
>There are several parameters that determine the behavior of the nodes
in a cluster of a Linux FailSafe system.</P
><P
>The Linux FailSafe parameters are as follows:</P
><P
></P
><UL
><LI
><P
>The tie-breaker node, which is the logical name of a machine
used to compute node membership in situations where exactly 50% of the nodes in a
cluster can talk to each other. If you do not specify a tie-breaker node,
the node with the lowest node ID number is used.</P
><P
>The tie-breaker node is a cluster-wide parameter.</P
><P
>It is recommended that you configure a tie-breaker node even if there
is an odd number of nodes in the cluster, since one node may be deactivated,
leaving an even number of nodes to determine membership.</P
><P
>In a heterogeneous cluster, where the nodes are of different sizes and
capabilities, the largest node in the cluster with the most important application
or the maximum number of resource groups should be configured as the tie-breaker
node.</P
></LI
><LI
><P
>Node timeout, which is the timeout period, in milliseconds.
If no heartbeat is received from a node in this period of time, the node is
considered to be dead and is not considered part of the cluster membership.</P
><P
>The node timeout must be at least 5 seconds. In addition, the node timeout
must be at least 10 times the heartbeat interval for proper Linux FailSafe
operation; otherwise, false failovers may be triggered. For example, with
the default heartbeat interval of 1 second, the node timeout must be at
least 10 seconds.</P
><P
>Node timeout is a cluster-wide parameter.</P
></LI
><LI
><P
>The interval, in milliseconds, between heartbeat messages.
This interval must be greater than 500 milliseconds and it must not be greater
than one-tenth the value of the node timeout period. This interval is set
to one second, by default. Heartbeat interval is a cluster-wide parameter.</P
><P
>The more frequent the heartbeats (that is, the smaller the heartbeat
interval), the greater the load on the network. Conversely, the less
frequent the heartbeats (the larger the heartbeat interval), the longer it
can take to detect a failed node, which reduces the availability of resources.</P
></LI
><LI
><P
>The node wait time, in milliseconds, which is the time a node
waits for other nodes to join the cluster before declaring a new cluster membership.
If the value is not set for the cluster, Linux FailSafe assumes the value
to be the node timeout times the number of nodes.</P
></LI
><LI
><P
>The powerfail mode, which indicates whether a special power
failure algorithm should be run when no response is received from a system
controller after a reset request. This can be set to <TT
CLASS="LITERAL"
>ON</TT
>
or <TT
CLASS="LITERAL"
>OFF</TT
>. Powerfail is a node-specific parameter, and should
be defined for the machine that performs the reset operation.</P
></LI
></UL
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3396"
>5.4.4.1. Resetting Linux FailSafe Parameters with the Cluster Manager GUI</A
></H3
><P
>To set Linux FailSafe parameters with the Cluster Manager GUI, perform
the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the FailSafe Manager from a menu or the command line.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Set Linux
FailSafe HA Parameters&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3410"
>5.4.4.2. Resetting Linux FailSafe Parameters with the Cluster Manager CLI</A
></H3
><P
>You can modify the Linux FailSafe parameters with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify ha_parameters </B
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>] [<TT
CLASS="USERINPUT"
><B
>in cluster</B
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;B</I
></TT
>]</PRE
></TD
></TR
></TABLE
><P
>If you have specified a default node or a default cluster, you do not
have to specify a node or a cluster in this command. Linux FailSafe will use
the default.</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>Enter commands, when finished enter either "done" or "cancel"</PRE
></TD
></TR
></TABLE
><P
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>?</P
><P
>When this prompt appears, you enter the Linux FailSafe
parameters you wish to modify in the following format:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>set node_timeout to <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>
set heartbeat to <TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>
set run_pwrfail to <TT
CLASS="REPLACEABLE"
><I
>C</I
></TT
>
set tie_breaker to <TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
></PRE
></TD
></TR
></TABLE
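><P
>For example, the following session (the cluster name test-cluster and the
values shown are illustrative) sets a node timeout of 60 seconds and a
heartbeat interval of 1 second, which satisfies the requirement that the
node timeout be at least 10 times the heartbeat interval:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; modify ha_parameters in cluster test-cluster
Enter commands, when finished enter either "done" or "cancel"

test-cluster? set node_timeout to 60000
test-cluster? set heartbeat to 1000
test-cluster? done
cmgr&#62;</PRE
></TD
></TR
></TABLE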
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-DEFINECLUSTER"
>5.4.5. Defining a Cluster</A
></H2
><P
>A <I
CLASS="GLOSSTERM"
>cluster</I
> is a collection of one or more nodes
coupled with each other by networks or other similar interconnects. In Linux
FailSafe, a cluster is identified by a simple name. A given node may be a
member of only one cluster.</P
><P
>To define a cluster, you must provide the following information:</P
><P
></P
><UL
><LI
><P
>The logical name of the cluster, with a maximum length of
255 characters.</P
></LI
><LI
><P
>The mode of operation: <TT
CLASS="LITERAL"
>normal</TT
> (the default)
or <TT
CLASS="LITERAL"
>experimental</TT
>. Experimental mode allows you to configure
a Linux FailSafe cluster in which resource groups do not fail over when a
node failure is detected. This mode can be useful when you are tuning node
timeouts or heartbeat values. When a cluster is configured in normal mode,
Linux FailSafe fails over resource groups when it detects failure in a node
or resource group.</P
></LI
><LI
><P
>(Optional) The email address to use to notify the system administrator
when problems occur in the cluster (for example, <B
CLASS="COMMAND"
>root@system</B
>)</P
></LI
><LI
><P
>(Optional) The email program to use to notify the system administrator
when problems occur in the cluster (for example, <B
CLASS="COMMAND"
>/usr/bin/mail</B
>).</P
><P
>Specifying the email program is optional and you can specify only the
notification address in order to receive notifications by mail. If an address
is not specified, notification will not be sent.</P
></LI
></UL
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="FS-ADDMACHTOCLUSTER"
>5.4.5.1. Adding Nodes to a Cluster</A
></H3
><P
>After you have added nodes to the pool and defined a cluster, you must
provide the names of the nodes to include in the cluster.</P
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3451"
>5.4.5.2. Defining a Cluster with the Cluster Manager GUI</A
></H3
><P
>To define a cluster with the Cluster Manager GUI, perform the following
steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the Linux FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on &#8220;Guided Configuration&#8221;.</P
></LI
><LI
><P
>On the right side of the display click on &#8220;Set Up a
New Cluster&#8221; to launch the task link.</P
></LI
><LI
><P
>In the resulting window, click each task link in turn, as
it becomes available. Enter the selected inputs for each task.</P
></LI
><LI
><P
>When finished, click &#8220;OK&#8221; to close the taskset
window.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3465"
>5.4.5.3. Defining a Cluster with the Cluster Manager CLI</A
></H3
><P
>When you define a cluster with the CLI, you define the cluster and add
nodes to it with the same command.</P
><P
>Use the following cluster manager CLI command to define a cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>define cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Entering this command specifies the name of the cluster you are defining
and puts you in a mode that allows you to add nodes to the cluster. The following
prompt appears:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cluster A?</PRE
></TD
></TR
></TABLE
><P
>When this prompt appears during cluster creation, you can specify nodes
to include in the cluster and you can specify an email address to direct messages
that originate in this cluster.</P
><P
>You specify nodes to include in the cluster with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cluster A? <TT
CLASS="USERINPUT"
><B
>add node </B
></TT
><I
CLASS="EMPHASIS"
>C</I
>
cluster A? </PRE
></TD
></TR
></TABLE
><P
>You can add as many nodes as you want to include in the cluster.</P
><P
>You specify an email program to use to direct messages with the following
command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cluster A? <TT
CLASS="USERINPUT"
><B
>set notify_cmd to </B
></TT
><I
CLASS="EMPHASIS"
>B</I
>
cluster A? </PRE
></TD
></TR
></TABLE
><P
>You specify an email address to direct messages with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cluster A? <TT
CLASS="USERINPUT"
><B
>set notify_addr to </B
></TT
><I
CLASS="EMPHASIS"
>B</I
>
cluster A? </PRE
></TD
></TR
></TABLE
><P
>You specify a mode for the cluster (normal or experimental) with the
following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cluster A? <TT
CLASS="USERINPUT"
><B
>set ha_mode to </B
></TT
><I
CLASS="EMPHASIS"
>D</I
>
cluster A? </PRE
></TD
></TR
></TABLE
><P
>When you are finished defining the cluster, enter <TT
CLASS="FILENAME"
>done</TT
>
to return to the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt.</P
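><P
>Putting these commands together, the following session (the cluster and
node names are illustrative) defines a two-node cluster that sends email
notification to root:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; define cluster test-cluster
Enter commands, when finished enter either "done" or "cancel"

cluster test-cluster? add node cm1
cluster test-cluster? add node cm2
cluster test-cluster? set notify_cmd to /usr/bin/mail
cluster test-cluster? set notify_addr to root@system
cluster test-cluster? set ha_mode to normal
cluster test-cluster? done
cmgr&#62;</PRE
></TD
></TR
></TABLE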
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="FS-MODIFYDELCLUSTER"
>5.4.6. Modifying and Deleting Clusters</A
></H2
><P
>After you have defined a cluster, you can modify the attributes of the
cluster or you can delete the cluster. You cannot delete a cluster that contains
nodes; you must move those nodes out of the cluster first.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3498"
>5.4.6.1. Modifying and Deleting a Cluster with the Cluster Manager GUI</A
></H3
><P
>To modify a cluster with the Cluster Manager GUI, perform the following
procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the Linux FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Modify
a Cluster Definition&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
><P
>To delete a cluster with the Cluster Manager GUI, perform the following
procedure:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>Launch the Linux FailSafe Manager.</P
></LI
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Delete
a Cluster&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task, or click on &#8220;Cancel&#8221; to cancel.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3524"
>5.4.6.2. Modifying and Deleting a Cluster with the Cluster Manager CLI</A
></H3
><P
>To modify an existing cluster, enter the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>modify cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>Entering this command specifies the name of the cluster you are modifying
and puts you in a mode that allows you to modify the cluster. The following
prompt appears:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cluster <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>?</PRE
></TD
></TR
></TABLE
><P
>When this prompt appears, you can modify the cluster definition with
the following commands:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cluster <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set notify_addr to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>
cluster <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>set notify_cmd to </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>B</I
></TT
>
cluster <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>add node </B
></TT
><I
CLASS="EMPHASIS"
>C</I
>
cluster <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? <TT
CLASS="USERINPUT"
><B
>remove node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>D</I
></TT
>
cluster <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>? </PRE
></TD
></TR
></TABLE
><P
>When you are finished modifying the cluster, enter <TT
CLASS="FILENAME"
>done</TT
>
to return to the <TT
CLASS="FILENAME"
>cmgr</TT
> prompt.</P
><P
>You can delete a defined cluster with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>delete cluster </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN3555"
>5.4.7. Displaying Clusters</A
></H2
><P
>You can display defined clusters with the Cluster Manager GUI or the
Cluster Manager CLI.</P
><DIV
CLASS="SECT3"
><H3
CLASS="SECT3"
><A
NAME="AEN3558"
>5.4.7.1. Displaying a Cluster with the Cluster Manager GUI</A
></H3
><P
>The Cluster Manager GUI provides a convenient display of a cluster and
its components through the FailSafe Cluster View. You can launch the FailSafe
Cluster View directly, or you can bring it up at any time by clicking on the &#8220;FailSafe
Cluster View&#8221; prompt at the bottom of the &#8220;FailSafe Manager&#8221;
display.</P
><P
>From the View menu of the FailSafe Cluster View, you can choose elements
within the cluster to examine. To view details of the cluster, click on the
cluster name or icon. Status and configuration information will appear in
a new window. To view this information within the FailSafe Cluster View window,
select Options. When you then click on the Show Details option, the status
details will appear in the right side of the window.</P
></DIV
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN3562"
>5.4.8. Displaying a Cluster with the Cluster Manager CLI</A
></H2
><P
>After you have defined a cluster, you can display the nodes in that
cluster with the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show cluster</B
></TT
> <TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
></PRE
></TD
></TR
></TABLE
><P
>You can see a list of the clusters that have been defined with the following
command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>show clusters</B
></TT
></PRE
></TD
></TR
></TABLE
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="tv.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="le53159-parent.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Configuring Timeout Values and Monitoring Intervals</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le94219-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Resource Configuration</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/z957117933glen.html
<HTML
><HEAD
><TITLE
>Deactivating (Stopping) Linux FailSafe</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.57"><LINK
REL="HOME"
TITLE="Linux FailSafe&#8482;  Administrator's Guide"
HREF="index.html"><LINK
REL="UP"
TITLE="Linux FailSafe System Operation"
HREF="le99367-parent.html"><LINK
REL="PREVIOUS"
TITLE="Resource Group Failover"
HREF="le41282-parent.html"><LINK
REL="NEXT"
TITLE="Resetting Nodes"
HREF="fs-resetmachine.html"></HEAD
><BODY
CLASS="SECT1"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#0000FF"
VLINK="#840084"
ALINK="#0000FF"
><DIV
CLASS="NAVHEADER"
><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="3"
ALIGN="center"
>Linux FailSafe&#8482;  Administrator's Guide</TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="bottom"
><A
HREF="le41282-parent.html"
>Prev</A
></TD
><TD
WIDTH="80%"
ALIGN="center"
VALIGN="bottom"
>Chapter 7. Linux FailSafe System Operation</TD
><TD
WIDTH="10%"
ALIGN="right"
VALIGN="bottom"
><A
HREF="fs-resetmachine.html"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="Z957117933GLEN"
>7.6. Deactivating (Stopping) Linux FailSafe</A
></H1
><P
>  You can stop the execution of Linux FailSafe on a systemwide
basis, on all the nodes in a cluster, or on a specified node only.</P
><P
>Deactivating a node or a cluster is a complex operation that involves
several steps and can take several minutes. Aborting a deactivate operation
can leave the nodes and the resources in an unintended state.</P
><P
>When deactivating HA services on a node or for a cluster, the operation
may fail if any resource groups are not in a stable clean state. Resource
groups which are in transition will cause any deactivate HA services command
to fail. In many cases, the command may succeed at a later time after resource
groups have settled into a stable state.</P
><P
>After you have successfully deactivated a node or a cluster, the node
or cluster should have no resource groups and all HA services should be gone.</P
><P
>Serially stopping HA services on every node in a cluster is not the
same as stopping HA services for the entire cluster. In the former case, an
attempt is made to keep resource groups online and highly available, while
in the latter case resource groups are moved offline, as described in the
following sections.</P
><P
>When you stop HA services, the Linux FailSafe daemons perform the following
actions:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>A shutdown request is sent to Linux FailSafe (FSD)</P
></LI
><LI
><P
>FSD releases all resource groups and puts them in <TT
CLASS="LITERAL"
>ONLINE-READY</TT
> state</P
></LI
><LI
><P
>All nodes in the cluster are disabled in the configuration
database (one node at a time, with the local node last)</P
></LI
><LI
><P
>Linux FailSafe waits until the node is removed from cluster
membership before disabling the node</P
></LI
><LI
><P
>The shutdown is successful only when no node remains part
of the cluster membership</P
></LI
><LI
><P
>CMOND receives notification from the configuration database
when nodes are disabled</P
></LI
><LI
><P
>The local CMOND sends SIGTERM to all HA processes and IFD.</P
></LI
><LI
><P
>All HA processes clean up and exit with &#8220;don't restart&#8221;
code</P
></LI
><LI
><P
>All other CMSD daemons remove the disabled node from the cluster
membership</P
></LI
></OL
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5830"
>7.6.1. Deactivating HA Services on a Node</A
></H2
><P
>The operation of deactivating a node tries to move all resource groups
from the node to some other node and then tries to disable the node in the
cluster, subsequently killing all HA processes.</P
><P
>When HA services are stopped on a node, all resource groups owned by
the node are moved to some other node in the cluster that is capable of maintaining
these resource groups in a highly available state. This operation will fail
if there is no node that can take over these resource groups. This condition
will always occur if the last node in a cluster is shut down when you deactivate
HA services on that node.</P
><P
>In this circumstance, you can specify the <B
CLASS="COMMAND"
>force</B
> option
to shut down the node even if resource groups cannot be moved or released.
This will normally leave resource groups allocated in a non-highly-available
state on that same node. Using the<B
CLASS="COMMAND"
>&#8194;force </B
>option might
result in the node getting reset. In order to guarantee that the resource
groups remain allocated on the last node in a cluster, all online resource
groups should be detached.</P
><P
>If you wish to move resource groups offline that are owned by the node
being shut down, you must do so prior to deactivating the node.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5838"
>7.6.2. Deactivating HA Services in a Cluster</A
></H2
><P
>The operation of deactivating a cluster attempts to release all resource
groups and disable all nodes in the cluster, subsequently killing all HA processes.</P
><P
>When a cluster is deactivated and the Linux FailSafe HA services are
stopped on that cluster, resource groups are moved offline or deallocated.
If you want the resource groups to remain allocated, you must detach the resource
groups before attempting to deactivate the cluster.</P
><P
>Serially stopping HA services on every node in a cluster is not the
same as stopping HA services for the entire cluster. If the former case, an
attempt is made to keep resource groups online and highly available while
in the latter case resource groups are moved offline.</P
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5843"
>7.6.3. Deactivating Linux FailSafe with the Cluster Manager GUI</A
></H2
><P
>To stop Linux FailSafe services using the Cluster Manager GUI, perform
the following steps:</P
><P
></P
><OL
TYPE="1"
><LI
><P
>On the left side of the display, click on the &#8220;Nodes
&#38; Cluster&#8221; category.</P
></LI
><LI
><P
>On the right side of the display click on the &#8220;Stop
FailSafe HA Services&#8221; task link to launch the task.</P
></LI
><LI
><P
>Enter the selected inputs.</P
></LI
><LI
><P
>Click on &#8220;OK&#8221; at the bottom of the screen to complete
the task.</P
></LI
></OL
></DIV
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN5855"
>7.6.4. Deactivating Linux FailSafe with the Cluster Manager CLI</A
></H2
><P
>To deactivate Linux FailSafe in a cluster and stop Linux FailSafe processing,
use the following command:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; <TT
CLASS="USERINPUT"
><B
>stop ha_services </B
></TT
>[<TT
CLASS="USERINPUT"
><B
>on node </B
></TT
><TT
CLASS="REPLACEABLE"
><I
>A</I
></TT
>] [<TT
CLASS="USERINPUT"
><B
>for cluster</B
></TT
><TT
CLASS="REPLACEABLE"
><I
>&#8194;B</I
></TT
>][<TT
CLASS="USERINPUT"
><B
>force</B
></TT
>]</PRE
></TD
></TR
></TABLE
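><P
>For example, the following commands (the names cm1 and test-cluster are
illustrative) first stop HA services on a single node, using the force
option to shut the node down even if its resource groups cannot be moved or
released, and then stop HA services for the entire cluster:</P
><TABLE
BORDER="0"
BGCOLOR="#E0E0E0"
WIDTH="100%"
><TR
><TD
><PRE
CLASS="SCREEN"
>cmgr&#62; stop ha_services on node cm1 force
cmgr&#62; stop ha_services for cluster test-cluster</PRE
></TD
></TR
></TABLE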
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="le41282-parent.html"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="fs-resetmachine.html"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>Resource Group Failover</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="le99367-parent.html"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Resetting Nodes</TD
></TR
></TABLE
></DIV
></BODY
></HTML
>html/figures/
html/figures/a1-1.failsafe.components.gif [binary GIF image: FailSafe components]
html/figures/a1-6.disk.storage.takeover.gif [binary GIF image: disk storage takeover]
html/figures/a2-1.examp.interface.config.gif [binary GIF image: example interface configuration]


!,N	H*\ȰÇ#JHŋ3jȱǏ
ۙIɓ(S\ɲ˗0cʜI͛8sɳϟ@Sh.ѣH*]ʴӧPJJիXjʵׯ`ÊQhӪ]˶۷pʝ,t˷߿=+È+^.^Ɛ#KL[Qin'nwvM)n:[^ͺM1Cx~{w⸍Me?wt6seS;8FŁ[uݎrmv/mmw|6#ϿwE\6۴:N77N<EC8G:Y;ߌM<Û<dNzb94hk9E;)f8|N6%Ah:X^ÁH|@(;(9]`&cSn9iCHf6WuS68pÛt案&:טL)d7֍ēb6X
9L6Q#mS]!Q#8qZ7)*무RnbCt:ҁ$oY8܉::Xk,ժ>uRޏsHgtӥΰjrN]fkmTY.ͥIu)6T7.pW<gpa ̕"lK,r02ls	'-p*J aD@GwA*qG*cXg\w`-dmhlpM-"*sR+v|GEK< )DbI.5(uk2(BoC
4kqd)J./o'7G/=")|cahl mpCn=1M%ŹL:ձul7:'H
Z}/|+ҷ/~򷿤lsJwԭujw;@H"Rz>|S?~P4,
ЈhL􄁌6w8a(O@0>qxLb'P&b
-(^	hP<&79<d$ݑrrL^(^p$A'0%"
B-HXC0;d ,Vn̅/vay<gq[w.n\E.n	^D.tql3{Nf3TB(p'
Bc%YLit4olகFCqN`2E'tff/vRPJ^tb.BɋH`@Y˂2	]d/`B$9K&I
[Pӣ)GRu]:sʕJӤ0>`袕/LjWOvbMJKU$/ȥS[%iFze
w3C9P+/n^όGCAyh!o@Um/?
,E1Z7zGN_ՓќbWYXe.vE/nt-^p	iK7L/jU\6%#u/#.Bμ59]0XvĸE}y:ŻoS[*s\JԘ
*IWN`F;ND'RU-EI_"XƎ%yV=F0Y3ۙx.Y3x@.ܻ=йCpkc☪*p6٢δ7MDEwk֥ݼG8Ε3g1VSNVQDQ
PxpA&6q.hAB#DL
(+!aζn{v4ArC#NvMzۻ67}{c"aB
TG8Y$	/`D&q|E5F7IHʛ!+(_y[3I(2w\燢q|rS!%(HzeFO7Է3:uCꦮl=`ı;ݣw/6};ow9idx,>5{CfY˯j@y8K'q=9&2}rߚOW{?:~e#[Ο!Ӫ>co}Eq1_+&1?Onv	Qo$12WBW,q
؀@
0tȁ (8((
B
|s.,wJ1;Uт/HǃT4xtD8{F8H(J=XKzMh3WXzYO8Qa!E8fȄTvVXbh_xacXLׅ2LJBHᇐ0Cg!G,{)ÈF!tG(mFvh2Hvclnȅphrh{؊`xh0HXX1(kqr8vixru"X[w RwuX7x؋SQǍHو887#GGX؎ى8ؐxY3sYxR;C/(Qc3&12Y0	8H)JN1/b"51;ل>@iB0EyyM8K)Z) *I1K3\	^0`)a#"" B
'|)!B @02/
@"谗}ِ YyQsv'y	)yf˜I YI`陠)A)it!1ICR!
!! 0%Aaɞ+))H"D2%I$)yYH)ܹ
Y,iɞY)Yp),,ڈ)BJ9gYA,+̢)УQ )h, @C)8zC£yCj<ҖRR
9&b=FݡCꙪҘ,]&Lꤟ<TW)R@\ʓJ+a
"9lrq8j!Q
'aC1p"B1sZʓ⨐ʥF1XYpj,Jѩ*@:uo2F
#ujE#Ao]
vJ c$Z
')jʥ:
+¬)#҈
"jqJʬJ$D٧
"jsA6j*9:"!-
o)r a:$oٰHǚ
+(@(z"A~Zr:58?CB!,*zٰG!
,`+zU۰N;e;ag)O{c[сI CDɷωaR;$P>Ѷcp륄z,ُGۆه;3(rAHQaj[V1
aMaGqRXё00d
Kr0tk-PqnT{H{{]ۥQz*Z!1  L$b"H .QaskGﻖU!yiƲ)pc!<Ҩ18D,
:.GA۰ݻa,R ـ"ֱs*SЫb/$1,!68I=,@AE|֡ƥ,aČ$!T],	F2+|s9F!!吙=J)# 
S%¡HC
8 H2	 |a\ڭ/QG*C*FL"ʡ|,C\ɰ̤a𛱷*tLI|,l[RA.>bΦ"c#Q#l$yh,J1&,uH)K'l<=l]: '*;J&ꃒ醐Y1ś'"Q&ʭ#)2ЋW+N&Kвȓ#1GQf(,ZEx\]&"BUͲ-}Y,wmac}:m]n!!B݄&M)g"R.
R^}؍>%jÌ!̡ٯ"]ЪӒنm0:Ȯ	ZDHΎ܄fz,3Ȑ@\ȽЀ$|3˞Y)*qލZ}<"䀞
pBų<1l-bl0̂##]Ý)n@3M*˓.,۰)	ΧdQ|L./A!
ڰ풘j+-ᦵl,#f嚲/N@+_K@CnFBTP.R>$T"pN$3>`]+=b!c>u
:ޑ#0Di~F,~"Qۛh-xj00~,逡Y!Ʊr`aXYN+ՎވA
_+QN1A>+8H}i9)>*~ƚ(
c)O"(@mJ
n"=$ϡş95ILHoGQ~!*33:_`&.)'HzGi3)EOzqﲢ昲03@)Mz%b%QL MV),ώ70Rnj)M-yQL",ܡ9"322 \*<)\ߚ}e
ً_o/ښ
?.<_2o͜>!ʨyo",])#RW&~	e#HA	.dpa;%NXE

xA<N\6p#Ȃܲ$1wfO\mـf#Ws6zx-AC
l`9u1x23mH29zYHcǐ=mu^48:uu;wNqNe]^&>7yvk׮N_,L4CUf:bێEn{,)αǠYsΟ96#8MV-[w٧Gݡ,Pن(wr'+͍\m^4BntAM<<R%{ϰ>Ĺ2b/@9p ry/BȻ*#mkw%R"l	ꦎ:JpHG3Tsc06"+l<ඔR(f,ҳKD15%BS"s7ѓwd. utXR%t)g0(iK::Y,w
VG2KFQvXb5
҇SMwsO52ZO&UUXezjm\~̤bz4H}heȩ?	ۈn*| pXF	*@Q$|Z.dL`DS+ڭMdE^{}MNF?rX'&.(!Konl]6bHK^jnank&Kjch)v`6mq[!%:^VH)'b%h]W8|r%".Ç>(<"c;x]Ƞo5rXՑuHWR<዗M`]!v}wzO6#/zu
>|[KFRY׿r
!+hAU&OMSg~Rw=QQ<^%]U{G;M	 ǧѤ|9I@tu
T{[W;u04.<HhBp7+l`XwC';bW)E`9^؎ 
{ߑפ:Ro!C!S)?QrEНtPo1џ2CI֊7D"z͈p#TRG",T@
Tjnn0Dʐ䳎Bƈ%轴!u[a5#̅eOUBFR\!wd&fa9@ٍeҙL*	2Ԏ3Gq!QZr86Ĉ:*!ntUR%-A*z3|K "Bz4x4'ʐy%I>J}C5_#	JU_B
EGL
\*!RmV9vU=RLƖL%NFAV&kr%,-BTdm̉?J
/J|֗F%蘬pԈ'`݈
u0ЗiZ-$6$Z֪fK+k#TPȑE=aCD/OmmQ]ym0d+HVN<nG"[Z7ː(ʺ;_y'nS<e7KF{e؊hl!E7a"j(LIGڭJLL )bc˱cis5BVAj&9ec'd[I2Y9`|/9obaecs;Iy.YY0:M{taB̉F(?kъT\iEZƴ;5N:X柨l8"Bj3-zҪ5mZ-lzڑi=lNS}lfXْުkzڛ6hhwQ.mnWNmF9bw罚o:XZn~'85'oOC}3([?p3M(7yGVrns>\59qg(ts^ҙ
cjQ[!F3ŭtS}=8î6GVu;U^w۹cL=|ݼlW3⯭3̎6.y2S>ږ?;yf|H$}MzYgY9^gӿ;{H{_O8|'Gϧ&s8oȞd7ʾl_(oF_ǻ=C!@[1̱L?1Ԛ|	\@5A$S
1Z\Sx@	\ܾ<A=AT
l- $\t1* $Ĉ%<'D8irJY*ؖv)1t3,rWaC	qCCCz۲)98؆@4B4ڗ:9	(y>|6rJİBD9`qDř|وmXdBCN|H
(Ȑ̈a5d	42-'r(h5
\9w()Iu(!tld	 Z-uӜQ$ŴAv	-f	Nj},ml%o	5$}<n|dGwxly4#5V*-(%79q+,H&0qŎHz8I7%0I$c쩗I
ȸ-EbIͩ>࣊!	$!(BQ&
9xubdn P˦q;=L,.P0ܨ˺LK7LȄܫXL؉q8ن|h;1<=u{S\ܬ L0N9	k\*$٧XkNAͷ1H:Y<ǜO8N$>1O,ӘNz	pLTߡ!P:LPHëێ$v\ưPQ\pl
i2UUQ0=L5UF4,ܐO$	l\"ݺpASr@:Yzq5Ep^t|L4
>1=
ۢxGB
޻s#4
|M0ąbMMSwTYTKTC,S9C}%Q 1CM9qO8a\O1DŰ/s8	f`qjx#p+_m5;V VG|aH׌)߁:Ub.b
uVp.=Xn#C]n#-z0	_z}7JԗFWEMO_m?Y;Q5!/`C8Ǡw3Lp!BٳA
ñMڒZ>Vb˪0lT`Ǹ,]$.
xZb:W)&1um
v;UO|YY$לuH}!	9&]2m`JrHlС4T/b]f;RY,\#~q(Y*=i[[5܋"4b	my(!Zh<XnÍz̒0C9)0eE3EY
	Cܖ0!s_GR=&5^Uaڛ|$,b͉kPJj	
u
b<M
'MXH	fiZ oj(rqA,^I쨓*BݷJ"	sP
2/6[p,~	xPb,b8b*I=gʫ`^{Ya腘š,رk:S`dM*C^Jx]xHZ]QuX:	e]q8!.KnLM[<QRVyZ=7bAǍ["~e8ں:݂А8m&[VҪFYlYHQNVtgrgˠg繥(-=!h]	LBU"YAiE],,g9nh0߶MY6jjj.@jjkk.k&kviijN&$~`&:a_p*XR	쨕*aw1ew"1B>e4^mnm~m؎mٞmڮmۮmP댳^8lZf0}lђ웸-.R2<c맿`Жц{ՙ֙u8jhhP0p8
@JH	;,\k^Ev1ojH.p?pOpmgo4¥$Ao֌&
Fϱz	ee@cd
\(so pqqofZuqKVq"/r#g爣e.S#r*$q2van\ږ-a..r6osmٴbن(7ilYaqxHYs7otG_+7@G!NBCoTyp
JgqI/lzj?Yzto,2"JYBMGz
zemY	:#E׈PdOve_vfovgvhvivjmX=
TWqdYzʐY'ZO_[\]?^qxO8r/;QG)mY	!
%=\@m1w^wO'.xp.XxxNxfZxOmB}-9W_GQY	F;]#aw fOin򰗣O:vAc0oF{z5
izpz.̹{ka'_yz]!))pψtቾx|~߳FLO;|w|Ï,@r-G}Fw?ϸv}H_a(׽NDr0co׺֗g~}/ww߸xY)u}OlfN`omSw0ar
o۶wݸ#n6qQM:՝f01=4Hn]Ay$K7s?-j(ҤJ2-z@>MȈ
ZĨ1G"A|y2%r,<8sfԚnmoܠʗ9zB}]7?E,N^<gבqey.E^ʘ%ݶ0htѕ{:@wadʖa~B']zVlz`ٵE+qK\=#MO>G
aM{;!: nB og!_qxa8G$*塉8`+*U18#5x#9#=x^AN#Ey$Io
$QJ9%U礕Yj%]z _9&eeg&m^*X۸]bEqڛ}֙g6*b7L^DeN5УDQ:8Qeu~j*a/5i~39wM<LWeFMiRn47.J穉:ӭ@=sn삞D,sk4FE@V5e:UF&F;YﴣS6NAPrs7#i#:AE,vp虜#/[kѼ:p︖p<wq<B=,:+eVj`j6D%:;vMP6FH'AtSj08UX1CER)%tOGFU=*8T+Y]jRs6ԋvpJe\T	E씴\~CNd>:yO
UJjmۥ:@Y%-Ie1>zO@S
/F/P7o<h⹇#$+::E'cʏ1OZuYJ{z;,8k*C	Y'vKk`Q̟9B3;L-(|[5ل0	1!3qtO (^
PMXsO!fb7Nép..t1䆫2l%퐙H#jHI<EG̱&yWs M/1Kn^rmB$""blsEA6Ahϝq"?Zz[d!-4CAI
aFnT*1/^8/\@p#5[8xh션%.iYuIR~i,Nf,0u^0\9}@-3(vbuL@3+G=l+)Ё\,(BP}g}(DЈR-D/эR4(H	ѐISRt.}iQ
әK\^rӝ>)P*ԡF=*Rԥ2N}*T*թRVխr^*X*ֱf=+ZӪֵn}+\*׹UU+^׽~+`+ءִ=,b2},d#+R,f3r,hC+ђ=-jSղ}-lc+Ҷ-ns򶷾-p+]]2}.t+RJ'vr.x+񒷼=/zӫ}/|+_𶣥ǽd2ʾI>OH~0	Xp-
7XpzsJc3
:Ximx®egpnj[`Wh̡G7ؕ(q#5MB"1sS,j1o]9x~G˪z2xus>,X֭/uLn*/IѺ8<S	:rErEK
9Tǖ;W|ʉ!zfF+oPCT6	QbWyMh$
QE^V@||Cp0u CDنB:&
خ66L:ôá$r>-J/a3d\mKBW8TL8AD+Sd)DTNxwG|/k︎?r7J'H	cdb$A,4H@B1!F?:ғ3N:ԣ.SV:ֳs] C (@`A0	ID&4	cD2@QQh'JF%&=t@0xp3A`8E)b\?=Sճ=c/Ӿ=s+`!Sa`3A
kxb 7ȁްC8BPT0x;򒧼1y{=?^
__-_=_M_]_m_}__J^-^=^M^]^m^}^
  !1AQaqq`} `N!/B///B^ `!$!y` 2vB/^
22t Vڠ` 
a` Y",V$^/B.P,/B.`.h./b.B/3%/b.B/B0r,"8 &` a!*)_ba+Fa8cb.b.dM"0-3"0@b.0-B$2t00CN0t.0C(3ha6t!3BC
CHB't/L,t7K8&~:(cģ*:$Q`!bd.C
dB",<d'Ra/DT/B
/3BA0pe("2BG[_Lj9';ڡOO#+B! %aQ7*Sb!$X,B.H$04BY.YF&/tfSB'i^\cvb؀Me;a)@_#ifc)f3F&0@S."QdNeWF.. 0"hf@2,ov&r":]bl"<Ҧ@_Pz|pRvB3B@΂cN-BiBd*dW>$3آCcYB.%|NLeky!zzf{*PR"%.h(4#A(B*$C#
-F00JvajeYdI6e"/Z\&y^D2{%>")5.pe.f#6*Y#^3WjcKb/5zcΧxj(;rNRO2mRZ`)&Ra6)()b_g.*v_$AMl睐'pBp"\
 8 B-+&.+6"2(N2,ó^+fn+v~++kRZk+-B-B܁A$<A B

X#D%'X5DiHC3\,64Xl^U~lrlƞ,,,Ʋ,>̂,Ԭl~͊,,"lк-.2Lm&mX-JIn-~-|mڲTnڦz--ֲI-nmBẉ
R,.$n8."	HNnQd\p蒉膮J.
z	vm"mjJnr~jjIrH2nޮRNImzIRmVX|wM3/:4/nMqC=oگD};00'/07?0G0M5`]0347|00		0

0;TD`a~0
L}_V

0/17gJ?1go7DKM1
s1(DZq[VD1qױJqeO2"0  S!SQp"O2%Sqq%w'w%1T'2%/I5d=2*$2,#2**K++sr,2Irdݲjr2/rH3d	3qn32O+3H1c9z15w[Gaci3){918s879s93F3c344ó><k=/=s_>4	Ebt.Cs((CWt#E)tb1tREctEi4bqt}4HHSIIG2J;tDaQxL?4MCMNNACtM5Q5QQ;RӔRXL755?BELM5PTU#3V+VWoWuDq&7Y;uZZwWu,uBKu1ӵJ#I5<5BK@LmG
8Jx
ϔ +9vAMO|Q@!hv4s9{6A65lCm,GE<ܶLC5CoSnL;FA6nSS7=DoMS1T	kk{3llGԆmhBC9Tw1OGlTFd[n!9pf|FYz'ƫ8^Twy4[97[twAwr(D9T;ͰD@xF.u$0axJ0pxqDOkH8SWx]_AH<IM<ȃDG-ˏo:N48,V7Y^d
IH90CD^D[CG`8Gf]Q;x6L9Xy3IhNL9F9T[֐Id7O$=@ڔD@ڊěE4Qit#z@eL[ԴKԋ\Fp;w79OKѺm8O=O28-uEvdP
DӘ\l	_.g?J2{);?.qÒ3YNω5$mCmO҉)\
I|CǪu;yHEzDXؙO8˧ͱC9<}!1;A3|atR<ȓcx;Wā6,Kp^PN<HRCˮXI`}BdOSIĪ#[Wi7ƾ}%5%a,=#7:[DF{k̼!xq{pْ:LDOY!Ә=2E/
HA=U'}"%̫EpXJ?Ű9S8zMZH9WM9,LsCFtyEO(<=fދøyO>dwϣ(DDvGXĺv߲e\~kWr9\HoaBnanЅ4y%C)YGKyygN;yhPCNfRK6u*o
Yx2^oʑpAS~˚P\6nf&l*(^Z	ˉ[ƓWՖnۻ+qxf5\.a6ithѣIsjիYfٸUᶊA$G෰cn]A;wEP^d<nĩ[͹Q7n{됲ri7Z}~]S^7tԉˠuԑ(vv
uʉgx|<4J)IPv;H*ҎwD+uA0'.F1l&<$&mqkʓjf.+I.iL
*NlL>Sr@o%8R "JZL5UMrt)qTSPet,]VXeYe2IS[zT%ub냕detOEIn#KhqTɖ!1YQe
UT5tCCVvԪۉǻ&ɆSj gW=IxuSQ*7s}bu}0phckE*i`>nh7)42=81OYOba#W+ɋc[ENw*|ZiyFEЗr{11icr5.vV`d[u6gt'&p̌CVuA.q涡mP%6,GnX@r#y<#2!5n$T
׽C;30Wp(N刮qh!܊J<9@L.W\:UߟV>iy&:&D!gԉP8A83B%O9\|cAy3`EaZїX!DBC.곍r.-D3ax~1U#zq$V
|1dS9qNcJdz-LG(Xy\."R)y$FZ6u2npL'RAQ0eU(j2yؗZeȐFiHv&RT&}.I*V)3Wsłd[\6LjfСo>ŚTMwě$\83%lÞNOu6TN)ϡ%/p$.!m3.$)y[PR>&UJ9p4=f̕ЅTh#s2#yR:XԅJ[#"҂.}iTҘ6jVwP7\H#xǔIKt9L92DQRZɃE,c6UB$B"؀&zt`-u0[NWRVUī_WXfc!eqQߨE!Q	bf*Q%D+!)"Tx$0
`n*	H@L*ZT=iSdA%Zhl8.,j.`pg9Cұ$@@w
pW).VjVX=m}k=r"fY+
)8 Ab
&!kGJ0@8PRX*b=P
@ vU_dX\q%F<Ns
YcT+a7OO0cS7;v 1xK|2V/+SrqLovXT'ˇ8-CH4H3+\#BҒ9JllGխ@4]p@ Yx`݀JS*[Nu$F	:;c[iaQE|}GFLB	\@8!+Ybzx<A!bSuw=<*SjUys),_˜˼h7I;J!ԩ^u_Vo7t]GG:husKq3MFtOiO~εk܌{[Cw;2i^OYxGK~J<&)v~|c}UUO4[߿FWg'\{G~ňBs|->Ocxѧ>/+g_~R釻-|
IwoD?wP]P"Pw+O7V<&ooISbVZOOg!zI:pyrxTP00]hBXP0YbɐX̐0
Qb
ٰU00?J'9p0K0NP1Q*~bRp0+_hB?Q:EQMH0R1Waeq%s>P1EQ
I1ђ1q4vz1E*(ƁP.9Lݒ R R a.R!r!E"R.!#"7	*<2SI6H#y%$O%$a#%WRVZjIbIP#mҙpt?xr'g(5E(N)())&ErR=r?|&򋚲5*R,+N-W,0,G-L^蒢2S.-e./l/c%.en؎/oV1L$1)p4! sV$2/Վ-%A:
G<(0A4og24Q1QdcJ8%W3$\D,ܢ911vwn0s]vSz7gN5i%6
31%zdjTbii,Fn>l&idoTHS;s9B,/ܢ7h*-0B<B1iGDPa&r>7鈦$.f&StF5^B9Giz#zj*nd1mHH2~zB"1Фs"cGR;bALǔLLˁL״LKq0RN	s,"H&G C#4
OiʈG(rpgkNL/TNM"*ݡA$Nⶸ#e/=T{hPcDZ՜pRlD96KS;5(>*C =荊T
mKmڈY4h*Q4qި?8&zT¢8%S#XebX'S]s?HKH	u:@BȈB !!AЁ!
EOf<d<$K	4!/]d]Y]ݵ'UShD&D)4$To(+?6%BVdwdħ9lO"-lD"$8!=4JZ
"_N8FL|"6eiWbcfQfo6'rhΔ0tޡ>ElPmՉ$6 bb@G0e*$kol{1M2Ō <q6ؠHBA7,@rtѐ"t)4jVusskcL*g57?65qybqw8&lr-yP	"!cVyIWTw16eS5v,F(j
t
鸦C&p6)wxuxcCuWyɇyyc,Qmš7aAl0u7j``wѡb'@,ⳭdT-*e
X#T$wxǡ.):}ct#"ÍFB|A7ד5N15cDnBqzO7nkQY!8`?&mc!"7IX)x8qaMB(8vulmUbOD= 8_.^VߑWGF·{AsL7\%B0el^F.SB'!Ai3JrNDyPLzj;?YO6rTdr
"x"$4S0
R;J-Hbc=#FCB֘bڡ#Z'+ڢ/ڢ@S 9F#frHsb^Eb9#BFlE8Y	]#wfo=uڨZک6$0bhÿ"n+s$*#fH)fc;W[I:KP\vozz!~ZdZ(a۰z$.Hj8gvvw%
t+>=xLƣvAwǃ#mpkA
.r$pSEw¯u[w{[;mxrB}۹kS]su;egº}b[;{SñlųY"1{1GVٛ[&JP6ieK%ށ4&秸xC\GK5٥ՂL_ڡYGF$Wā"VGTyP$D)NǗT"d:F4~7ñhÉk9]C!*DGCnׄ>DB91(+tcʩbF%.tK!LI	ziʩ\q!]Ӂ}(B'}K9."}^NZ%ƃv"!$sd!TBL/k;*8ǁG]h}z.QLց"HCX-b)9Q=c.A"ODk4n5i}uЇБYK:=n-‚`裐yD@!DbFнA?]aD#bp-W|)t$.h(_!>B}
~Bs1'qg`HO"s"HcTGWN]^$#j1ס"gq!-OcJ.\&?3K~韢Y7&ҡ,
>!?PО52L^^L5R_/1yT'_5p/1-7)5?=݄Nbd3{WD0^`^5ޡ;*ba(bՐ߫-m[g]VhS<YN?Vߴ}K1*>X$$/1EA>6,&5z=tٲ8yﲡC[6qN8tmSGݶ3:&,XМ<[6y	4СD5j=G:}޷e9mq{G.[uWu嚰numk܉`vW
Y[ta
Ō;~9qٮnm}KVٮԲu9t@-dO$898RLG.w%o7tu{7gԂmPqt#
>Tڻvfwۿ?zy۽iWɗ|t]vYH54^yZOzÔUgW	R*%vE(R;v
*oA(9SWr7GFdi^wLZ#+&F0)cw4*EFcfN8dreTXBsy7	Ǣ8%ݩ`B]vbH
OXJfNߣ<Ief{"g7#Yu~e?=Dbl
Va%O^iBz6n#7y'u@aٝ<eT$^Qb94P>hA8d&ʢ!N(GRZBj-bđ=gUY6cN;$N]7"ILK0=B&L+pGu9#
`9x0@UD#sYqL?
uROMuյ9muZou^;bMvfDmP ;Ŋ\gMڎܓ
9vWԂGPm>]~Mև76վEcMOSFԴ2JôxS6Ys_f]eҡ'ZS]X(J#}3{Oc{.u*8$UsW9ov: afxgOJ .ێvOk?`bV@j'P@C"ǧA?95teDZ24<!6Ɖ&X䰂<1?BEmL2j< G9d$ctak$!犈Gl&P""V1`ȇaFhUE>rlR7;~GS#Uh'p'TSɨB)0xTt"<rRc	7lٰWA	vbHGjvHS1k+|7s+ܥ}XGbcă~BZjV	HS={a@hLxr	$io
K^͑pߜd?q%̍tAA)R?#N8/*SJ	-O:W$	8.yR*ǢczgT'(F!vJ<Ѥ~HnCnDjAQTr6w%UI_DŽUkd:ָꇪi30RR|_ֿ
vEY`*6I]cY46e2jv,g?Zz6-ECT;vmk_vmovo
w-q_+-)wmsJwԭujww	wn@;\?=Sճ=c/Ӿ=s+`!Sa`3A
kxb 7ȁްC8BPT0x;򒧼1y{=?^
__-_=_M_]_m_}__J^-^=^M^]^m^}^
  !1AQaqq`} `N!/B///B^ `!$!y` 2vB/^
22t Vڠ` 
html/figures/a2-3.non.shared.disk.config.gif: [binary GIF87a image data omitted (figure: non-shared disk configuration)]
html/figures/a2-4.shared.disk.config.gif: [binary GIF87a image data omitted (figure: shared disk configuration)]
html/figures/a2-5.shred.disk.2active.cnfig.gif: [binary GIF87a image data omitted (figure: shared disk configuration with two active nodes)]
html/figures/ha.cluster.config.info.flow.gif: [binary GIF87a image data omitted (figure: HA cluster configuration information flow)]
html/figures/ha.cluster.messages.gif: [binary GIF87a image data omitted (figure: HA cluster messages)]
CmSR>ErQ__M4&N4@l{html/figures/machine.not.in.ha.cluster.gif010064400016050000001000000320770717757442200213640ustar00gfergother00002640000003GIF87a;ƽ{{{ssskkkcccZZZRRRJJJBBB999111)))!!!B91!skcRJB1)!cZBRJ1cZ1B91){BcZ!scZskνkk1{{!֌・εƭ罭ƄZ{RsJ{sZ)sR!kνƵέkck9c1{sk{RsJZ!sRkJcƌcZRkJcֽεƭεZR{JsBk9{c1sJcZ!kRcƜ{k1{c)sZkRcsJZJZ{kcZεƭ,;H*\ȰÇ#JHŋ3jȱǏ CIɓ(S\ɲ%C.\ !.T`@!@
J	*	`$zUQ	@֪6U{ѷpʝ+q.D †" 0!':@8A4dPw&D@ӨY.Hs|S ~a,! h8УKJj &~ j֙>[f.^;߀po.n.		dUp=u@|F(aPdAO@`<_HD"Sgބ8cI`%SP
 SUYDW%$giAV\vR`d)Wflp)6WSw>H|UsNd@!aW!#%fFZ)"@7!@!KRZ2tS)骬@l'hK
0ےT
TWc&+~f7lj"MtAʆ+nFv"Tf?i@nԸhtS@,A^P(KYwW" SZ_! &Q%&Yfy,4l8lVH|@.J^	P$@``A:p<SY5\ݧPv3
ݳ	H=܈@6A`cwf#P	6d~4I#$?
`ci.m
GuX\t<O+@v-A>'e=玀3X	F-"OtK jx6:p{`7xl m% !9¢h/(6	0h0aZC0NYQQb&ppNg9f2(NNcJB~k8Zg@`b9b#I(y\ +@ೀ,бK'Y[[6U0l+	
odvqcGW-m8XԖ9QRU.,R"qm]@v)ܑQVߋ@0eH
LirV8'۵Lvě20Nsf(Om縂)oc$Z"O4Vu0<`n6@\
7pS\Ȉ|x8):䄩RRGgǡY*9&2ЁBGgRTt1p#pLU{8@Tb%^ΙM>m@vjB6AUd2_ŠU2ujj3(X=9
luQZ˖4n&c{%Y)ֶAh1PBVkpWt?=gq[ȅml]= YZv/ll9[[l7msQSً\v]W\^thq{[r0tY+]˲	VXЊ6ruxZ؜ݯq;`-{bw+nnƸ3nm渿,a8}ld'['38-wbG2o170>3Fd,ٹs)<co<b.tбsc8V΁N3')iؿ<zF,a
N,4*x",yOú>{j&Yӿ5=MfPؙ&u{xI~qi<muO,zvla7n&ݩ,oW-F3n>Z
vyn]sW$j6[Mqk扛ڣVHR}f5%9F+hO@=oUVwGv2{ŖwNuǶC뱂WEq8fUbz	Z槰LNc2k@Kze\xȓu4+oJ^|yЇ~ϼAy_}]Ǟ}m{މC?;PWp㗝aLoNk~/'/s.c	~??~yL?;QC8X	D8:BԀ`2525S73'|0$X'*Ȃ-(18cw8/+؃ǃB8D8yFxHzKȄMPAx}B > ZȅAS3} npkjHRpn `18 vRz(Ucs0\X Ypj^^Xnau2Gp^ ^r0mȇ?bX^pYxip>|uxHww0({'(l0(XȋH0X؍Xq|\Xk0(["o Ĩ)؈rnHI{Bp8IW!IIjPp>'i'X(WhyHІmxg#Y;>/i&L ȌiH^Ɇk<)XopW1C42w-clpnRxٔW0X)[c2k`ȆK	dYx0Θ42"(x?Sr"yo{x'ɈoBvɆR	j蓚	iP1G!T7i8e/)%biq!Y@RQٟhzx9(y*;yn9=R4*Ly*p` 	0)pX?<yG(*p.Hiug`f`gZh%Ix^hl"ȚHJSJ*yHYè*1$zK1ڱ""g/?q5!N!,}
!A8ng@[@G@[@kZoyH i"	V@\e`j	jW3Wc/J @4zۑQ8q~ȆRE@F@b0b@n@EZ\Afhze@FNGgڗ

o2#!B
***WJбAz\gCPm9Js`{ZY9y*ɒĚe`&[gF0Z`b[`*ZJl8 3Pc/ Ka"C:um{Ppva!qK24ZBPQPnJ<LTPYʓhK%|9S K~KBKg(s8Zya'yz3YN}y0[Ҽ[gt<``GUZۻAQ2^ށ(C#6C_ڤKp8NVEK˿	^"ujT JJFP;HRxEP!0 *B"23;V@\G8\w<oIkS92[oZpp1	"po.52Œ²7q7tPǘZP[p{yJa%dzfp,<J FiʗMiȐ+ɚR`hw¸ȋ]!qnҤ5gUkZ{AAK{᫒HRtF!
aIiOо8@v@ܫd;,ɧR|.b#k,!!`/2(pȗ	MNJ_>O`x/1"p]~p<ݤQy`<AZJMNݍ:a!`UzQI00=%a/pCP|iQ`,71
5X\s[8u@ekɔRϲ/ya)2[=cƙ@ aLzz̰OJJQ!3`E%CoSߞ 51P
@R+KAy۝@5@4́ R(vqi@F`y`Ge-"y,+	ʍf@fNڇ;?@F@ܦ4̡^m%Hʡ/-ӈ*pn>y
2`=glYfP\Krwŭz?nB:
INO@\
;ۘ)HP..@-8mKNq#1$"."IZyOY nj֨>y T^>yCNٚ>nU2~n!̴y^>y%3* 10:2xڠg
~>8A;l9NŬ~۸yQJPIN!:$y./x+^O#b!YP?2zaAyF(Y턎|iCBhcPg9UnpP⍺^*q8qyuNܝ\o3ކ\9	>Ќ3`elո Ͼbk֎I*zzU `R[Po!Y9LY<6 ?_x	09^`I	4RAӱc4 =v|#Ƥ)Dc̕51d#GN,^#g4ACBC.]&0uh	`jՆj ׮_uh PrR_>=paĉ'~QES}
1jDÑO<	Q4tsfkq͜;+c#ň	4!qɕ/g9aFsŌ@9DcGL4g|>i8h8yS&MܹyL/(5WqT*+.B&`  .`
""B.X (AFo8{2&:Vʎ3A
c'8 9@bHb:j
;8xM8St:.<;8#:c>"8r>&,pL<Ԑ#oNPJ)&P1(TLV``.*ZCLVSu`W2Yh,TjL5t"RO#`R%
3P1("!3\ZB/0¢7`jxF@;(&84;()C#.(-
-~Ç"$Ђֈö1s”>4Ĉ`ǁ}| HUUt/`rծp&z#|*ؠFk 
K9NRm5L
:H
&آ4)Ҙ;?Xz=/ACtI~ssmc~"*Ȼ5[5rp#	_6pI70ߤA֪db:Uw.j*YPhaf	jE]Y *$U	k5	?ޓ>{kwmĸSC-ab{?́nP%6I֐'~G)s<A61\CL3%?A@ RXCufRCO~N~ùSd*hzIyQ. 5x^8pIEzZzGCPPVTL0
8+P*+@`JpTD&RD
`FAHI&$%94.!
W0-$ds4)4dP|I_D$&09cOQH	80n(3r@
[ ~0 UBrb6(×NPDUcH|&ґR8dz1L%IdBY@B"Pe(]:2TWRԖhPճ
Y)C,ܡ:H]**j.I@mpr[
i@ 8}[LI![p[Ij]HHE0ziJeMrj&<x
H6ZϣĴLHzX% 5
T(IF0 @8z)R"!¥`I,լrjk[V^!6@dm쪸J(X⅙(9Ƅ[5RZ]kԐ+mu,5
lBk#к%a;ԈPk$%'g7]J9
Ok}nBʕֽnv]'EW;%2UlJC&U'<NIaLQgRX8Ǩ9-Yz/B!Hx_gB*n/2L\̵%nY&<n3~L/<&3IOzַq%p.FLb/I1
 
-#6hUlYvPMziKqtw)Bj'kqu2$z(c%-iwu-닛Ƽ=z]=RP쳁JBdJq60%Y2&wuv#/-@-B]+C/t_"pj(gZzXOGKV[Ls_l
b}v`S>.[ķ?Ӯ|wBwύMS,4Up[kn,uBd4ů2f"`
S[E(ir)Ub,
֒6a S^W@(A
Xී`voʩpJoX1[JX
x|.фrJ|eHe	iSZ&NIE=ۏ	U_sia~(]*XmPcAdp@);_t\d@rӯ{o.n%)/أH	@;)`Оd
A9ֻ
3@;)T6A J{ZӲ:9h)84" 08!	p)r#Ã`?,K0ÌJ'5w36A3A`@(-	<r51#w;H,haB?{;c<s"	,[`c0: !B#D$Eh	4ȖC֑j3B:GD3"yBk#RBF[
D9JTd+\Y2	-X/7>*
y>K>¤88E%'=iT:;u?ltJFk9qlFԊ)zx,AKfL86p@=x2Rgiډ~KE>ZH.Hӥq	X4(DkG-xcHz8hyFSY@I+HIf$]@dKI$,4-KTABpУC#đ=2;u\'D˛LQ5*.$3Y,;|>C<L<c.)	5 (LMrA©8a
C@$wפ -M-DD|ɉh9F\<$x}G\j#"7pDOAOĂ<;N8ƶ7 2`O4rOzγdC3,`;PܰܲGPŕkAkf +`
4ʫL$5A0Q[7"̧.-O
5.,1-"p3-N*P9T		p	0h,Kӥ=ͫ$0MAӈM1.E3Oå+ӅT$4=*3HH<UGRő)ּPOXP̫K
EES2Uf4I9?}X=R.UZ͔z3SV֫TU8HC}hQw""M\]"
Hە,($"ЫxC,`luJYEouRxVj]XE'@4XK)"~SŵEM)]MF


	
,S#, S)#/`X,hՊC
%[rMXhmX]ھ4H	݉EZ1uE9(Ђd
f9D:A[Yñ\rP5e֠X	'DO
seZ
\2@+lZ]ܼ"y
(ޢ"T
`@N9	L

Y
#\mM֭[=-Z\\1(7h	͍\!3E!Ru|Sٕ<CX,
xZ8	UUY:ɽ]
h+ߣ8U8'جWaK?BJTJ8ʤ\ʬ5J2*茶 0j3`}Ј32
.'9+*.4XvaS?#	@#a
0Ήb0:\%A1#P%Hb,&8.	6( 2.c:c;.;c?g(:@.dC>d/B>dFnd,NrdIdd7JdMVK>
LdPedZdQ>eTCRSNeW~[e&jeXe[~YOe^e_fb`8F#fhfflfxjF6mlffp.gi gs^g_FgPgvZqfygTvgxg~e}~hIg.Itg^h@&h[妨ahVN!sPݨǢM0^i[hgdQxpt=6ȀyȐ
ai@~gdKT1
KIHPDG [iӤCh(RꀣIi:N!k64V>uA^Q@yk{6ºNlf#58\
QhYj@6ب8H0K0d(.mоrhmRmmnm&nLnXPMn>压ȩ0j
fhʶi	B`W^$o6n(0(j[!pIjoAh,[kyBpu5oIAAGnFYlq
Pqk
	x
0.laȖhWl'zPmjǰrnθ%r0_q0/~p3Ojr5ohr7s9iFs;bjnd
<r@P@7Y4o8ytHtyJtKQ6tOK>2utTwsIgVwuXwuE:c\_3'0(ֈ3x:^';]dm?IXiČި'nŗ
]R\Oq#:JGm,,Hvy_"Rb6?1qw
xf+=誳mo
)8vu*hFv3H2xg0f?*;Z*"%ypK*΀G?"zf:HM3θr骫}	)(z_fl(=">*zP3隔Qe<7g?x*'#2(3׈b Bͫz{m#001ȃ-0(z6ǫMf1o|*X?%<jbjxۏx;`{-A>~&*{7OQ':p03%?>J|)s&J4hHi$H"9rx5i`$ʔ*Wl%̘2gҬi&Μ:wS/r &)>ӧi@>p9p!C)!NhF^Bjײm-ܸr5H2E?<1Uhj
g$qɸk3ТGV[D(5p(4	b22*$k0f"j4`J3o91Au3[@K8bn!Fu8؍հA"ňﯳB%qj
!ba%tzq^EQXxhF%7"%؜^duFz?aUXQHՆuQJ$M:H1^phfp\nQŎ.j	pñaeo$uI WLW]fTq^bbfe]TXdgwJ:)ŞՐbe)B-vVuᢖ!}EQ)J]XW%6azXG=H[B*QXrA&Z{-NOܵ"dbA0B`CbPcnd&elrf-ʱ^E}Ph<bLPL!?}Q;2ɱJ+Y!v ܣG0EYOT$\a}R%}tR`rE,moCJpQE5FcX&:*GE#6$!0zơ
iQcuqFH1apdyGDHۑK.lE]rIg؇Fh 1Asj^N~;EYc=UCGH YA{9G9bo=p;KjV?OWEk fm?ObH␦ 9|3!g8t/y!qz2A(Yq=&tD&2$YFHV
Ұ,Ҧ58_	hͱU*=24N|b^$MR(Dhn:8\5Jz"7$"䘐"rQdƌD2^4JPq̇PEٽZ2
~
EMYdrL-t衡Q9|e|P#<95wL¸e5%2%'7T)-;ᤉ189tYH@ֈ!lfȱɖz)#.j`I*y3,Y΀9.τ&L%`A֤%Y)|gCAH,QpѕliH›TI6t26͍phJT8ȱ7=K(=+U6#RbYRS&ZԲ4O}eS
չVka*Ou|*T+t=lꐬ5d#y#I#ٵYfW3yYzeRۨ#R8N*Bc=DV1
bln3-֔=r!i!G9w~"NCސ!eH=n>6@QЂwKhaB0pMT@'?FXCB.QSI호!eQ?)K$UB,*
\qI,c'@	b8B5!`\;8X	D8:BԀ`2525S73'|0$X'*Ȃ-(18cw8/+؃ǃB8D8yFxHzKȄMPAx}B > ZȅAS3} npkjHRpn `18 vRz(Ucs0\X Ypj^^Xnau2Gp^ ^r0mȇ?bX^pYxip>|uxHww0({'(l0(XȋH0X؍Xq|\Xk0(["o Ĩ)؈rnHI{Bp8IW!IIhtml/figures/n1n4.gif010064400016050000001000000551170717757442300152660ustar00gfergother00002640000003GIF89aH~벆f	{py)7HVebetݝhjĤ̱mpc~vνiChGk_~KqUx@mnQPV2iz	MS!^AvP`p׿L|Z]U	a)v0~}Yz/ׯPn&0@=~A{"hm|PN|j81hg?@o`pƷqJB|`뚹K\xrr̟ȱϺ൏侜¡ãĥǩɬҹֿȆQϒaЕeԛm֡uأw٦zڪ⹕ŧƬδm4q9w?|FŁLˊW͎\ΐ_җhӘiӝr̸Z!_&b*f-i0o7k6ŃSʍcΔjӢоPSW^(h4vK޺ɶEJLŲwœϤƻyc~hmrÌwwʛ֯ۼϿppp```PPP@@@000   !,H*\ȰÇ#JHŋ3jȱǏ CIɓ(S\ɲ˗0cʜI͛8sɳOՋQ^AxD{]R
@UyJpiՃKٳh~=~ e诠=FkpDy~۷OSǘ>A|\j̹)˼=,xYo@Pa XLWꁑW <ν#;Ž÷_>յ󎼏8e|T>s`?ǭW\'=\ݣ@p%^=QYFO?&o0hnᣜq)Qḟ=CQ;rd?c[edEx=bs]VU~ͷ"W"I@T'ti)P?kk~H$t'<NZXsinA\3AVƘ>ۇr\Tdxj뭸(AZp#q5@q⩛)=iZ=M5=ʛTn覫*cmT=ˤ
D'O?@8ȓ@uO`
<)UAjjqnB(rEͣ3=KڋLv=7Εߓ<[m\
K>{ӏۚ?߇#̯p=s>RpN=s3Y]_=T|cOV=\G_tsq'h֭'>~xătNlR9qO!%箻M/ďT$7G/Wogw/o</oܳ_HL?<BP<H̠W@u]p GHt0]",@R0"'DK8a@qC1H$ouD
61Pm
V1XkuEu1`E1htF1pDF1xDtG1t` HDLH'DΑd9'HΒv()#L͓f8#P͔LYH	#T̕eOX"X̖Mh"\̗fLx"`˘Prx#NQnBL [)WdىA&

)̌-pKCs3 b)6΂TT&<N8eHrZD4lļ&6vmQL+$ҹm)5U:Ht.J#`Gu
w	"!Qʰ=O}?\d(AHAԃEiWA2䡉zC!JQ&T`AU<t @*td-)BY*YOҔfZ3,
iTXW4oaJ&ij܌JSV&9kPF]eW",)hIJՖTBl*XgV>KiXRV6",:Vfe1YyVJE\+[	jU.
T4Y,DE}k^rz_Z1)aY[l%F1{65+/*L,c(9\ׇCZ2mfwuubIgAZ͌6%miMjpF~%2X*bmlZҶqXb7dnu[6*[ÙD[U9TM:[d,'
 ]UHgPNu1{;RyVnFb;ABY&{}䞎2ISiv<MGN35hHGS.QgXHW)5B^Mb]Hf;Ўl,{ζn{
l1 vw5dzsDη~,?o1O;BG|ϸ7g\+^(O?0|,MPy AzۚGqsDY:OB;yyҝ{D;;[I^DL;D⪫=^w.4&<:td6;/6@_!kD3<ˮww`WoQzނ=L>[<kߑ''/1B՞A[B4@E@X7,H0,$/%
b^<N0#`~)'Y08p~-t
۰
~-1YV)X؀hS|V*P$h
8W!+P.`'xH&A

J؁+ >x?5(:qF'@58{'%%(	
JyȄN	QĆ(n5aj!h>8>(RЂ/(@h,;@GzX{zhS/؃h3D<nHfimX8%oCv،ȇh
Ͱ$qBaHh#@@v<D((Ȉt"x2h،H!؎ȍ#p+`9ȋ
ivB
H#8.ExxPȏV(Y4`C1!a(*,7P'3CD#★2`xؑVɑH
߀@x()^1 ;p68bjyN9PRXyiA9EQy2FWY
~_b@xX(D9%li<&He19闷	
x(ٌZpə)xi{ٗB9I.0'0	,牞)9FqHP"
ԐY
[ٕй*ө.)A)<F@O p9#ɐYHEṘ/x	I~	QDE`HJ."@ʑiZ
X1@h1@Dp9CJmD$/*'醇yQ	~OVJ6:-i
C*F	C*V
MZO
Y_@䐪]z_4`		OE)B*?G8OPwʨ7wh
HG	BpG@C`$pi@??=`C0@=@"L*ȉ
頪됙뀙z


ԪiXٞ~iJG*y p)59%kW8	,?O`
?80F QIʨʫYPDp,ruQuʮݑ
˰H
RT*p
+@p
P0
`#ʨ3 F0R}zD7&*˨>KAٵjph+O@j8{V[X{B˺zj*
zx^iUܰP0),ۯ3@j#GPj<й:+U˾+۞C@IAj+kls[kްK0[˨̺8`Zh1/k/_BiKKQTjH깨qi;/ʩ੠
ppP.3
 "[6lIŅ|DJjilGP;!qGhqJ R皮Y[lR3j?0Z=0`̼/ZGXܶnqDžǃ{/lۻǀ<q9J`C`̫یD\FɟePt2@@	<jjq%F`?p˴[F;F/Q	lR
P@Q[L\FΡJ,ÖK qJи@J@銯=ݲ	F@KiԚK<<>X#,0`{L/8F[6ʹEPKeݞ@Bc
CΪˇm5Pj*dk]mӥ`lءB͛],۳pi٠ܿmm]LG[͟M˝<Hlōɽg}Zͨ\Rʹ<"ѝߪ:;кDخ=ܵB
pYcfޗGj:`-ᜑ֢$1m]}LT;=
L8`=PĿ
!]%~#R0>2-9U
-=λ|桭NP~1,zi>npO,ȃ|~>]>Ak.Khq舮sޥu9.~9fݟKaβdnڳ>j+8.Şޙ>
	?A~ڊ.NL[-֞ƾn61nہ.N__o
`$:DbOOL7Uoa*I45MA/'/Jc.q.-q'4LOoIKb#|Q.aOyA_qSWX10*Q([407QQabsovrq8~!qXϠE]05b!0KB7[1܎9aP4hbA5o(؂*@;!(/"&!J$ao4Q]O.L3/XM"5o///Ao'݃?yA{E;~-G;E$Y$I/TT#K1eΤYDNo.lPD1j4cȓQN UWmWuu=Y_Qe[qΥ[2|=W@>z1b_y3Nlj\,Y\2$uFrL"<dΨW=헯Cƫ'w7"Gǚ¥isJGٯckwD{3npVbM[~v]z\zgׂ_}"ӯ]{'"G-N3*,LPL
!b	'P5H$|(~Dg~a!7ː:4$Ji$BIX$7豈'mQGƄ$&j1!*1C
{^|ǯ#V-&,	C<7dRB&{$ "9)РG:xj7/.lhHF1	6IAB<$D'UYu#Xe)ZOONMǗ[tU}G
~f]$y'sؗFQNKWҵ2xm8|=$|V-p/?}
ZP9$@ŗ
'J&?+U~beQGe]Nf}dRQ<\G_٪ͭeq4uvCQyj4H|{bǿ*(	5WNa∍F7bϐ U痐Ԩ|g|@ǨRX9ͣ
zF7*
O'YOWkrEr.gqW[{<WFqՠ{N'LT!!'bI,vB3Nw<8<
9QpcI@p.g>"2A+bb <aF{^~'‘0rGVC g`(Xom⚴(Q̑g0"π6IM:/P%Vl<\hؽ\z`8bk=yILz1a
7^őNW#p=2ٖ?'@_cK#]	:Ioke3)MJadKBy8I҄ARˉ;CL2/1$n.&׼H"2>S$ftygA RAU{-PPYMP,V1
	#E89wܣ$ԇD$uQgrh0YӤ2Ozt [`*bcGOCl5hUS(jw m|77O[4!@FA]j]0=Z$M	ל!Aב̣7wk
`2;-Ց-Aٶd5P|^+xy`PBfp
jjhcۘVlrf$L	^D>A-c5OᑎԨϽNwޑ)\jE3B~$}I=]J@.?2׹MwKGep@YWAa/6H	
Fۄ#
KBϸ5&qM/µ3&=JA1>ށ>Y]
9y!ByiL
DqeF\<b)
& |f*\E `s<4$#A%ϒJ(DVzǍ1)WƃPM⎀@hҘ^h]\f{ͳ6:Ά	dگ#"!I9@GvHKz1vTdfT$9,767pь>fe=k4Tctlk1գ|ma\M;%DSWʸLq.-ek8TV$wo>FEd.Ն"V)KNdgOoI걏|6Z.2
;:H9DˈS'ǍG?<㖅]-Lɞb|!!tR{}yYß.һ^gꗯq%ғ}V܍~F4Dkri<M<U7cua'"53~'3Z~ӜSǞ?2S-}L2ow4h&,F}>7\?5_????¿C@C@	? @0@cgRl?|@	,Aͫc1.q9%q#!AA B!B",B<=0	4!:0B*B+"oq"8y# B5\C6A*@ANQ)@i'%@h=?'P	T"%H$؂:8DDLDE\DFlDG|DHTD;`B>30 ?	Y"!;DPEQE<p$q0TqHv3;0QE[EET8$"P%8" `%%#x	=
L!btA!_
+	CEnFFĒhB4y'xDonzcVuoPqoQX 0$AYTy\Hył"H<$`d+%% ZƺhFxLw""(!;IIIII98DzJvDP:IP':|Jʧ:ɩ8EnXW~k`}\ur2#DJ|KI4EI%1C!D"14B `JPR|	k=IJ$AYǜ8d4;_0̪4E{,,dUvsBȵD*4z˥˞L3QLg%gM4qLbI"ÄiI%G(N3[_qXU`_0ڬL&M"̹8QNPO "p
1N\NkHPz4zr0&cT֌rCOOOPDݜ_ [EL	
|L^"Q$'+}4Oҫ,M2/G,S<t0CMu'Сy!&1‘Dwz	%]pWRPD;lGP#IgRԨJakXOo
ol	eR5u -(%+GS(?B!G$Lca/($޼TBHISLUUmS|!@dE	F=.!L=	MAYU}jUV@֘`(Uy-+dMe5sstu}&ve60$XI
((}NU)H:*q%0׳v_tck(,x3A#$@30]Y?##'ؒ(zvX,(Z
u`#e2 ڝq"#YWp]1 Z*%	oY+[iς۹5!؎=M[~-	Z%$ܻ5%vXVpk
%-ȕXaAb!PPm[](۲m5mn2+vUx^է] 4mX(^IlEM<A 0_=ޑhP^\]ܷ^٬uۓezrVӌB8x'5
\M_ϕY AxZ̵ؑWۭXuX]nh]a*>䛊'6	b*(nZ3-X!7,6	%0_Ҭ%ߒa8W>	cHQ%>n!;ۀ̐Bv
ѫ>2bѾsd2b鲕x
~c5e4'9]9 B(ciaƤ\?(BFۣ_cLaae͏GNThf_Eފo(ēl.3~J*c~`		tʛB<ڶhc*	 8Uf@|!!L|f߈Ze`a	<7(B(޶'Z㑈aApd}CvnfZnu)gk!TY8*꾙:,P8Ю7Yi;V	2^0y /iQh04Y_6Teim麰i\uiL+	ry`0^C.Hly9K¸>g+$K
~ZSmľFF36Ƥҍ朆^~%lǐN(5Xl=7[`{5P){Ck˦lin>.igƷBe_cl&$;@ޡV}`po\k6qÇ·x3&(&y8md^滾ﺎa.eol *}x| ţ0bu{(n*1*qt8'	Cp6킪o#7JA9e)";*:[tёБa(*ո
Qp!pQת٘菈l=]>'?'ק$__`OIEugu`1v*hKٔ;WQv^qǝgft_]B'lrvvTwwbtyoz7({0hxxxxxx/ Zxx,87?7yyyyyyy'sq=zOz_zozzzzzz3 zzz{/{?{O{Wz1z]/z^{{7{{{{|Ww"2X|o||ȏ|ɟ|ʯ_85|||g489}O7}_}xѯҏ}ۿ}}}޷|pw?i90 2@~i~oǷǖt~Gx7G~<	Er~ėE?!oW~?MW~'$(`
2l!<hqO:v,r#Ȑ"G,i@:'WtH 2gҬi%F2
93Dsnj(ҤGS*m𥠘NRzNV=hְbdJ(Tg*SH)aףt֬ߖ*m5$pޯ*l(˚7?$$#
-4ꆙS書GӰ-){6nͫs־:y8rŻ3hР@
o^fڛ.{;x?ڭ|}v}=|=W|K~?Q}IUi KJvLJaB\t6asV a("/jG"s&"N-ʸcA1و,
~>"g	[+=zJR!t9#xQ"ZZaHISݘpn&f_ld'ʼnڜ}ƧAZw^h

l'_z$)5*Z"z@]$jmbuJZ_憪f+]e,elF
lrʂ,
KG((kz޶I{_*nnZ.wkAJY[oOkۼ6pM~e
[.[z=۱q2l.APF2˼\M.2cg1ΗL:s>G3Ѫ~<]HWLظpN4]p&̵ب
ݗioU‡j}2nka}
}ީ]2VXuZwt6
.q-gefluM:GsyJ4dCo:g:a;CkF{ƶ:|NKቊu_\Ǐ;g9~ѣ@<@=2Ƀba3=Y_do#}L5g~""~>>#x⁏xCh@ꡏN@T.qp+w1HF<"(0<$`{#>A~<z̃	ް79J/WE0!Г&DQ~{|_?CBLR\@l4Xs{KJ#
!N$H"MoL$Qx@y|@ 'qOd`Q
gُ
0ԥ=jv1n&1kb 4RqkZG=b	8M.kӉ#C!
Gc#}c'67Q
dgCN=0zݑ<FjZ$\s	h;"2O		J'P[FԔ7R'ůIu]O#P9Fe,ctzձ,xM[T?cu$)+Ϊnf
)[U.x
]׼)OQԠjSJt+H
۔a=)d"YPvjWdIfzLXD&:![MD+!mFKښ)=L3!QЅ)$T]%&
KBpnٞUUB
<a}V	L_9=Jb^"-jq\"M`&4^\yD$%\ˤocDB>ěqfqٜ,d8$,DdD*21kb xE/(Z%a2YCƓo}^q ȧ@\Z/&tP1,fQPq"VQW»#NЖ`
d<V<yx4#-IS:@^N<8GClQ;l~WhYk/&!SE-h18XWUlpьA3ڝD%>g$z*i[>7+2!?~tQPy8ԉGoI8ٻ3C+kRʹ11b!/P%Pq	M8G&t\	fB(2af9Aڐ&lM~2wS`>{ֶ̣9~
HBm
IBś5n]H%(A"=1%X	NX//6Uv*?A#/7I\Va;tR<W:owbĞOA
Qٿ)*
egv+2I佻;~>i5y{ 
p~g^\31|$Wz#*@A @1E0t_ݟ	DA_#x"x=`'#H!B-,$h%`+<$ku*5EH@  t$  ؠ@@ V|[At[X==>͐)=lF5C?ԃ<>ag0\1OE!!
ڟ!O(T 
Ob"#x"##a*H.B,)`W/D+&dB"!"x]%)ʖ+T1
!bb."Ƣ@b-@6p}=;
$ݐ;<)@t#<3Ô-a!0"&"K#Ң,a #!cb?$vB$$*ʡlm&(*5@@*vB*R?0֣0#>"."/fG">cU1I2j#oP@WN<$A@L[Nd[dOPfZ!#07x7\UflqWl.*luVr7xX5BTT.a]RR%SS0e`nfe^e^6e_ԇ$w$1BZVeV my5`-)`Z^h$hd*
efe%\&4C3t)B,(Hlqke/HL^ifH'o&\C$\&P&P(&qf'Dr6r>sZ&+^Mlu^(\&(U(~eeHaS@!A Mf(Ces$++&L**-'tIuo*'zNpH8hwU 'ĥB%]wQAe*%u(؂(ޥh'$Nj
B.&*`%(*LwiB+Ч@L9}%].(؜)؃Y&B&2Ti|\iaƨĖ..*i%X[&|+IM읝͘.“$BBIq
yh
ʢBƒ
'ן=´}miW%B%(jm,-(B,ªh.zϢ*h&+T@@}&Y
l*Yܲ2+(B,1\ed)RӬIޘ%e/³ٺ*)	[+(C0+VFfkehٕIF&@Hۺr9vWіB&ʂ,0
*|lǨ>ԱF7OĬ$+@DB3Z+,viWha^vl,/@*m*
Ӷhsp@Rj$Rx+%w5\C+m+
Ş(
[486<&-2z-B!|蝏6ý=܃#]=?@C?tZAlQ? /BuY>.{I_?8=[:T.~k,nl }hYiz+lۖY-B4d6d.ޢz.ZCZ6aê};Z;{5p[v0> Q<U
ϙ_"
ZS>ēlEhw
i*IaBv)4@ݞ.-opec @xcx
|1;<4_GyC	.#1"!,{P
ՍO&p61?	oڙޖlvIB.1-0BNq/m[:1"<!=,R5
,)BA2PA42Z5ۨ0RO&1')B.-m10C0r+kò2rjpb2!j.?9͒

;Z3pD!4ۓ=JWm#@C624?PUG\&ݮ9B:-(s<3=K=s϶e1a(SE>tL}/˗-qp5>54fp4RF|	>]Y7BM5*\G4w2M:t;BO3+=uu`bpVG>T/B/mZ<8ԧ!oAH?6D<OZă>.?Xl_ch6%Ov_s8ϴ96N*v<3v=?6c>k1q&5:h1 7(LZsaC;/Or_>e{ekws/`/yvz:'{76|sdg7-sc7xP=w89ztwPw,59w29xpiwF3wM8bt4c7}CZݷx/#IoNs׸u?/o&
DLIP PՊxusȐz?7xu|x9828k̃Cp\Wq
}#cHJd+Dșk9uw	׏n?Uk72t׻WO#yC<TutCyPJՃ1;JyFDȤyG;3d7m?˅GB{BX5EUTEA>l>X>h-a<1	BpRqr&.PEsW{C6l䧟= 
nAwAMaS祴?60\4a	

m&19<7Jx@|K<'9|c|;&13C#/gZ#S
AS)z.~P?۟X[=q޿}PAsÓy&l$ښA1۸Wfn;]g8Q7..l}'=|03m9y;	̓mᩅmgyg/(Ú)L2 C/g{RX>g|.<1}ʳ=|%pL=:Q8SG=s=@һ3=+)ī]n٪UKT2Zd-Ck`L[6lٴ=XaĐ0yeJ+Yt:uygN0PfgP2i
5ziPxL޽~LϞ>}8**)oW}TM9V^}Q$~:_|3y>{rcAmL0{aQάthѣIU+(S(eU'V0ez%锩Qzu2W̚jp[bqk,ƎC,YJ5wŗwK^={lOo}q%NbxOxOXx'J
MboMJK%;;-Z{-j-QrŷSHqZH8kz%
%k#D"IȋJ)'yLм\0<3EUc5d6tG@Inah^EWIf4b'C3'-J}{Ӕ,%sL=T'5]l3F8i~%j>F"inI@ui_4˙d,Sf=MߜQQ&Scd_afCyJg25/x(f7Jh_tSLDzN_݅`A`^IdٵIvu^~=d-eP[R1P&ѥSaPd9ؙFjaG5^S~	d~jeO%uZJPfDžd%$\h.cE:kf^ikie:lV{Fv[ﻻ̣[6kpi1{m#߸Y̳<;;;qsE?;mfunO;۝4ݩ=xOWƏo]ݸ/MK~{ԋ_q5;}[i. @. 	X ρSF%F3׿-
`Q7@)dV`
R/l`A0o#<}%z^!
C+^Q7]HSh x4b䚡P44	V\xl]c˴W8 /uCD{ep
$U$EyO`I?Y`g!rK^bF7b\ЂMj4 Jc)k<!'.NR(I2I'r|`;8:f"U/)FWuhTd!I+fRZqW$(PvSBgD?0{4c"8ˏ_@"RB҆B:hL3uT&Pc&I".UK[r/A&x*Ni/_t‚=aE#3jjK1a}C?nq	br܄0)f՚JsͧY[ukPJUuL>PU3thq5?҇=.Օ|#D}`ZC+
-myL\5"q
UY
vEsY*US2yKq<xeuIL=d #0wL^ۏ73lK1qQ-
LEN4"Jwݝp4nKÛy-c`L̈́
pB!iMbփ80^zGG~\;S$(\4pd5a8)"1SYz0e,odvhe,Sn=rL#E+o`7\()YaYOǐDZ_yQ\~Ǽyq>[ m!%..<^榴zǪBɇFV/z
cP65-Ҡ4NmNw-as<1r,zX\mg[ܓ	u\o{e;k<8iߤA6Gm-臠}0fB`L?hŸ́~1eH-aqzE3ʻߜm,%vɢծ׵jPk&]Cp_}K>aBx^vo\q1]d7ܷXv7Gwa˝G&|+'	?\*81I'o2E<D+%X!)|Kz3^;S=N!	J[{aJo @B}|8s>?N~KG	?cr~#O/J]2OI&PE&$E -&cjCKPB<FQ]NIŃ(kzX©*k%F>cfpX.Zd|za
&*`TbC~;0dHz
%c$FLL)RkSB!+kblS$CXƐ
F
ͤ^?QlL,aګKNbA@"a^.=!)Q+0Up
aODcN6d,/BzŖANT q#h1Ρr ׂ
%^>qC"+QQr@<
wЁ _%
R'rJ-D/R"0،"լ*_#=T42}vQX$q`+aR&Wbۺmm$
!((0HDF+!=6R)m)2,11cHrDv;
a%A R32,U#&L0n6r".ڋDT)D3,S10s0 .8S1K*E26973qP"@0h0sL:)/0<321'=#939 L(2ep;Mb7{<?<Y4TS>3Ljs*?+?-@@@G@?A4> 3BS4E-4=j=EKCA9EHw*SEG@@eCEC=FmT3qT@CBrI_p)%sBK-t@
S444STI9%l,&!5%t%
Mt7O PTL=LAL1M4&״+Rt:!IV;i.%@UC;T%0@<TU PcP5S5)RQrMM6k,
Rnb1 `8KКT(
CUD3.RTM%*VO4Pe]eV1VWEWwGC@x.M1HC2-3۬/-cݚuZ(kM5\Mb&`ȵB@uPde
<dKdOeSVeUq]%^u^ǀS2#`%
g۫ZwmHNO'o2VcsK_KCkudWkkWe2F	hFI#ft^]q>fW(g&N`+(bj@UcpCvkVqkVQ%>wQY
5%pV'6ljcv$bWp7Ecapk5_MFAW\7!"t1#r/@|>k/^.?ԑ.<DVnw<Kx7kWL9a%Aô!2=>h`Aak?R+D48+*!\4RkS4TkW%w~+pͤ(Dׁa|W\wxorL+++ځ8X8x?Љ*Wp4؋U!7;ءP΍XgulJs3y8v9!$y15y82EYI9LM)=rY]`9^tHKˮmYqtY8!`8%.yىPrEBS?9mXuDQHʸ
|Y癞ٞYٟy-q3E0Gc5 o瑝5c	5P@KڤOSڟMO[jcKo	u /"uz
@:j{3GZکGҥdG	WPC
.tx3Z.&c9ӪLw_U\ú`:c1Z:Oyq7jK}_@3Z-{P,+/[%`sTyY>uA-2#`[e@۹[ۺ[p׻p	[0!@zٺ";oy03rW"S(~;'-<\ۻk1Λ {:L-'ckB$6CO+RܲCa/>$8 C^ƹN?*n\3AabށaCdD*_nu\»8,Һ:
`W'q?DwB!=-(G+c**gǂb-"*W*a$a)H}a3os]w{؃]؇؋؋
W!`aZ
;AX4HP#},$Ǎ6O#'BB!+h<caƘ"*%2$\ґ"'}+H$DN\]Bj`P
8p<#z#:{<avv~tI1'N-r3,3-M,J/j^Iqꣾ/%XN
`aLe~-ݸQDl,
aM5[0O/cd2+PBS?žK>L!@탚ἧ%,=0`ײ0.
,0Ԭ08?\wy
/D!1C-V%\ݴ=?Way3#Nj<1R|yY}^n-,.O=Wo>p/}#x~{Ow_F|	3̙4kڼ3Ν<{9С84ҥL:}z4ϩTZ5+=u
&QI#p@ -={yy[7g
JTQ;^#ɔ+[9͜;{:џtjز\(.
 `⨩_=/ċ8̛/u5ԫ[=ܻ{>|xsvTpxʏl>]>c`8}5R`>h}C`LEFZna~8`0b*U*faM-b6SαAdB2bQ2אJ.9C
pL^eyI!ҨebjPP6e@8fnY^ypމgMeymE@@sǣ.:(	$0v2Ji7z(cLyA~
jj&Ij
k6m@bFk
*!yUl.&U&NKmֶ,m~n,wNnZU&nދL  %pC;ԥ=jv1n&1kb 4RqkZG=b	8M.kӉ#C!
Gc#}c'67Q
dgCN=0zݑ<FjZ$\s	h;"2O		J'P[FԔ7R'ůIu]O#P9Fe,ctzձ,xM[T?cu$)+Ϊnf
)[U.x
]׼)OQԠjSJt+H
۔a=)d"YPvjWdIfzLXD&:![MD+!mFKښ)=L3!QЅ)$T]%&
KBpnٞUUB
<a}V	L_9=Jb^"-jq\"M`&html/figures/software.layers.gif010064400016050000001000000542270717757442300176370ustar00gfergother00002640000003GIF87a{{{1))!RJB9B991B9s1)֜k91c91Z1)ZJR)!RBB1ƽkB9RBJ9kJBΌ{J)!B1ޥcB9cRZJ{csZkRJ9cJJ1)֌s΄k{cRB֔{ZJΌs9!Ƶ֜sJ9c1kZ9))sBε絜ks{ZsRތc{{Z΄ZJƜsZΌcޔcJ1!R1RZcJ{Z9έƜkޥskJsJ)޽Υ֜{cΌk{1ֵkJֵZֵRZ!kJB)scs1)B9sc91cJ!{skcRBν1cZννcZZRJBB9!)νsk)JJZ1)sZ9)s1!k!cB1{)csk1)JZJJ9{{kscB9ZޔZJ{R9{scRέƥ91B!)!1s!)sR9)B{sc{cRkRBZJ9RkB1JkRkJc{ccJcƭ΄kkRsR9ZRsBk9{Z!kRcZ9ck1{c)sRcsssR{cBkJZkZZJZRBRcJc9)9RRk!cs1kkBkZ{9csZs)Js1csƜcs{)1!,{H*\ȰÇhNċ3jȱǎs舤Ǔ(S\Ҡ'F#cв͛8UXg̘s
J(sz*sdIPJҫLciׯ`tjӑAê][<o*z#l	ۿrhQ׽+/`dG4sYt޾S?Tg4M&ډ ՝s{X$\@ı	m9nλc8#Hp&OC<n@ ErtX=pΩo>f|g~߃
y_ѽEy%\l[X`wNy{Q#{x"搃#Xށ"1ZH^a$jTAqG`R(#LUס_`&Zޗby#aX!:=hbFiYi]7&t6$mpQťiU8ab_&J6g"Zie@:^Xgj*zJ$lkz%YĴV'@2ߝE*eZ,:crSPkZ\v混d&$9Z.=JJQļ_+,yll`$G5@gsۆ\Xڱ;$|r)CH|{0& p$;XnXVD<=(-P	-WNusN-[wZo}<$xcf	 	ʧ4LoyxO9}3{%ІWxKLti8k3++P@裛\-Tv+
{=8n:w^;$m8ħh5G"	N_{V${·x!|'I:lfs@
Vz{ڴȇ@o[^fic?	D >le+!F·B(iIS/K~{BwAf\VrC/la3xqK~5MmRhA
Ezd+Ol vI	xGZDށ0xC#!!2DQJlbwHFS%iMj]A9>Ӎ!0%">H@0z:+])IXʰ-oyr`+\K|"v,֖@
z2)I^A-PpMD_v(0W6:rRh::3gOf MяF_S]dR;)Y$܀'=sI^r-'HqGNfs(LezP!$4Ƥ͞	7$4BU(JΆΑ	J]jEqi{ jGC^#5־5\4ӪVֳ5FXN((	կc]h;cd+bP5q+d=dLįSHW/T{>ԗL,8~ikz#,`;G~˷@@ 	?,"[]duBҝJ;{԰Nc+J'x,H
=׽_;Lj7E۪/d{lJsBO
CsKҖu[	%Xnxjl;э:@7ծ[(
䵱'Hs~/LurȄEr'c5,?ILb)I]0d\:yϊ9dhZB:r3%6gxRn(KYD%r!|ZTeglh)͋>!+Kǰ~kbt;
	[{>dƖб7qNA+gY~55:æN@P֫Ԧv5mVtۇ
(mD
:7xkW{hM
wPCbY>8߭pPo{L&?\{[|
5Z&s,rwk;3e}H^剆91.	JP"
6+η̻?7Kktp#=.
ZBȭon8AW#C}c0ϲO8,וtˏ~P4X:6{.z'tţk&]7=
>B@.	cI]x/_Њ+]{TH vy>}ғ0}ȏ&ǎ| 250;wug}&'hDfvfz'{GcKP(zmu{Dzw:v{7TVӁxCZ'}uޗwqu(5;h%GA'tlp5\Rz(|V,X^#J8rׄ5t
XoBdhsW-8I@!@eg{8_HHn8nw|/2O1XUz r{}GW.7q'~'&/YPIG
wP)>4w؉.B=ziz
d4g|xAӋ%1V2؆Gȸ̨~(hCM$qx@A.Q|HX=Ԋ`pffcXJXz
zuxǨx!H		hǀU%p(w/XBsW;=ȎsT@{/`0ИGHlG#Xh%Y08S8Ȇ!		''02@YyE1?(%@ 6wmْjEۥ9H(9G-4`;	ȊpOva9oxsB8,)[	(
z㔵~i}G#8Lb9d	xz	4T0iuDE6h:6DtɉT9>@(Ha2]Qiw΅9`YVYv[XS!@	Ei)oXtiyq[;ӛl璯)Z5Fw}
bYqw,pX@/ K=Bd4ȑ9Ud#)z?0HJ
*ripQRiO@6[9R*UJc-J;:AU[oI;V>
UD5iI.8Is%2~{biM;?AZEx9GJAr*e:UE:G,pʚuukk[GFOjih{*H}z4n)&S5V頠ŨxZayY
4CMm:Ti!TꑷjCJ`ڮFFڭ*)<˚c7zpiKٟ1o%j@D)cZ:T4#b鰼$jcy9:cʕ;mz68L.˙AKɧJI4I9z/YwB)7l$1&GpFȳ겅2{fkMi';@]`,g+zڷ~{揕*rB4!Pb h譿6t6[a]yAu
a*A+88l}#u`p7YٹRb
	v	/!༞[}2+G
cFy@@	^ 	KYɠN{Z{TxJ(	fr;.5ZϻTT$~yTwjn 	%	``6J0\C<'
p	0|
4HK};A=X?L%`^GxR~clSyxWp#,SyPj!%^p\ٛ } Ix[{K+;Ɖ	w ɉ+:~6j9ɣJ9p \ypCe(eGSƜFi&װUJYV6PˏZTRvȰIƍT|sErJjg˾#>"em 	Rr	Gϩ<X ^`<OSLšL"e g]s!̜ix+>׫}@	\(@Cܳv_<tkr<U<eӊ𽪖(;<ImŞёW=E
G``;j-ny(M|g]ӸK+9MVM	](PKJ
S<MMMM˞\(kpt.A۹z׾m)hE|tm<Liςf܀+ |l a4
t4=⪔Uf}0\6g{~-:Ilw孪ؑ:쯹]@P~~F(06T	cЊ9B\`{ 	˗j-p+ޛ2px'D7܎`^t
F`Gp/`ڜl|}NJjd@Pn}pp{CZ5
<5>-QNێp	`ʛye4M0؝*cR.	`ʓ`2O=m9gM̚ޣYn	~`ďNRzj6XO0kO,<lbc>v~p`[=;-IFH63Nolo-^	|r,·i@k^pPy:G..K9άP
jə;}K]8Kj?fNh먜3ΒP.~A_=m}r|Z8T> JO,8E_+/Vk	7,`!@|pCV262o߆`P	 W FnUWO
Inj,]*x/3sdoQ`z,0Hin\Z
<臐( H`Ÿ(CxʶA2A..dC%NX.?ѳ&SM|(Qt@"It(
+Oy!<@OAoJѣL0aiOQܙի9nD$XYewEÕqc8"fSɓ)W|!X4x1dgOB"etSPN2W<
0gС)l[p"M'Q%L+GDP\بR!3<畳N<`J+kq,#?9tG$9xgS'h=S1:N9Ȇ`

x$rB"3Ë)mYc
>ދϤͯ#8XA|!lj9(
3l3D(#<eLqdNޛ.i8Bx0H!$˒T+&;.J>CJ+E=+JQ$m+#0
!B0,Č*
) 	++0;U+نlPӣ58Y퐻LDLG+VH:jS\@[Q-P	%DB7IX˝hJknfhtD^I>b x!51%ZƤ]nH$;3S,W͕X!tK՚vqwqDM0
,8!l(M)9HSnar't8wA8.յE-ߓA jIc~댰[VuZ}g>.Zo]fcNz#-
^aK(xABsTQQMro+p+4l3<d][^E&$! P*B믃[*Ix;ҿ8YGuacIv} XA'x|<r!)/[杗5]Չi96ZL"0vV$'ۧ8Bjs쌎;̋ZFgnܸvFKq("{dYrf6AUOŹ}s<0WBEgy^G=,RO^&*q5J_V02%xaGQ:8ŊT1z3a:PB؞OFp@z7Rff#<qd?#5LfHd!/B#=*Gv<FmU6 5lE֍RD
\T/2}$,tYD VnE*-%j9	&):!3"zG?c(K*YBu,	dptnL''yJ~WhI?4B&CITȔH/gUR9&r^?I
Q&JA ԕ
wRPHԦn$҂D_αki
cU 3ԥur[Jm`_p!AAM9Ab<s2,ȸ̐&,5ԁ.5$r@I`#SQv&v<ub2bB_f	vvX8A	#9]:ŌO9(V?SfQEVW59хMX!,aLz^ycŠ6;i-xTULU#"a{لi%	Zx[9`M)Yݭ>v
nR-]3^/Y $A	CP`Oɑ96=A;GB|&cll[>)z5=nZkbb7ʭ01xMp+Wya+zǸ+8ZE6U=3ؘTB񐈭F]3?{%v<f<R59-Hh㖷[e
w۪gF}$R\ OVf ֶUd8㶣,ˀ'.EMhh־vpUJ^xK{,,?.l>y=ݼh̻0w:է&y㐈>$|eLv.`jKl̊oPż~zyC9r:i^v	
э^ޅHh*F`]'nx16-8lsmr*^P>4\ԅ%~VQ3^t[dv8^mǎC>gɥ,*s_P\q~<,ph9:Hwk,f1Q'9);AR&)֓;:;s<unmX@{q?0XqG
ӥ~

[5"1`'a-~-۫8 Sl؆kXg=??\`|?xk󻧙n383#F]sk<@M2hׁL?t?44:ͻkA\̿ϳҳ< %
zr<2:4?rTjx)B̳t[Xg3KӍ#@%-Pl>){99Mқ';AXDAt-AۿmFU)	|9
%0#
+ϡ@&ÎE$8&CBW*@EZ.#0(Y2 !D7L#d;2hX<q$GsBD,>aLI/($3č`=J2B9<:S仂<=>@ąs|Y{SDi^ћ`	14#/5cگP?"Es7mdT4B>ܼfE|HD<DLG;5
83!A#c;'TIiFq;%|p+E-|C4>;HΒ=胍\9opʑ-CLɕjI\@AL4DΔHrȂ<dhJ2uC:J4)CrJKXD-<-X0>\@>LT*K89졄3JTJ@AJ94kLIʟ,%R,ʌEsbLLPLkt,M3KZJAΙȒ:[ŬҟYXPMX,G^ D$O=OL*Nz$8C̓4kǓ@\?MȊQrE!"=#EYoPfн{1R,h-4,c2)TRj)?3ITPV`t固0:˗#8X-!SЉJ0c^S|el3P\HO5=M5V@.B45]00F9ÕY/O` ĠuDmUvQ0Q1u2?cu^(-4Dg}VZT|%eex/؈< /m]Ύd,@W7Ƿ:CET"EeTa]H!O

}VZhOӞ+(I@/JXEJx5
M1ؤN:܀CX
GeA3=YJMYTi@feYtٗVIYmv@[Dx!"UiJ2l[JYV-^` Tbȅb5uYZIeU:/t
٘h#ڵZe9
zX[ۍ=x]Tyz-Y#]HĽ=\DZ0ɥ
̥g2 VdKfXì8mZ-)'\nFQą$ͳLŽZDfP6T
؀*)
ՆB K;}MR,_vn@_(g@p=o8ĨuPWe\^_KMfWNT` [Մ]

nh΂'`]3[-? XU&nD<L_gaXX]A@b5G|_\BTʕVQ[L2ЛvjHŒQ:,]]bA]9_n(JTe\؅WɰlAt@tc\@_cManc8mm83`aV;)x`^֟
bo`FvdddAJFVLdNi0߯nmV8Vi(e2ȉ"s<#Їe5+>m˚tf\g@h]dZ`\l{.P]wA;&
PF*2|BB$.X&&jCVr a.V{>fdn~6f\^0tj6_&e6gU]'HQؐS'Fdaeni6T}e͝<uttԢfjV`i~cyЁHM!tN׊sgx.'gYОi&~`>+NX\d_FVnflUކv <gЪXg=fV?lUmn&x7fӅc`pV3lR\iОp s"v]&k.Phlh_xgeP~k|no`\T\Z_R~oVgf8|8gƂ&ԃ?f(KAeY`pn8mߖ6kL~5RZdV_5nnnNqdk
9(^n6H`~aZP!op"mp%Ͽ8qXxrjfa0> -o.Gq/OqVGUǀe6ؖ|szV:VrRs#p=w:,3!nt -tL}*wVZmXa\"gj}oćCqzFYXR?#karp+zW;TYYg]j]@ZZ^.&J_Kgu-g7kFkq\xU0SR;卆Hf @?>A45XH]ZzN_8qv_kHvtۄn`hwxl'umv<vyQp6R=O6=dhqwyfX^Zpo`}'z/us_/uSoz׿s-yn'{?T\^.ry-wwPrsWdkQHsz_a6T67wXEwV$=d8Pȟw/{fVppw{Uy&uB}/dj`jH}WzUn_[!g׹z޿TYkU?Z|b++V@jeZjqJǏYr]mq2gҬi&Μ7-m_ժ)7_HtVɪZ#֎a1"WF+kfJ*{Vʖ/c+-[/fkV…ŵVĮocHU#ٺU%LG'PD"ŔiQM2e*V[qUرgU$8pm8nwWp`gXX+VĊ\wWZCfgϖ7;.~5AU2kTMu*dn.fuV3xXq)C[X#WhEL^Ywv	
*r+"W(FYedr
7-Wj 4Tj(HRTReUWŒGIJV+
pe5Vq͔[q9W]x8#`m--$J,яLdGMfP$}ji܀9TQrJ)b&)h:%mYVIb}u˟mY3Wuᢨv֢K++C)/E1iNr6*jj*J&Fݙ'|덟$%sB'^H,Tܨ: +/?G覌U+O7Ye
6h c2X(J,^n,X}zɢ(e
״[XDOEF(2,,\U4aAmf(MɆ,Z4qjWLY{u
˵daJ.Ţ""^*c7[("4LO8/|i_[[*wnpqb0.صN:f(/€\FXtv{{-,}iyjzR'y{܈w㰙@LlQVٽ[G2hDGZ>'9Csǟ/g;P\%KVrhcM_E~L-T²5-[SQEu(b!ٱpER!2$#Ź橕boQjofU=@Z1B^(A*<SG5jF>'t,bƉ"*lEXVhC?6尘)ęz(MMrh&+eWs+B)JυO4RAV|i,6j+![񏭤'4LR< 'HFωW6E4qvqЙ^A]$0egC
^—FV&Ԙ hk'tTUA)L.WW2Qp8_ċ^xH|NVka<W:8GJt=NK`nΡgH%S₦XR%
i=.QbjMscU++f.bs\"\=@JLz-6=JQW׳"VYajvgAt89VVvcE+OT	&oV)^ 1k1
]hc>d0V׺*9kЦ9p2%)o;֕1l+Oq%Ј+RAL^&$EM%.ǒ\C!ѩMP
WGMKV<)|,"\gz~Ѕ+qsoWifDC{Ԇʩf9ɩpʘ`vjT4FZJ1䌣zY;}U!~qLѣnDcIQP>tk,c?1P.fwZ)
(p0xi.{:ġN_kKM"Ј8vꊝί*F\ءfiӯ	րOh%Sw h9dN*[YRrqm0#U4>Ħ@Ȭ
[ť\So۟)X֬$T
\]s`F
k R{0'%LEg/xqf0]|AG#M
Bp?h}{*֫~
($A].kHgVl"zֵ ֔_&k[J-2c=ҙrU\
[7oƋg>Eyx,^|++=y;C,{eE^Z<U7\e=C'yo[mGQ8&Q7gpn:
XI\a07YV]ěT,n/»\XSъQEI~8$Q$]It¨8 IÇyݟ2|É}]Q<]8RY]'e8)n
z-(]YM.P"iC&4RVKd]V.T}`-߼QUzG[ȇ_a1$C0 !0/C :C5 93$r#&JVz/˹^S=HdIɟΔ*)Álj40aE)€1C
a442$1h)ʇƥW>U FW&Rc>
΄u9"*nU}-7CY+eZIM
40 C8I|Tv!=!],)J=aME%YE6H^HIn:jVĀ
UÕLbYZ?.Yy	#5[<CQW,#<1$BS*	]q9V%ޠ_MH)|PIY@Z
	HKͥ:!*:4:L9"`
Ua*"6 bV!t
oA&a]TH0j%hJ֡V&[	Hd@@Y¼!EYj9C8t8$N&8?9
&w
[BL-9%sN&ܞ%WaD%X!QIvY5/gd#y*Ziy?=gRyCyMC:&juZ/0|!$234P
pRTSrJ"
E1ENTT&QW]:ÇE9v~| 9hQxxfj*@(?ßhk2-e?C.M/H502!
@85LC9Fԇq#^izܗE>	bH(b&MXW"HP(\hwxZjRT.>YhB1!yq30$Ù]24P>:4铊
6-(&-hV#Hh"SI(e^Gjuu_5ڛ)*h:)*+:G%.8@.H>$8B%/(>/L(QRLXû.e-ɫc\*5btCrp/H,`9)zZ:,yF"+6+fl5l99Bn>*W1?TC872,g쨾]f4f}(xi@6eEli3-7C:jrgLY-`-jmy+؆mblYB3DL1C:We^y3-
Q.6t.rG\L&0~cM *OlB-`wzꦮrmnijR-jW:dXB.zC/8C,BÁl*ZJb_rnyY/Kl
fZm¢@JүG:(*xu-7C.|Mf!8Cm8 :øʇg
kK,lCd{Ĝ`	n3ƪt(04Go$²op
B(-ق,8C3Y.j)Ui%8O~0h3dvqJ'D~W[~|&5	
v|PԖ1 rl/o{6C1-ot7Trya9Ǩ/$'+XF(Op>f*
UFopL@#0\CO	o
w#H*q Gֲ2?s"w97LC\~1om//6'5xP@J0B;[p<OŨ6Eb>˪i2H
t.B.6t1#s2'Z0!8s^1 K:a:bv[[-p".i2'[P1hP22F,@aG>G71IXC7d@RU
1өCSVuhU3z-nq$3x?N3nX!*8.$?/TC*$C5CJ<wk)5fGDBa,o5WRveRױAo6>gcV1WuDv2Sjob4l߂6c-U8/f725]"C5ms״tt,$?u҂/0X}0AtzB3t1K}72n3wftY17c./nZo[-`33j$:'oLJDu`Hs{xe/8lpp<tڷ֚6~o@V0Wϯ2+lOs:71@:M^>.P3ly5/_Xry(a8y_4JxB7?CPH뇋/o6Bhg깎g-@jv!W[0*}ާ%czɜB2se/Q.1
6QB+ğw:eyAA7{{B0fw1C6ƳE{/0å7Զ!S	J?{;7R×P cx>Ú9M@w.31׷۰l<@k9BYC/ ?aO|cȼW^j:ΏK	Qw3XKfxүҿ;/;~?TcsvWS{6|3}1zNW;;/deOf2ŰIQh0c<'.@lPcj"Z{ڎx37黼`CW:Dj~U{6ذaʞUf6n˖Am6n $L7`ƕ4in̖eˈM#7mv&ĎYCE2*TSN!F֬h`,\u"EY#Ho^Ek[޼L/`‰SX:J+zXjUcCIP!ß֮IFqcG &yŔ\Rsf͚8yPҗ&7NFZ֭YǂaEgѪe\vo4nK/i暆Lc)Lj~43
BP[!Z͚6<h^m&#θ+˜n*;+6,ؒ*8`=۫/iuE?^dd$d|u	rH7SgIh!,Dڒ$.T7TFDJ܈ֈ+ni(Qh*:X`>RVXR$lҽ}t/>`Qo2!tr	gg"̢곚dF8[:O(>p޲
8	R~'j(04FNm+Q2bX!ъ5jMoz=w⛲frAF-Acg=P"\4pC:7Bte7']R&t__ba\
^@,SU{+٣l9ct @;F4&mĥf(g]I%z
$ AƗQ+H</ڈVkc=H[Xʖ26F!_'2&:vNlwۿ-/%-QzVQ /)y9
$rMOo<
4@XDžʻޚk#h%.ь&9
8-8Cya(uJћ-Q(O|C`>
PGS64H,\.5=xa`-mi#l
KpNE9'|
\儨}
~NK b
Ber,c]],(ddN֐5en|cn.ŒYz‰
)Wk,XwD+,ZG'H
.k=&+g$ IJŴh6.#G9gƠ+,ӌpytrc
	Bh,Q9Vd<}P r4T"@'مj#L%@=cs6%`C?ǖd CqCPCTSrKa/a930{?1^$yH"ң5Nk@I cM+瓡D[aU#(2ZJXF|KREcC@|ke833Hׁ!.6C Jә֊9$6b1Ŭf7;ˎ>%󞑔`.<TKbҵҁm>jg`
!@LlW<kq	֥LlsD_fl+KPUcS蒫~"/Q3*+0m|5W~/]@6pAoUK*xLi:9	7Un{)5C4ҁ'{BaUp7ld()
L"GXsUkZn,C# @cgT`x 6
pe	L ˕;X\e]d+/!$zS@rWM4ƶ4@I'],	.X/I̔Ԇ&1Uwƒaa~^ָ&UʳВWYlܘ	7tlg?4TҮRX!)[Vpc-6=ma:f*sj,Bh礁5(	.dᔋ!Lz+/YZ媴;F<=qF mL
Irc~1yl@ܨj&%rFMA<fێc.1j!]qIg\a`_)v*:[^m+;
DÂVv3qAY5%&0	<?+{Ҟ0xucDR#c&\*2`&O.oN4ڪi*.8Z uH@@aTP.~	\.`
tNNnmĺArI2d*('$dޢ*orL!_ @ PEh@.agafm
@`"@V`0PNmʀBbLWljMĢ*%L,&-CJόpj\@**p&g	
_6Z@Tg- hܺlH
^Iڭ0nEp|o#zf$PB*	"P=`>>2@@@" `^LXVMJjOğLמQ{ز
n01T(U%H>4&
HBGl`'gʯl^rR"fqL,J7Xz/N^,/LВPV\ȇ-ʘ[᢮
OR1
BF`H<h2u8 `l@& PNMf!i@$ZHQ#q+-,:)R---- B.PNPG&L`Pp0m&0129	ޯCqnM$'2.r+Q*q5#QA
PpLB6
p}*b.7[jLa`Q0'IL223ՌqvC;@2<y<
(D,#1$p$s_x3HRcE8@0W0%1t:Qr2)%.4Cm"44IO""Eb,BвlF` u6B ق
IJ zxR1p$S035̤Je"2 4&Q2*L+EƲ,-F$i
NTN鴙.@8R8`.iH82@Bb,bc
f""ȋ1TEWE=$>mFmUNcg|+
s9ۢ"`ȯQuZZZg#5TR5̕D5]ד==Q-^Nc7NRLiz`iJ0 T 2^&elJb&EO+IKT=ILkd[S.	E;M6[-mG
N2-^
˽hhg@ ll`5R#6Ru!D4)jTiL>v+w\g|aE]״jm^3G5`W5"XE P­Q'ڎaB޲s6dc|O#aUGd[EӶuUVX6vOrv".m U(XS"Lg~`:q/awb:DaWdUt=,]fuea|kWfuF`gE$Ul`w1=IY'j!Z #T	H\"]M7n$-M7^*`Je_ɢ}V@`dJxɑJ
Ma;8BRRs*$*TqTr@w<)dVEWUUlVIFuuco5X|7B-xØҊId8Lx!BakzKK8Ud)UQTw듊bn}lH#@Pظ0Y' ;9(wy'.7הvt[dKdcY6gMU@2[b?,ad(dJI%g@Z_Vc+ՔC7{LmmugU`9N`,ЂXx٬$ȏsyb075t.Vlď"b_V[Ќw!y*.@z ݰW:%Xyu]϶]zڮNu 0`DHMZq)cšǚE[ٕ[HRW@TJT	-J ǔMiU۫_Hks]SL9u8Meَ	/ccfw> >5uMy؆coX:IWͶ	w5ۼet4`VxT:B	[ix[uaԭ-|HfnҮz:U@{x{{=U+޺G\SW+*2h w,WYV
@kR%M}ć<i@-ب)rʭ\w<U'ea@K<E%
lXJ`E'}%!.;;wDtׂF-(<{qωZ)
ԧc0M#h1|[ϴc~;wq}c`W]@& a %W{rĜׯ_{,l uK]%;e]=U= OܣI$\=u>۩_@-YkN1],}Uo/7
;hUba%%uU->0㭝c?<M#/+[懾iTl;ܧS˒ӧP艞g[ޕ1[Q~m=	Ys˾ik+%v^xS7q^TƂ<~ܿToyށ/-խ׵s\1efAI닯-~^NoY=)oQ?TalL\gӋ_CاC֧|P|?&?B_l1i_c-?gr<0…:|1D8`x#.RPA	
8``A
(P|kڼ3Ν+!+TK!=	5ԩTVEǏ$(P	LIxU5ڵlk^գbaʤ6޽zfuA.|>oc>E9doAE;9+9g@;qjWLY{u
˵daJ.Ţ""^*c7[("4LO8/|i_[[*wnpqb0.صN:f(/€\FXtv{{-,}iyjzR'y{܈w㰙@LlQVٽ[G2hDGZ>'9Csǟ/g;P\%KVrhcM_E~L-T²5-[SQEu(b!ٱpER!2$#Ź橕boQjofU=@Z1B^(A*<SG5