% Book class: 10pt, two-sided layout, fleqn = left-aligned display equations.
\documentclass[10pt,twoside,fleqn]{book}
%%ADD DEFINITIONS HERE IF ANY%%
%\documentclass{article}
% titlesec: customize sectional heading fonts via \titleformat*.
\usepackage{titlesec}
% makeidx: enables \makeindex/\index; loaded a second time further down (harmless duplicate).
\usepackage{makeidx}
%\usepackage{tocloft}
% \usepackage[draft]{listlbls}
%\renewcommand\newlabel[2]{#1 & \newsublabel#2\\}
%\newcommand\newsublabel[2]{#1 & #2}
\makeindex
% NOTE(review): the \titleformat* settings below are overridden by a second
% group of \titleformat* calls later in the preamble; the last setting wins,
% so \section/\subsection actually end up \footnotesize, not the sizes here.
\titleformat*{\section}{\normalsize\bfseries}
\titleformat*{\subsection}{\small\bfseries}
\titleformat*{\subsubsection}{\footnotesize\bfseries}
\titleformat*{\paragraph}{\large\bfseries}
\titleformat*{\subparagraph}{\large\bfseries}
% graphicx: \includegraphics and \scalebox (needed by \Chi below).
\usepackage{graphicx}
%%ADD DEFINITIONS HERE IF ANY%%
\usepackage{amssymb}
% NOTE(review): epsfig is obsolete (superseded by graphicx, already loaded);
% kept because existing figures may use \epsfig. It is also loaded again below.
\usepackage{epsfig}
\usepackage{amsmath,amscd}
%\usepackage{diagrams}
\usepackage{mathptmx} % selects Times Roman as basic font
\usepackage{helvet} % selects Helvetica as sans-serif font
\usepackage{courier} % selects Courier as typewriter font33
\usepackage{type1cm} % activate if the above 3 fonts are
% not available on your system
% Duplicate load of makeidx (already loaded above); LaTeX ignores the repeat.
\usepackage{makeidx}
% allows index generation
\usepackage{multicol} % used for the two-column index
% NOTE(review): sectsty and titlesec both restyle headings and are known to
% conflict; here sectsty's \sectionfont is commented out, so only the
% titlesec \titleformat* calls below take effect.
\usepackage{sectsty}
%\sectionfont{\fontsize{8}{8}\selectfont}
% These override the earlier \titleformat* group: headings are \footnotesize.
\titleformat*{\section}{\footnotesize\bfseries}
\titleformat*{\subsection}{\footnotesize\bfseries}
\titleformat*{\subsubsection}{\footnotesize\bfseries}
%\titleformat*{\paragraph}{\large\bfseries}
%\titleformat*{\subparagraph}{\large\bfseries}
%\usepackage[pagestyles]{titlesec}
%\newpagestyle{main}[\small]{
% define header
%\sethead[\textbf{\thepage}][\textbf{\chaptertitle}][\textbf{CHAP. \thechapter}] % even left, center, and right
% {\textbf{SEC. \thesection}}{\textbf{\sectiontitle}}{\textbf{\thepage}}} % odd left, center, and right
%\pagestyle{main}
%\titleformat*{\section}{\normalsize\bfseries\MakeUppercase}
% Float placement tuning: allow up to 2 floats at top/bottom of a page,
% 4 per page total, and relax the default fractions so floats place earlier.
\setcounter{topnumber}{2}
\setcounter{bottomnumber}{2}
\setcounter{totalnumber}{4}
\renewcommand{\topfraction}{0.85}
\renewcommand{\bottomfraction}{0.85}
\renewcommand{\textfraction}{0.15}
\renewcommand{\floatpagefraction}{0.7}
% Theorem-like environments (LaTeX kernel \newtheorem; amsthm is not loaded,
% so there is no \qed/proof environment — the \pf/\rl macros below stand in).
% Each environment is numbered independently of the others.
\newtheorem{lemma}{Lemma}
\newtheorem{proposition}{Proposition}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}
\newtheorem{guess}{Conjecture}
\newtheorem{definition}{Definition}
\newtheorem{example}{EXAMPLE}
\newtheorem{problem}{Open Problem}
\usepackage{amssymb}
\usepackage{epsfig}
% Manual page geometry (no geometry package): set side margins and margin-par
% width. The \makeatletter/\makeatother pair is not strictly needed here
% (no @-commands are used) but is harmless.
\makeatletter
\oddsidemargin.9375in
\evensidemargin \oddsidemargin
\marginparwidth1.9375in
\makeatother
% ---- Shorthand macros, group 1. All use plain-TeX \def, which silently
% overwrites existing commands; several kernel/amsmath names are clobbered
% below (noted where it happens).
\def\sgn{{\rm sgn}}
% NOTE(review): \mod overwrites amsmath's \mod (which supplies its own
% spacing); this upright-roman version has no built-in spacing.
\def\mod{{\rm mod}}
% NOTE(review): \lg overwrites LaTeX's \lg (log-base-2 operator); here it is
% repurposed as a left angle bracket, with \rg as its right partner.
\def\lg{\langle}
\def\rg{\rangle}
\def\grd{\nabla}
% Enlarged chi; \scalebox requires graphicx (loaded above).
\def\Chi{\scalebox{1.2}{$\chi$}}
% Begin/end shorthands for the theorem environments defined earlier.
\def\blm{\begin{lemma}}
\def\elm{\end{lemma}}
\def\bpr{\begin{proposition}}
\def\epr{\end{proposition}}
\def\bcr{\begin{corollary}}
\def\ecr{\end{corollary}}
\def\btm{\begin{theorem}}
\def\etm{\end{theorem}}
% \hs{n} -> horizontal space of n pt.
\def\hs#1{\hspace{#1pt}}
% NOTE(review): \e is defined three times in this file ({\bf e}, then \'{e},
% then {\bf e} again); the final definition ({\bf e}) is the one in effect.
\def\e{\bf e}
\def\zb{{\bf z}\hspace{1pt}}
\hyphenation{Lip-schitz}
\hyphenation{di-men-sion-al}
% Delimited macro: this defines \vs with required literal parameter text "6",
% i.e. it must always be written as "\vs6" in the body.
\def\vs6{\vspace{6pt}}
\def\begq{\begin{equation}}
\def\endq{\end{equation}}
\def\pr{\partial}
% Proof lead-in and end-of-proof rule; both are redefined later in the
% preamble (the later, smaller-rule versions win).
\def\pf{\hspace{-15pt}{\em Proof:}\hspace{6pt}}
\def\rl{\hspace{6pt}\rule{2.5mm}{3.5mm}}
%\def\sg{{\rm sgn}(\sin(\omega \, t))}
\fboxrule=1pt
% Overridden below: the last \def\e in the file restores {\bf e}.
\def\e{\'{e}}
\def\noi{\noindent}
% 2x2 matrix body: args delimited by "." and terminated by \par; only the
% odd-position args (#1,#3) are typeset — presumably each arg is a full row.
\def\mtx#1.#2.#3.#4\par{\begin{array}{cc}
#1\\
#3
\end{array}}
% 3x3 analogue: rows come from #1,#4,#7; the other delimited args are unused.
\def\mtxs#1.#2.#3.#4.#5.#6.#7.#8.#9\par{\begin{array}{ccc}
#1\\
#4\\
#7
\end{array}}
%\def\mtxx#1.#2.#3.#4.#5.#6.#7.#8.#9.#10.#11.#12.#13.#14.#15.#16\par{\begin{array}{cccc}
%#1\\
%#5\\
%#9#10\\
%#13
%\end{array}}
% Column vectors of 2 and 3 entries, same "."-delimited, \par-terminated style.
\def\vt#1.#2\par{\begin{array}{c}
#1\\
#2
\end{array}}
\def\vts#1.#2.#3\par{\begin{array}{c}
#1\\
#2\\
#3
\end{array}}
% ---- Shorthand macros, group 2: named operators, bold letters, blackboard
% letters. Several \def's here clobber kernel commands (noted below).
\def\sg{{\rm sg}}
% Delimited macro: defines \xmod with required literal "1" following it,
% so usage must be written "\xmod1".
\def\xmod1{{\rm xmod1}}
% Duplicate of the earlier \def\sgn (identical body; harmless).
\def\sgn{{\rm sgn}}
\def\J{{\bf J}}
\def\F{{\bf F}}
\def\B{{\bf B}}
\def\T{{\bf T}}
\def\ei{{\bf e}_1}
\def\ej{{\bf e}_2}
\def\I{{\bf I}}
\def\A{{\bf A}}
\def\X{{\bf X}}
\def\Y{{\bf Y}}
\def\Z{{\bf Z}}
\def\W{{\bf W}}
\def\R{{\bf R}}
\def\D{{\bf D}}
\def\N{{\bf N}}
\def\U{{\bf U}}
% NOTE(review): \S (section sign), \P (pilcrow), \L (Polish L) and \H
% (Hungarian umlaut accent) are standard LaTeX text commands; \def silently
% overwrites them here, so those glyphs are no longer available in this file.
\def\S{{\bf S}}
\def\0{{\bf 0}}
\def\C{{\bf C}}
\def\V{{\bf V}}
\def\L{{\bf L}}
% Duplicate: \B was already defined identically a few lines above.
\def\B{{\bf B}}
% NOTE(review): \M is redefined later as {\rm M}; that later version wins.
\def\M{{\bf M}}
\def\P{{\bf P}}
\def\G{{\bf G}}
\def\p{{\bf p}}
\def\q{{\bf q}}
\def\H{{\bf H}}
\def\Q{{\bf Q}}
% Blackboard-bold sets (amssymb's \mathbb).
\def\Rl{\mathbb R}
\def\Cx{\mathbb C}
\def\Zx{\mathbb Z}
\def\Nx{\mathbb N}
\def\Qx{\mathbb Q}
\def\Ax{\mathbb A}
\def\Xx{\mathbb X}
\def\Mx{\mathbb M}
% CAUTION: \sp is a TeX primitive alias for superscript (^); overwriting it
% is risky if any package relies on \sp. Here it becomes a 3pt space.
\def\sp{\hspace{3pt}}
\def\Ci{{\rm C}^\infty}
\def\In{{\rm Int}}
\def\ul{\underline}
\def\xint{{\rm xint}}
\def\cmod{{\rm cmod}}
%\def\s{\mbox{\sb s}\hspace{1pt}}
\def\s{{\bf s}}
\def\ub{{\bf u}\hspace{1pt}}
\def\wb{{\bf w}\hspace{1pt}}
% ---- Shorthand macros, group 3. Contains several REdefinitions of macros
% from group 1 (\rl, \pr, \pf, \M, \e); being later, these versions win.
% Small centered dot drawn in picture mode (used as a product/bullet symbol).
\newcommand{\sbt}{\,\begin{picture}(-1,1)(-1,-3)\circle*{2}\end{picture}\ }
% Delimited macro: defines \Cg with required literal "2" after it ("\Cg2").
\def\Cg2{\C(\mathbb R^2)}
\def\Cgn{\C(\mathbb R^n)}
\def\Ix{\mathbb I}
\def\fkg{\frak g}
\def\fkG{\frak G}
\def\Cf{\C(\Rl)}
\def\Ad{{\bf Ad}}
\def\ad{{\bf ad}}
\def\la{\leftarrow}
\def\ra{\rightarrow}
% Delimited macros: \l must be followed by literal "(" and \r by ")", giving
% auto-sized parentheses, e.g. "\l( ... \r)".
\def\l({\left (}
\def\r){\right )}
% Redefines the earlier bold \M as roman M.
\def\M{{\rm M}}
% "defined as" relation symbol.
\def\dff{\stackrel{\rm def}{\equiv}}
\def\ssc{\subsection}
\def\ssb{\subsubsection}
\def\sml{\small}
\def\bla{\big \langle}
\def\bra{\big \rangle}
\def\tr{{\rm tr}}
% End-of-proof rule, smaller than the group-1 version it replaces.
\def\rl{\hspace{6pt}\rule{1.0mm}{2.0mm}}
\def\pr{\partial}
% Proof lead-in; replaces the group-1 version (drops the -15pt back-space).
\def\pf{{\em Proof:}\hspace{6pt}}
\def\blt{\mbox{\put(1,3.5){\circle*{3}}}\;}
% NOTE(review): \div overwrites the standard math division symbol (÷);
% here it becomes the divergence operator "nabla dot".
\def\div{\nabla \, \blt \,}
\def\divg{\rm div}
\def\prm{\prime}
\def\dpm{\prime \, \prime}
\def\codiv{\rm codiv}
\def\cocurl{\rm cocurl}
% NOTE(review): macro name is \trcurl but it prints "tricurl" — possibly a
% typo in one or the other; confirm intended spelling.
\def\trcurl{\rm tricurl}
\def\pdiv{\rm pdiv}
% Negative horizontal space of #1#2 pt (two digits concatenated).
\def\mhs#1#2{\hspace{-#1#2pt}}
% Final (winning) definition of \e: bold e, not the acute-accent version.
\def\e{\bf e}
% ---- Shorthand macros, group 4. NOTE(review): most of this group verbatim
% re-defines macros from group 1 (\noi, \mtx, \mtxs, \vt, \vts, theorem
% shorthands, \bgc/\edc) — redundant but harmless since the bodies match.
\def\noi{\noindent}
\def\mtx#1.#2.#3.#4\par{\begin{array}{cc}
#1\\
#3
\end{array}}
\def\mtxs#1.#2.#3.#4.#5.#6.#7.#8.#9\par{\begin{array}{ccc}
#1\\
#4\\
#7
\end{array}}
%\def\mtxx#1.#2.#3.#4.#5.#6.#7.#8.#9.#10.#11.#12.#13.#14.#15.#16\par{\begin{array}{cccc}
%#1\\
%#5\\
%#9#10\\
%#13
%\end{array}}
\def\vt#1.#2\par{\begin{array}{c}
#1\\
#2
\end{array}}
\def\vts#1.#2.#3\par{\begin{array}{c}
#1\\
#2\\
#3
\end{array}}
% 4-entry column vector; defined twice in this group with identical bodies.
\def\vtxx#1.#2.#3.#4\par{\begin{array}{c}
#1\\
#2\\
#3\\
#4
\end{array}}
\def\sect{\section}
\def\ssc{\subsection}
\def\ssb{\subsubsection}
\def\ch{\chapter}
% NOTE(review): this zero-argument \EX is overridden below by a one-argument
% \EX#1; after this preamble, \EX always consumes the next token/group.
\def\EX{\noi{\bf EXAMPLE:}}
\def\bgc{\begin{center}}
\def\edc{\end{center}}
\def\parlx{\frac{\partial}{\partial x}}
\def\parly{\frac{\partial}{\partial y}}
\def\vtxx#1.#2.#3.#4\par{\begin{array}{c}
#1\\
#2\\
#3\\
#4
\end{array}}
% Generic 2-vector (x,y) and generic 2x2 matrix (a_ij), built on \vt/\mtx.
\def\VT{\l(\vt x.y\par\r)}
\def\MTA{\l(\mtx a_{11}.a_{12}.a_{21}.a_{22}\par\r)}
\def\bpr{\begin{proposition}}
\def\epr{\end{proposition}}
\def\bcr{\begin{corollary}}
\def\ecr{\end{corollary}}
\def\btm{\begin{theorem}}
\def\etm{\end{theorem}}
% Jacobi elliptic functions sn, cn, dn.
\def\sn{{\rm sn}}
\def\cn{{\rm cn}}
\def\dn{{\rm dn}}
% Centered bold figure captions: \FG{1} -> "FIGURE 1"; \FGs takes two tokens
% (e.g. number + letter suffix).
\def\FG#1{\begin{center}{\bf FIGURE #1}\end{center}}
\def\FGs#1#2{\begin{center}{\bf FIGURE #1#2}\end{center}}
% Winning definition of \EX (takes one argument, the example number).
\def\EX#1{\noindent{\bf EXAMPLE #1}}
\def\EXs#1#2{\noindent{\bf EXAMPLE #1#2}}
\def\bgc{\begin{center}}
\def\edc{\end{center}}
\def\np{\newpage}
% Manual text-block dimensions (no geometry package).
\textwidth4.6in
%\textwidth5.0in
\topmargin.0in
\textheight7.5in
% myheadings: running heads are supplied manually via \markboth below.
\pagestyle{myheadings}
%%Author's initials should precede their names,e.g. W.J. Jones%%
%%Upper and Lower Case Should Be Used for Short Title of Paper%%
% Even pages: author name; odd pages: book title.
\markboth{$~$ \hfill {\rm R. Brown } \hfill $~$} {$~$
\hfill {\rm Elementary Introduction to the Theory of Infinitesimal Diffeomorphism Equations} \hfill$~$}
%=========================================BEGIN DOCUMENT======================================================================
%=========================================BEGIN DOCUMENT=======================================================================
%=========================================BEGIN DOCUMENT========================================================================
\begin{document}
\thispagestyle{empty}
\setcounter{page}{1}
\noindent
%{\footnotesize {{\em Dynamics of Continuous, Discrete and Impulsive Systems}\\ Series \B: Applications \& Algorithms 22 (2015) 199-222}\\
%[-1.00mm] Copyright \copyright 2015 Watam Press} $~$ \\ [.3in]
\noindent
%{\footnotesize {\rm To appear in\\[-1.00mm] {\em Dynamics of Continuous, Discrete and Impulsive Systems}}\\[-1.00mm]http:monotone.uwaterloo.ca/$\sim$journal} $~$ \\ [.3in]
\title{An Introduction to\\ Infinitesimal Diffeomorphism Equations\\
{\sml First Edition}}
\author{Ray Brown}
\date{ }
\maketitle
\newpage
%add in
% Whole front matter (TOC onward) is set in \footnotesize.
\footnotesize
\tableofcontents
\newpage
%******************************BEGIN DOCUMENT***************************
%**************************************************************************
\section{\sml About this Book}
{\em Infinitesimal Diffeomorphism Equations} (IDEs) is an entirely new mathematical discipline within the field of Nonlinear Dynamics. In contrast to global dynamics which seeks to predict the long-term asymptotic behavior of a system, IDE theory is primarily focused on local short-term dynamics. In particular, the prediction of the dynamics of complex transients is central to IDE theory. This is because short term dynamics are of great practical importance. Examples of complex transients of interest are the formation of tornados, the outcome of battles, the emergence of ebola, the formation of rogue waves, the onset of seizures and heart attacks. To this end, an objective of IDE theory will be to develop new methods of predicting complex transients, most notable of which are chaotic transients, and to be able to discern the potential for the emergence of complex transients from the {\em form} of the equations used to model a system, whether derived from data or formulated analytically.
\vs6\noi This new field and this book are the product of many years of analysis of complexity, chaos \index{chaos} and their transients. The present study originated at the Mitre Corporation with Dr.~James Ellenbogen in the 1980s and continued with collaborations with Professor Leon Chua, Professor Morris Hirsch, Dr.~Michael Shlesinger at ONR and Professor Walter Freeman at the University of California, Berkeley.
\vs6\noi The driving force behind this multi-year study is the Hirsch Conjecture, which was a statement to the mathematical community in 1983 of the importance of developing a theory that could identify the presence of chaos in a system from the {\em algebraic form} of the equations that define the system. While a complete theory of IDEs and a resolution of the Hirsch Conjecture are yet to be achieved, the theory of IDEs provides a start toward the goals set forth here and does achieve many results.
\vs6\noi In order to make this book available to the broadest possible audience (at least juniors in college) it is necessary to fill in background on the analysis of chaos and complexity. This digression will make the book more complete and accessible to the unfamiliar reader as well, and will provide a historical record of how the theory was conceived.
\vs6\noi {\bf A note on computer code provided in this book:} Most illustrations are accompanied by an abridged version of the computer code used to generate them. Microsoft Visual Basic V6 Professional$^\copyright$ (VB6) is the application used for all illustrations. VB6 was chosen for five reasons: (1) VB6 is a simple and efficient application for writing scientific code. Most programs can be written with only a few lines of VB code; (2) even a child can learn to use the minimal level of VB6 necessary to code quickly and explore the code without assistance; (3) VB6 provides a completely natural means of writing code which more closely aligns with high school algebra; (4) VB6 code, like its distant predecessor FORTRAN, parallels how models are developed; (5) VB6 is readily available and inexpensive.
%---------------------------------------------------------------------------------------------------------------------
\section{\sml Preface}
Infinitesimal Diffeomorphism Equations (IDE) arose as a means of addressing the Hirsch Conjecture \cite{bi:mh}. The Hirsch conjecture, briefly stated, asserts that it should be possible to determine whether a differential equation has chaotic solutions by simply examining its form.
\vs6\noi To address Hirsch's conjecture it is necessary to define what is meant by form and in this work, form will be defined as algebraic form. Additionally it is necessary to rigorously settle on what is meant by chaos. There are many definitions: (1) Exponentially sensitive dependence on initial conditions; (2) a positive Lyapunov exponent; (3) a transverse homoclinic point, to name three. Of these, the most mathematically rigorously supported is the third. Its basis is known as the Smale-Birkhoff Theorem \cite{bi:sb}. The theorem characterizes what is meant by {\em chaos} by proving that the level of complexity in a system having a transverse homoclinic point is, on an invariant subset, equivalent to a Bernoulli shift \cite{bi:pw}. For another proof of the Smale-Birkhoff theorem see Nitecki, \cite{bi:zn}, page 154.
\vs6\noi A Bernoulli shift is the mathematical idealization of a coin toss. However, the orbit of a shift is no more complicated than the initial condition of the orbit. This leads to the question of what is a complicated initial condition. Since the number that determines an initial condition may be considered to be a binary sequence, the question of how complicated an orbit is depends on how complicated a binary sequence is. Kolmogorov analyzed \cite{bi:jf} how complicated a binary sequence may be and formulated a measure that can be rigorously applied. His metric is known as algorithmic complexity and the highest level of algorithmic complexity is {\em positive algorithmic complexity} formulated by Alekseev \cite{bi:va}. Since every binary sequence can be identified with a number between zero and one, another mathematician, Per Martin-L\"{o}f \cite{bi:jf}, proved that almost every number in the interval $[0,\, 1]$ has positive algorithmic complexity.
\vs6\noi The research on complexity makes clear that the definition of chaos must be considered within the context of the more general notion of complexity since chaos and complexity are linked through the Bernoulli shift. This fact leads to a more generalized version of the Hirsch Conjecture that states that it should be possible to determine the level of complexity of a system from its algebraic form.
\vs6\noi While there are various definitions of chaos, complexity is a more wide-ranging concept and is addressed indirectly in Ergodic theory \cite{bi:pw} which provides the most organized approach to the level of complexity (a complexity spectrum) in dynamical systems. The spectrum runs from ergodic to Bernoulli; and, Bernoulli systems are further classified by their entropy. However, this overlooks the practical matter that a non ergodic system may be temporarily complex. And this is what makes systems in the real world so difficult to predict. For example, periodic systems are thought of as simple. However, if the periodic system alternates between something as simple as the orbit of the hands of a clock from noon to midnight to the complexity of the three-body problem, the periodic system is difficult to predict. This example raises the question of what are the processes in nature that drive a seemingly periodic system to alternate between simple dynamics and complex dynamics. In particular, while it may be possible to characterize the components of a periodic system, understanding of the transition between those components may remain elusive. Therefore, the {\em transition} between states of a real-world dynamical system (such as weather) is a part of the complexity of the system and must be mathematically addressed.
\vs6\noi In addition to the issue of identifying complexity from the form of a model, there is also the issue of long term trends (global dynamics) and short term trends (local dynamics, esp. transients). The trends that are of the most immediate practical interest are short term trends. Predicting the formation of a tornado is the best example. Implicit in the prediction of short term dynamics is how systems transition from one level of complexity to another. The answer to this question is essential to predicting the formation of a tornado or the onset of an epileptic seizure. Conventional ODE theory and Newtonian dynamics are hard-pressed to address such questions common in the {\em natural} world. These considerations further enlarge the Hirsch Conjecture to state that it should be possible to determine both the long-term and short-term levels of complexity of a system from the algebraic form of the model of a system. This is the context in which the Theory of Infinitesimal Diffeomorphism Equations arose.
\vs6\noi IDE theory provides an avenue through which to organize questions of complexity, to examine the transitions between levels of complexity and to identify both long-term and short-term dynamics. This line of thought offers a generalization of the Hirsch Conjecture:
\begin{center}
\parbox{3.5in}{{\em Is it possible to formulate a dynamical system so that its level of complexity, its transitions between levels and the short term and long term levels of complexity are revealed by its algebraic form ?}}
\end{center}
\vs6\noi Addressing the generalized Hirsch Conjecture is fundamental to IDE theory.
\vs6\noi In order to address complexity more generally, there is a need for basic organizing concepts to be used in place of Newtonian dynamics and ODEs since Bernoulli shifts are not formulated in terms of ODEs or Newtonian dynamics. This point leads to the choice of {\em stretching and folding} as basic organizing concepts due to their use by Smale \cite{bi:sb} in understanding the formation of chaos in the three-body problem. Serendipitously, the dynamics that originate by combining stretching and folding lend themselves well to processes in nature and the social sciences where periodic processes (folding) combine with very complex stressful (stretching) evolutionary processes to drive the formation of life, weather, and geology. As a result, IDE theory makes extensive use of the concept of stretching and folding in place of Newtonian dynamics to formulate models. In particular, it is possible to use stretching and folding to formulate IDEs in a manner that makes the dynamics of complexity transparent from the algebraic form of IDEs.
\begin{center}\parbox{4.0in} {The ideal solution of the Hirsch Conjecture would be a theorem that states how any IDE can be decomposed into a complexity series consisting of elementary complexity building blocks that can be classified by their level of complexity by a single number such as entropy.
\vs6 Included in this representation would be the transitions between the building blocks as well as both the macroscopic and microscopic dynamics which represent the local and global dynamics.
\vs6 The series must be in closed form in terms of elementary functions with the provision that very insignificant terms can be discarded from any infinite expansions to achieve closed form solutions that are of practical value.}
\end{center}
\vs6\noi To address the Hirsch Conjecture, the form of an IDE must meet the following criteria:
\begin{enumerate}
\item Have a closed-form algebraic representation in terms of elementary functions
\item Explicitly reveal the dominant fixed points
\item Explicitly reveal the transitions between the dynamics determined by its dominant fixed points
\item Include both long-term and short-term dynamics
\end{enumerate}
\vs6\noi As a practical matter, IDE theory must show how to leverage other fields of mathematics to advance the theory. Also needed is how to use a {\em calculus} of IDEs to develop complex models in all sciences. And, when an IDE arises from an ODE, the IDE so derived should be a local solution of the ODE.
\vs6\noi In summary, the Theory of Infinitesimal Diffeomorphism Equations addresses the question of complexity, i.e., the generalized Hirsch Conjecture; provides an alternative to ODEs; frees biological and social sciences from the burden of having to use Newton's laws by relying on the fundamental source of complexity as revealed by the Smale-Birkhoff theorem; provides an avenue for formulating problems in closed-form in terms of elementary functions; provides a calculus for building up complex IDEs from simple parts; provides a means for formulating models with the use of empirical data; and, provides a new method of {\em numerical analysis} and integration.
\vs6\noi {\sml \bf How does the study of IDEs assist and advance the work of other scientific enterprises?}
Infinitesimal diffeomorphisms (IDEs) [4] are transformations on a manifold that can closely approximate the solution of a differential equation. However, they are a legitimate subject of analysis in their own right due to (1) their potential application in the biological and social sciences as seen in [7]; (2) their use in the numerical approximation of the solutions of ODEs; (3) their use as closed form diffeomorphisms having complex dynamics that are equivalent to such systems as that of Chua, Lorenz and R\"{o}ssler thus facilitating the direct study of such systems without the need of ODEs; (4) their independence from the laws of physics; (5) their use in modeling and simulation of large complex systems that presently require hundreds of ODEs to simulate and study; (6) their use in understanding the dynamics of complexity; (7) their use in constructing morphologically equivalent systems that can be expressed in closed form in terms of elementary functions. In this respect they provide morphological solutions of ODEs which cannot be solved in closed form in terms of elementary functions, or require conventional numerical methods to solve. For example, there is no closed form solution of the forced Duffing's equation in terms of elementary functions; however, there is an IDE solution in terms of elementary functions. (8) Statistical methods and even Stochastic Differential Equations only provide probabilistic correlations between dynamical parameters whereas IDEs provide cause and effect relationships between parameters.
\vs6\noi The importance of IDEs to the study of the morphology of systems is made clear by the human EEG [2]: it is the morphology that determines normal versus clinical status of a human brain. Further, as is demonstrated in evolution, when chaotic systems and events unfold, they only rely on the occurrence of a frequency component rather than the order of occurrence of the frequency component in the dynamics of a phenomenon or process. This is the morphology of natural systems. Morphology is nature's way of eliminating the importance of the specificity of the initial conditions in the origination of the dynamics of chaotic or complex processes or events. For example, it is well-known that chaotic processes have sensitive dependence on initial conditions while still having the same Fourier spectrum. This means that the exact initial conditions are not relevant so long as they are not too far apart, because all chaotic processes which start in a neighborhood of each other lead to the same morphological dynamic. This is the fact that biological and social dynamics depend on for their time evolution: some degree of independence from the initial conditions and the unfolding of the relevant components in any order, which may be random. The example of the tobacco mosaic virus provides a metaphor. If the virus is decomposed into its components and then placed in a test tube, it can reassemble itself. Clearly, the order/arrangement in which the components appear in the liquid is not important, but only that they are present and available for a random process to facilitate the reassembly of the virus. IDEs provide very direct insight into the morphology of the dynamics of any system. (9) The ``laws'' on which social and biological systems depend to facilitate the formation of any degree of complexity are stretching and folding. IDEs are specifically formulated from these two dynamics and are thus ideally suited to study the morphological dynamics of complex systems.
\vs6\noi To summarize, IDEs are formulated in terms of the fundamental source of {\em complexity} dynamics of biological and social systems rather than the laws of Newton; IDEs can be used to predict the dynamics of systems rather than describe the correlation between systems; IDEs provide significant computational compression over the use of ODEs for the formulation of complex biological theories; IDEs, for a large class of ODEs of interest to the biological and social sciences, provide very accurate approximations of the solutions of ODEs; IDEs are formulated in closed form in terms of elementary functions thus allowing for simplicity in modeling, simulation and programming. IDEs are iterations as opposed to functions of time and are thus a form of numerical integration; IDEs are local solutions of ODEs when they arise from an ODE; when there is an attractor component to the IDE, the IDE may be a global solution of an ODE; IDEs can be used to determine the long-term asymptotic dynamics of a system as is done in the subject of Global Analysis.
%=================================INTRODUCTION==================================
\part{Infinitesimal Diffeomorphism Equations}
\label{pr:IDE}
%01
\chapter{Introduction}
\label{ch:intro}
\sml
\begin{center}
\parbox{3.5in}{\em Infinitesimal Diffeomorphism Equations (IDE) are an alternative to ordinary differential equations (ODE) (which usually cannot be solved in closed form in terms of elementary functions). IDEs provide an avenue for the biological and social scientist who is not proficient in advanced mathematics to construct models to be used to explore and predict the dynamics of their subject matter}
\end{center}
\footnotesize
\vs6\noi This book is divided into three parts. Part \ref{pr:IDE} (Chapters \ref{ch:intro} to \ref{ch:ap}) provides an overview of the subject matter and a presentation of the many components that are necessary to developing an {\em intuitive} understanding of the theory. Part \ref{pr:mt} (Chapters \ref{ch:formal} to \ref{ch:cs}) presents the current formal mathematical theory of IDEs. Part \ref{pr:app} (Chapters \ref{ch:mod} to \ref{ch:trans}) present applications to science and engineering.
\vs6\noi {\bf Part \ref{pr:IDE}}, after presenting the framework needed to develop the IDE theory, Chapters \ref{ch:intro}, \ref{ch:note} and \ref{ch:flow}, discusses the fundamental concept that represents the highest form of complexity, the Bernoulli shift, Chapter \ref{ch:shift}. The explicit algebraic form in which the shift appears in an equation must be understood before there is any chance of identifying how variations of the shift appear in familiar equations like the Ueda equation or the Chua equation which represent real world systems in which the presence of chaos is not clear from the algebraic form of their ODEs.
\vs6\noi Two algebraic components in the shift are the integer part of a number and its fractional part. Every number can be written as the sum of these two parts. The algebraic expression of this fact is $x=[x]+\{x\}$, where $[x]$ is the integer part of $x$ and $\{x\}$ is the fractional part of $x$. The need to split a number into these two parts in order to explain the shift is part of the problem of identifying a variation of the shift in an ODE. The shift is a discrete function while equations of interest are at least continuous. To see algebraically an embedded shift in a continuous equation it is necessary to shift attention from the continuous aspect of the equation to a discrete subset of its orbit. This is done by evaluating the first return map or the time-one map, Sec. \ref{sc:t1fr}. These two discrete maps implicitly contain $[x]$ and $\{x\}$. Therefore the continuous map must contain a transition between these two discrete components. For this reason, transition functions must be introduced, Chapter \ref{ch:tran}, and their role in creating a variation of the shift in a discrete map must be understood and explicitly visible algebraically.
\vs6\noi At this point enough background has been developed in the preceding chapters to take an in-depth look at the Hirsch Conjecture, Chapter \ref{ch:hirsch}. The reason for this is that this entire book and, therefore, IDE theory, originates from an investigation of the Hirsch Conjecture.
\vs6\noi Fundamental to understanding complexity and to resolving the Hirsch Conjecture is to understand the most basic dynamic needed to produce complexity and that is nonlinearity. The various manifestations of nonlinearity in two dimensions are discussed in Chapter \ref{ch:nl}.
\vs6\noi Chapter \ref{ch:lg} is an aside from the main thread. It is included to draw links between IDE theory and classical Lie group theory for future research between linear and nonlinear groups and the role of nonlinear groups in producing complexity.
\vs6\noi Because Bernoulli shifts are fundamental to complexity and because the Smale-Birkhoff theorem links two simple dynamics to producing a shift, stretching and folding, Chapter \ref{ch:sf} examines how these two dynamics combine to produce complexity and a shift.
\vs6\noi The Chapters \ref{ch:intro} to \ref{ch:sf} provide the preparation needed to examine the concept of chaos in detail. Therefore, the purpose of Chapter \ref{ch:chaos} is to examine definitions of chaos, Sec. \ref{sc:defc01} and Sec. \ref{sc:defc02}, functions of a shift, Sec. \ref{sc:fs} and representations of the shift, Sec. \ref{sc:reps} as IDEs.
\vs6\noi Chapter \ref{ch:nat} relates chaos to nature using illustrations that compare chaotic time-one maps to actual images of natural phenomena such as waves on the ocean, images of sea shells and sand dunes. Chapter \ref{ch:cmplx} examines other forms of complexity that do not rise to the level of being a function of a shift.
\vs6\noi Chaos came to the attention of the general scientific community mainly through the notion of a {\em strange attractor}. The term {\em strange} in referring to these attractors was used because they were unlike the attractors that were commonly seen in engineering such as circles or fixed points. Chapter \ref{ch:nc} demonstrates that unconventional attractors can arise from non chaotic processes and therefore such attractors do not automatically imply that they are formed from a chaotic system.
\vs6\noi Chapter \ref{ch:bool} demonstrates that high-dimensional Boolean automata locally can have the same level of complexity as chaos. The shift dynamics may be obscured by the dimensionality of the system.
\vs6\noi Chapter \ref{ch:ap} demonstrates that almost periodic systems can locally be as complex as chaos. The significance of this is that natural systems need not have transverse homoclinic points in order to {\em appear} to be chaotic.
\vs6\noi {\bf Part \ref{pr:mt}} presents the current state of the mathematical theory for IDEs.
\vs6\noi The current formal theory of IDEs is found in Chapters \ref{ch:formal}, \ref{ch:solve}, \ref{ch:cal} and \ref{ch:ran}. The preceding chapters were necessary to set the stage for a formal theory. Of particular importance is the Hirsch Conjecture, Chapter \ref{ch:hirsch}, which has motivated the development of this theory.
\vs6\noi Chapter \ref{ch:cs} demonstrates that IDE theory can reproduce the same level of complexity as found in the classical chaotic systems. This is essential to validating IDE theory as a viable alternative to ODE theory.
\vs6\noi {\bf Part \ref{pr:app}} demonstrates how to apply IDE theory to well-known problems and that IDE theory can enhance classical results in problems of interest to human society.
\vs6\noi Chapter \ref{ch:mod} provides some simple component IDEs that frequently occur in biological and social systems. It also provides a bridge from Newtonian modeling to IDE modeling. Chapter \ref{ch:hs} explains how hyperbolic systems occur in both the global and local dynamics of biological and social systems. Chapter \ref{ch:pg} extends IDE modeling to population growth for which there exists competition between population centers. Chapter \ref{ch:inf} uses IDEs to model infection and epidemiology.
\vs6\noi Chapter \ref{ch:kiii} initializes the goal of linking the actions of normal subjects involved in everyday activities to the neurodynamics of the human brain. This analysis presents a set of axioms that may provide the basis for axiomatic neuroscience and also link neurodynamics to human actions.
\vs6\noi Due to the distinguished position that prey-predator relationships have in ecology Chapter \ref{ch:pp} provides IDE models that closely agree with empirical data from known prey-predator evolution. This is an advance over the traditional model of Lotka-Volterra.
\vs6\noi Chapter \ref{ch:sg} applies IDE theory to construct models in which there is competition between two groups determined by human trafficking and cultural hegemony.
\vs6\noi Chapter \ref{ch:lrn} models the psychological process of learning. To properly treat this subject it is necessary to review how human learning emerged over 7 million years of hominid evolution and to conjecture some axioms of learning. An important aspect of this model is the concept of learning capacity that is analogous to the carrying capacity of the environment in ecology.
\vs6\noi Chapter \ref{ch:cd} demonstrates how to use IDEs to construct complex dynamics directly. Chapter \ref{ch:trans} demonstrates that IDEs can be used to study the effect of transitions between dynamical states in a particularly efficient manner. The computational efficiency in this study is possible because IDEs do not use ODEs and thus no numerical integration is involved.
\vs6\noi Chapter \ref{ch:post} provides some direction for the next book and a short summary of the origin of this book.
%========================================Concept of IDE==========================================
\section{\sml Introduction to the Concept of an IDE}
The biological and social sciences are hard pressed to apply the laws of Newton, which are formulated as ordinary differential equations (ODEs), due to the multiplicity of parameters affecting the dynamics of such systems; hence the reliance on statistical methods. However, statistical methods do not predict cause-effect relationships. Rather, statistics provides correlations between dynamics of populations. There is a need for a ``calculus'' that can be used to predict causal relationships in biological and social systems, that is based on the operative factors of these systems---the sources of complexity---and that is an alternative to ODEs.
\vs6\noi The simplest example of an IDE is the transformation
\[ \T_h=\exp(h\, \A)\]
where $\A$ is an $n \times n$ matrix of constants. The value of an IDE at a point $\X \in \Rl^n$ is given by
\[ \T_h(\X)=\exp(h\, \A)\, \X\]
\vs6\noi Examples will show that the parameter $h$ that appears in the theory may be used to study the effect of {\em stretching} in a biological or physical system. It will be shown that when an IDE starts with a small value for $h$ and then, as $h$ is increased, the complexity of the system correspondingly increases in direct relation to the value of $h$. The implication is that stretching, or shearing are significant factors in creating complexity, and in social systems, disorder.
%========================================WHAT ARE IDES?===================
\subsection{\sml IDEs are an Alternative to ODEs}
IDEs are transformations that are closed form expressions in terms of elementary functions of dynamical systems such as prey-predator systems, competitive-cooperative systems, social systems, physical systems or any system that can be mathematically modeled for study, analysis and prediction.
\begin{example}{\bf Iterating an IDE}
\noi Let
\[\T(\X)=\exp(h)\, \X\] where $\X$ and $h>0$ are real numbers.
Iterating $\T$ $n$ times gives $\T^n(\X)=\exp(n\, h)\, \X$. Also,
\[\|\T(\X)-\X\|=\|(\exp(h)-1)\X\|\leq \|\exp(h)-1\|\|\X\|\leq h\,\exp(h)\|\X\|\]
so that the distance between two iterates, for fixed $\X$, is controlled by the number $h$. For small enough $h$ this difference can be made {\em infinitesimal}, thus the designation {\em Infinitesimal Diffeomorphism Equation} or IDE.
\end{example}
\vs6\noi For very small $h$, $\T^n(\X)$ is an orbit that coincides with the solution of an ODE, in particular, the ODE
\[\dot{x}=x\]
which is solved by $x(t)=\exp(t)\, x_0$. If the subscript is dropped and $t$ is replaced by $h$, this may be viewed as a transformation that maps $x$ to $\exp(h)\, x$. This is the {\em natural} IDE associated to, or derived from, the above ODE. Later it will be seen that not all IDEs are derived from ODEs.
\begin{example}{\bf The Harmonic Oscillator IDE}
\[\T \l(\vt x.y \par\r)= \l(\vt x \cos(h)+y\,\sin(h). y\, \cos(h)-x\, \sin(h)\par\r)\]
arises from the ODE
\[\l(\vt \dot{x}. \dot{y}\par\r)=\l(\vt y. -x\par\r)\]
\end{example}
In both examples the orbits of the IDEs generate a set of points that correspond to an exact solution to the ODE. By choosing $h$ small enough, the IDE provides an excellent representation of the solution of the ODE.
\begin{example}{\bf The Exponential IDE}
The IDE defined by the equation
\[\T(\X)=\exp(h\, \A)\,\X\]
where $\A$ is an $n \times n$ matrix of real or complex numbers and $\X$ is an $n$-dimensional real or complex vector arises from the linear ODE
\[\dot{\X}=\A\, \X\]
\end{example}
\vs6\noi In each example, the IDE provides a global numerical solution of the associated ODE. This is a result of the linearity of the associate ODE. Nonlinear ODEs will have associated IDEs, but which, in general, will only provide local solutions unless the solution is an attractor. However, the IDE will still provide good insight into the solution of the ODE in that it illustrates how the various factors of stretching and folding combine to produce the final dynamics of the ODE. Importantly, IDEs are not always derived from ODEs: They can be derived based on the dynamics of the biological or social system directly.
\vs6\noi The examples of the preceding subsection demonstrate the origin of IDEs from ODEs. However, IDEs are more general than ODEs. One example is provided by setting the parameter $h=1$. In this case the IDE coincides with the solution of a finite difference equation. Many more examples will be presented in later sections of this book.
\begin{example}{\bf An IDE from a Nonlinear First Order ODE}
\vs6\noi Consider the equation
\begq
\label{eq:firstorder}
\dot{x}=x^2; \;\; x(t_0)=x_0
\endq
The solution is
\[x(t)=\frac{x_0}{1-x_0 \cdot t}\]
The associated IDE is
\[\T_h=\exp(h\, x)\, x\]
\end{example}
\begin{figure}[htbp]
\includegraphics[height=1.933in,width=2.947in,angle=0]{C:/Research/Book/Figures/eps/FirstOrderIDE.eps}
\caption{{\sml Comparison of the IDE derived from Eq. \ref{eq:firstorder} with the exact solution; the IDE is in Red; The solution is in Blue}}
\label{fg:firstorderIDE}
\end{figure}
\sml
\vs6\noi Figure \ref{fg:firstorderIDE} shows the IDE orbit in red and the exact solution in blue. The IDE orbit is purposefully shortened to make the contrast clear.
\begq
\label{cd:fode}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:firstorderIDE} in red is as follows:}\\
h&=&0.001\\
&&\mbox{For j = 1 To M}\\
x &=& 0.01 + j / M\\
t &=& 0.1\\
&& \mbox{For i = 1 To N}\\
t &=& t + h\\
u &=& \exp(h \cdot x) \cdot x\\
x &=& u\\
&&\mbox{\bf Plot Point (t,x)}\\
&& \mbox{Next i}\\
&& \mbox{Next j}
\end{array}\right\}
\endq
\begin{example}{\bf Another IDE from a Nonlinear First Order ODE}
\vs6\noi Consider the equation
\begq
\label{eq:firstorder02}
\dot{x}=x-x^2; \;\; x(t_0)=x_0
\endq
The solution is
\[x(t)=\frac{x_0}{x_0+ (1-x_0) \cdot \exp(-(t-t_0))}\]
The associated IDE is
\[\T_h=\exp(h\,(1- x))\, x\]
\end{example}
\begin{figure}[htbp]
\includegraphics[height=1.417in,width=3.99in,angle=0]{C:/Research/Book/Figures/eps/FirstOrderIDE02.eps}
\caption{{\sml Comparison of the IDE derived from Eq. \ref{eq:firstorder02} with the exact solution; the IDE is in Red; The solution is in Blue}}
\label{fg:firstorderIDE02}
\end{figure}
\vs6\noi The degree of agreement between the IDE and the exact solution is a function of step size. Carrying out long division for the solution of Eq. \ref{eq:firstorder} to the first order in $h$ gives
\[x(h) \approx x_0 \,(1+h \, x_0)\]
and the IDE to the first order is
\[\T_h(x)= \exp(h\, x)\, x\,\approx x\,(1+h\, x)\]
Choosing $h=0.001$, second order terms are of the order $0.000001$. A similar argument can be made for Eq. \ref{eq:firstorder02}
\vs6\noi When an exact solution for an ODE can be found, the IDE can be refined. For Eq. \ref{eq:firstorder} the refined IDE is
\[\T_h=x\cdot(1-x\, h)^{-1}\]
and for Eq. \ref{eq:firstorder02}
\[\T_h=x\cdot(x+(1-x)\, \exp(-h))^{-1}\]
For each of these IDEs
\[\T_h^2=\T_{2\, h}\]
as a result of there being an exact solution of the ODEs, see Sec. \ref{sc:alg}.
\vs6\noi The IDEs are written using inverses instead of fractions in anticipation of higher-dimensional IDEs.
\begin{proposition}
\label{pr:idepr}
Let
\begq
\label{eq:idepr}
\dot{\X}=f(\X,t)
\endq
and assume $\X(t,\X_0)$ is the unique solution of Eq. \ref{eq:idepr} with initial condition $\X_0$ and that $\|f(\X,t)\|\leq M$.
Then (1)
\[T_h(\X_0)=\X(h,\X_0)\]
is an IDE; and, (2)
\[\T^n_h=\T_{n\,h}\]
\end{proposition}
\pf
(1) follows from
\[\|\T_h(\X_0)-\X_0 \| = \| \X(h, \X_0)-\X_0\|\leq M\, h\]
and (2) follows from (a) $\X(t,\X_0)$ is the solution of an ODE so $\X(t+s, \X_0)= \X(t, \X(s, \X_0))$; and, (b) mathematical induction.
\rl
%=============================ODE that is not an IDE================================================
\subsection{\sml An ODE with Bounded Periodic Solutions for which there is no global IDE}
\label{sc:noide}
A key property of an IDE is that there is a uniform step size $h$ that holds for the entire domain of the IDE. The following ODE is an example where this cannot hold globally.
\begin{lemma}
\label{lm:noide}
There is an ODE whose only solutions are bounded periodic solutions for which there is no global IDE.
\end{lemma}
\pf
Let the system be given by the following time-varying linear differential equations:
\begq
\label{eq:nide}
\l( \vt \dot{x}. \dot{y} \par \r)=
\l( \vt -2\,y\,t. 2 \,x\,t \par \r)
\endq
The general solution is given
by:
\begq
\label{eq:za01a}
\l( \vt x(t). y(t) \par \r)= \l( \vt x_0\cos(t^2)-y_0\sin(t^2). y_0 \cos(t^2)+x_0\, \sin(t^2) \par \r)
\endq
As time increases the step size must constantly change, so the required property of a uniform step size cannot be maintained:
\[\|\T_h(\X_0)-\X_0\|=\|\T_h(\X_0)-\X(t)+\X(t)-\X_0\| \geq \|\dot{\X}\, h\|=\|t\,h\, \M\|\|\X_0\|\]
\rl
\begin{figure}[htbp]
\includegraphics[height=2.307in,width=4.423in,angle=0]{C:/Research/Book/Figures/eps/Za01a.eps}
\caption{\sml{\bf Time series for Eq. \ref{eq:za01a}}}
\label{fg:za01a}
\end{figure}
\sml
\vs6\noi While Eq. \ref{eq:za01a} has no global IDE solution, by imposing boundary conditions on the originating ODE, it is possible to construct an IDE over any given bounded interval.
The construction of an IDE that solves Eq. \ref{eq:za01a} over a time interval $T$ is as follows:
\begin{equation}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:za01a} is as follows:}\\
&& \mbox{For i = 1 to N}\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
\\
\theta&=& \arctan(w/z) \cdot T\\
\\
u &=& x \cdot \cos(h \cdot \theta) +y \cdot \sin(h \cdot \theta)\\
v&=& y \cdot \cos(h \cdot \theta) - x \cdot \sin(h \cdot \theta)\\
\\
x &=& u\\
y &=& v\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right\}
\end{equation}
%===================THE ORIGIN OF IDEs=================================
\subsection{\sml The Origin of IDEs}
IDEs were developed to address two issues: (1) the Hirsch Conjecture; and (2) in order to address the Hirsch Conjecture, it was necessary to analyze the dynamics of complexity to which the Hirsch Conjecture alludes.
\vs6\noi Initially, IDEs were intended to provide a method of analyzing the solution of ODEs which would provide a step toward addressing the Hirsch Conjecture. In the process, IDEs became a separate topic of study in their own right. The following two sections provide an understanding of how these two factors brought about the Theory of Infinitesimal Diffeomorphism Equations.
%=============Problem of complexity=========================
\subsection{\sml The Problem of Complexity}
Complexity or chaos was first formally recognized by Poincar\'{e} in 1895 in \cite{bi:hp}:
\scriptsize
\begin{center}
{\bf A Quote from Poincar\'{e}} \cite{bi:hp}, page 1059.
\fbox{\parbox{3.5in}{{\bf 397}. When we try to represent the figure formed by these two curves and their infinitely many intersections, each corresponding to a doubly asymptotic solution, these intersections form a type of trellis, tissue, or grid with infinitely fine mesh. Neither of the two curves must ever cut across itself again, but it must bend back upon itself in a very complex manner in order to cut across all of the meshes in the grid an infinite number of times.
\vs6 \noi The complexity of this figure is striking, and I shall not even try to draw it. Nothing is more suitable for providing us with an idea of the complex nature of the three-body problem, and of all the problems of dynamics in general, where there is no uniform integral and where the Bohlin series are divergent.
Various hypotheses remain possible.
\vs6\noi
(i) ... We would then have to conclude instability of the solar system.
}}
\end{center}
\sml
\vs6\noi The question before Poincar\'{e} was whether the earth, sun, moon or the solar system generally might fly apart unexpectedly, or was it stable? This question can be narrowed down to an examination of the differential equations of motion of the earth, moon, and sun, i.e., the three-body problem. Specifically, would it be possible to discern this potential for chaos in the differential equations themselves? Therefore, the entire problem of discerning the potential in a system for complexity narrows down to whether this potential is present in the differential equations that describe a system.
\vs6\noi Subsequently, numerous researchers have encountered complexity in various equations such as Duffing's Equation as seen in Sec.\ref{sc:duf}. In 1964, Smale [2], in the Smale-Birkhoff Theorem, provided the definitive explanation of the source of chaos as identified by Poincar\'{e} through the use of the horseshoe paradigm, see Fg. \ref{fg:tangles}.
\vs6\noi This remarkable theorem blurs the distinction between an intuitive notion of what it means to be {\em unpredictable} and what is believed to be predictable.
\scriptsize
\begin{center}
{\bf Intuitive explanation of the Smale-Birkhoff Theorem}
\fbox{\parbox{3.5in}{ A coin toss is thought of as a random event even though, as will be explained in Sec. \ref{sc:random}, there is no formal mathematical definition of ``random''. However, the ``random'' metaphor is useful. The mathematically precise transformation that embodies the intuitive idea of random is a shift. $2\, x \,{\rm mod}(1)$ is an example. The Smale-Birkhoff Theorem says that a transformation that has an intersection between the stable and unstable manifolds, as seen in Fg. \ref{fg:tangles}, is as complex as a shift. That is, it is {\em intuitively} as ``random'' as a coin toss even though the transformation is completely deterministic, meaning that the future iterations of the transformation (its orbit) are completely determined by where it starts, i.e., its {\em initial condition}.
\vs6\noi A statement of this theorem to be used in a definition of chaos in a later section is that the theorem states that there is an invariant subset of the diffeomorphism on which the diffeomorphism is exactly a shift. }}
\end{center}
\footnotesize
\vs6\noi The essence of the horseshoe paradigm is that complexity arises, in its simplest form, from the operation of two dynamics: Stretching and folding. In the Twist and Flip map used to create Fg. \ref{fg:tangles} [5], the twist is stretching and the flip is folding.
\vs6\noi As to the significance of Smale's results, both stretching and folding can often be identified in social and biological systems as social friction or friction between species in the competition for survival. Folding is inherent in all systems in the form of periodic and almost periodic processes. In social systems, psychological stress is a form of stretching in that it places demands on psychological and material resources that push these resources to their limits. By devising a "calculus" based on stretching and folding the social and biological sciences may be better positioned to predict causal relationships.
\begin{figure}[htbp]
\centering
\includegraphics[height=2.82in,width=3.in,angle=0]{C:/Research/Book/Figures/eps/TanglesFigure.eps}
\caption{\sml Homoclinic Tangles from the Twist and Flip Map. Plate A: Unstable manifold is in red; Stable manifold is in green. They are symmetric about the vertical axis. The intersection of stable and unstable manifolds is the source of complexity or chaos. Plate B is an enlarged version of the region inside the black rectangle. Plate C is a more detailed view of the unstable manifold inside the black rectangle near the fixed point; the hyperbolic fixed point is indicated by a black dot}
\label{fg:tangles}
\end{figure}
\vs6\noi Figure \ref{fg:tangles} is produced by deriving a hyperbolic fixed point, there are many, on the vertical axis. In this case the fixed point is approximately at (0.0, 1.95). The slope of the unstable manifold is -1.176166. Using this information, select a set of points on a very short line segment having the slope of the unstable manifold, starting at the fixed point and iterate the map. The visual basic code for this figure is presented in some detail to clarify how to produce images of unstable manifolds accurately.
\begq
\label{cd:tang}
\left.
\begin{array}{lcl}
&&\mbox{{\bf The code for Fig. \ref{fg:tangles}, Plate A is as follows:}}\\
&&\mbox{M = 200000}\\
&& \mbox{For j = 1 to M}\\
x &= &(-1 + (2 \cdot (j - 1) /{\rm M})) \cdot 0.1\\
y &= &-1.176166 \cdot x + 1.95\\
&& \mbox{For i = 1 to 11}\\
r &=& \sqrt{(x - 1) ^ 2 + y ^ 2}\\
u &=& (x - 1) \cdot \cos(r) - y \cdot \sin(r) + 1 \\
v &=& y \cdot \cos(r) + (x - 1) \cdot \sin(r)\\
x &=& -u\\
y &=& -v\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}\\
&& \mbox{Next j}
\end{array}\right\}
\endq
First, a very small line segment tangent to the unstable manifold is determined. In this case it is 0.1 in length. Then the number of points on this line is decided. In this case that number is M which is 200000. Next the number of iterations of the line segment is determined based on the degree of detail needed in the unstable manifold image. In this case eleven iterations are chosen as sufficient for the large view in Plate A of the stable and unstable manifolds. For Plates B, C far more iterations are needed to reveal the fine detail.
%======================THE HIRSCH CONJECTURE====================
\subsection{\sml The Hirsch Conjecture}
\begin{center}
\parbox{3.5in}{{\em "A major challenge to mathematicians is to determine which dynamical systems are chaotic and which are not. Ideally one should be able to tell from the form of the differential equation"} -- Morris W. Hirsch, 1985 \cite{bi:mh}, page 192.}
\end{center}
\vs6\noi Combining the work of Poincar\'{e} and Smale, the Hirsch Conjecture reduces to identifying complexity in the form of the differential equations of a system by identifying stretching and folding in the form of the differential equations.
\vs6\noi The Theory of IDEs originated from this important question stated in 1985. The component questions of the conjecture are (1) What is meant by {\em form}? (2) Exactly what are the elementary dynamics that combine to create chaos that can be revealed by form? (3) Given that questions (1) and (2) can be answered rigorously, can chaos or complexity be recognized from form alone?
\vs6
\noi While the Hirsch Conjecture is an open problem, an initial attack on questions (1), (2) and (3) must begin with a highly simplified version of the problem that defines {\em form} so that it is amenable to rigorous analysis. In addition, it is necessary to appeal to the Smale-Birkhoff theorem \cite{bi:sb} for an insight into what are the most fundamental dynamical components that are the source of chaos. And, can it be proven or disproven that if questions (1) and (2) have rigorous answers, that (3) has a rigorous answer as well?
\vs6\noi The search for answers for (1) and (2) led to refocusing on local solutions of ODEs rather than the solution of the ODE itself. The local solution of interest is precisely an IDE, examples of which are given in the preceding section. As will be seen, the local solution of an ODE, i.e., an IDE, has (a) the benefit that it can be expressed in closed form in terms of elementary functions; and, (b) that elementary dynamics can be partitioned in a manner that allows for the study of their interaction.
\vs6\noi Serendipitously, the search for an answer to the Hirsch Conjecture has led to the partition of the question into three parts and from this partition the development of an entirely new mathematical theory that has applications well beyond the Hirsch Conjecture has emerged. That theory, {\em The Theory of IDEs}, is the subject of this book.
\vs6\noi In particular, the theory of IDEs: must define {\em form}; must identify the basic dynamical components of complexity; must set forth axioms that are implicit in the need for a rigorous approach to the three questions above and other broader questions of interest to the field of dynamics generally; must identify the connections of the theory to other areas of mathematics to assure that the theory is not redundant and to benefit from the results of other areas of mathematics; must identify computational methods (a calculus) for using IDEs to solve real world problems; must identify how IDEs lead to an understanding of well-known chaotic systems that extends the understanding of complexity that comes from using numerical methods for the study of chaotic ODEs. In addition, IDEs will provide a method of generalizing ODEs to objects that did not previously arise from ODEs.
%==============================================================Exponential Map=====================
\subsection{\sml Deriving IDEs from ODEs}
Another problem that arises from the Hirsch Conjecture is: How are IDEs routinely derived from ODEs?
\begin{example}{\bf A Simple IDE Derived from an ODE} Consider
\[\dot{\X}=f(\X)\]
where $f:\Rl^n\ra \Rl^n$
How must this equation be rearranged in order to see how stretching and folding appear in the form of the equation? The answer that is most productive to date is to rearrange the equation into the form
\[\dot{\X}=\A(\X)\,\X+\F(\X)\]
where $\A(\X)$ is an $n \times n$ matrix and $\F:\Rl^n\ra \Rl^n$
\end{example}
\vs6\noi To see the value of this approach assume $\F=0$. Then
\[\dot{\X}=\A(\X)\,\X\]
Over a very small time interval, h, assume that $\A(\X)$ is nearly constant. Then the solution over a small time interval is
\[\exp(h\, \A(\X))\, \X\]
If $\F\neq 0$ then the solution, over a small interval $h$ can be put into the form
\[\exp(h\, \A(\X))\, (\X-\G(\X))+\G(\X)\]
for some $\G(\X)$.
\vs6\noi One problem remains to facilitate this approach, evaluating $\exp(h\, \A(\X))$. To simplify this evaluation it would be convenient to be able to express $\A(\X)$ in more simple components such as $\A=\B+\C$ where $\X$ is omitted for convenience of exposition. But this will require evaluating $\exp(h\,( \A+\B))$. Preferably, $\exp(h\, (\A+\B))=\exp(h\,\A)\, \exp(h\,\B)$ which is not true in general unless $\A$ and $\B$ commute. How this problem is handled will be the subject of Sec. \ref{sc:dec}.
\vs6\noi Once the form of IDEs is established, IDEs may be considered as subjects of mathematical study separately from ODEs. The fact that this will prove useful will be demonstrated more fully in later sections. For now, the following example will be sufficient to motivate this line of thought. Consider the IDE
\[\T(\X)=\exp(h\, \A(\X))\, \X\]
where $\A(\X)$ is a nowhere differentiable matrix function of $\X$. This IDE may be thought of as arising from a more general concept of an ODE where $\dot{\X}$ is strictly symbolic.
\vs6\noi When an IDE arises from an ODE, it is clear that an IDE is not generally an exact solution of an ODE. However, the theory of IDEs must establish conditions on which the IDE retains the key engineering and decision making information that is inherent in the originating ODE. Establishing this theory requires the concept of {\em morphological equivalence} which is discussed in Sec.\ref{sc:morph}
%========================
\section{\sml Key Concepts: Form and Morphology}
\label{sc:key}
This section will explain what is meant by the form of an equation and it will explain the role of morphology in dynamical systems.
%====================================FORM======================
\subsection{\sml Defining {\em Form}}
\label{sc:form}
\begin{definition}{\bf The {\em form} of a mathematical expression refers to the {\em algebraic} form of the expression}
\vs6\noi The algebraic form of an expression consists in the set of algebraic operations and the individual terms of the expression. For IDE theory, the {\em form} of an equation must only consist of the following:
\begin{itemize}
\item {\bf Any combination of additions, subtractions, multiplications and divisions}
\item {\bf A finite algebraic combination of terms and factors}
\item {\bf Elementary functions:} polynomials and radicals and the elementary transcendental functions: trigonometric functions, exponential functions, logarithms, and hyperbolic functions, \cite{bi:as}, chapter 4.
\end{itemize}
\end{definition}
\vs6\noi \underline{Solutions in {\em Closed Form} in Terms of Elementary Functions}
\vs6\noi
Ideally, an ODE would be solvable in closed-form in terms of elementary functions. This means that the solution could be written down, as a function of time, as an algebraic combination of a finite number of elementary functions. Such a program has never been realized for most ODEs. Any hope of establishing such a program must make some compromises. One possibility is that solutions are not expressed as functions of time, but rather as iterations. Further, it may be necessary to make the concession of not having an exact orbit, but rather an orbit which has the same properties morphologically as the ``actual'' solution. Thus, the closed-form expression would closely track the actual solution and contain the same decision making information. For example, while the human EEG cannot be written down, it serves as a good example to clarify this discussion. All {\em normal} EEGs look the same to a neurologist even though no two EEGs are exactly the same. All normal EEGs are {\em morphologically} equivalent. This fact demonstrates that morphology is good enough for even the most critical decision making situations. It also demonstrates that two EEGs need not have identical time series to be considered normal.
\vs6\noi The above definition of {\em form} will now be applied to an example to better understand the role that form plays in determining the complexity of the solution of an ODE.
\begin{example}{\bf Defining Form:}
Consider the following two equations:
\begq
\ddot{x}+\dot{x} + x^3=\cos(t)
\label{eq:df1}
\endq
\begq
\ddot{x}+0.05\dot{x} + x^3=7.5\cos(t)
\label{eq:df2}
\endq
\end{example}
As is well known, Eq.(\ref{eq:df2}) has a strange attractor that outlines the unstable manifold of a hyperbolic fixed point, whereas Eq.(\ref{eq:df1}) ``appears'' not to be capable of producing a strange attractor from any initial condition. But these two equations would ``seem'' to have the same form.
\footnote{Note that the damping factor only serves to make chaos visible using numerical methods. It is not an essential factor in creating a transverse homoclinic point.}
In the case of these two equations, both are periodically forced ODEs with the same number of derivatives and whose autonomous and forcing components only differ in the value of a parameter. It would seem unaesthetic as well as "unmathematical" to require that the form of an equation be determined by the precise values of the parameters that occur in the equation. To resolve this problem, it is necessary to consider parameters as functions which are constant. With this modification the question of form may be used to address Eq.(\ref{eq:gen}).
\begq
\ddot{x} + f(x)\dot{x}+ g(x)= h(t)
\label{eq:gen}
\endq
It would be preferable to postulate broad conditions on the functions, $f,g, h$ that assured that there exists initial conditions for which Eq.(\ref{eq:gen}) has chaotic solutions in the sense of the Smale-Birkhoff theorem. Such a theorem seems out of reach today.
\vs6\noi However, there is another approach suggested by the following question: Is it possible to algebraically arrange the "form" of the equation in components of the dynamics of the separate terms that are responsible for the elementary dynamics of the equation? This requires identifying the elementary dynamical components that are the source of complexity. Those dynamics are stretching and folding.
\vs6\noi Having defined {\em form} it is now necessary to determine the elementary dynamics that combine to create complexity. In this regard, the Smale-Birkhoff theorem \cite{bi:sb} established that stretching and folding are the two elementary dynamics which combine to produce complex dynamics. Clarifying and formalizing what is meant by stretching and folding is necessary if it is to be possible identify stretching and folding in the form of a system.
\vs6\noi In Chapter \ref{ch:sf} stretching and folding are examined in detail and rigorously defined. Some examples are examined to develop an intuitive idea of the meaning of stretching and folding. Once established, it is necessary to examine how stretching and folding might appear in the form of an equation.
%=========================================Morphology===========================
\subsection{\sml The Role of Morphology in The Theory of IDEs}
\label{sc:morph}
An examination of an extensive table of indefinite integrals \cite{bi:pb} reveals that there are gaps between successive entries for which no closed form integral in terms of elementary functions has ever been found, and that is why there are missing entries. Ideally, such a table would be a ``continuous'' set of entries; but, if that were possible, it would imply that every ODE could be solved in closed form in terms of elementary functions, which is clearly not true.
\vs6\noi What is likely to be true is that the time series of missing entries are quite similar to the time series of adjacent entries for which a closed form solution is available. I.e., the morphology of successive entries in an ideally ``continuous'' table would evolve continuously. This fact brings to the fore that morphology is a valuable tool in understanding dynamics of equations for which there is no closed form solution in terms of elementary functions but for which there is a related equation for which a closed form solution is available.
\vs6\noi Every entry in a table of integrals may be considered as an ODE for which the indefinite integral is the solution. Consider the following two successive examples from \cite{bi:pb}, page 13:
\begq
\int \frac{x^2+1}{x^3-3\,x^2+4\,x-2}dx=\log\l(\frac{(x-1)^2}{\sqrt{x^2-2x+2}}\r)+2\arctan(x-1)
\endq
\begq
\int \frac{3x^2+4}{x^3+x^2-8\,x-12}dx=\frac{44}{25}\log(x+2)+\frac{31}{25}\log(x-3)+\frac{16}{5(x+2)}
\endq
For $x\geq 3.6$ they are morphologically the same, i.e., {\em morphologically equivalent}. If only the morphological features of a problem are important (which often contains the dynamics of interest) and not its particular time series, then these two integrals may be considered equivalent for $x\geq 3.6$ in spite of their apparent formulaic differences.
\vs6\noi
As seen in such medical diagnostics as EEG and EKG \cite{bi:wf}, the morphology is the decision making information in a time series, not the particular time series itself. Applying this approach to stretching and folding, provides a level of latitude in the exact formulation of equations for social and biological sciences that will provide simplifications in treatment while still providing the power of explanation and prediction. As seen in \cite{bi:cr}, topological conjugacy, a form of morphology, is sufficient to capture the key dynamical feature of a problem.
\begin{example}{\bf Morphology} Consider the system Eq.(\ref{eq:t2}):
\begin{equation}
\label{eq:t2}
\left.\begin{array}{lcl}
\dot{x}&=&\omega(y-\sin(\lambda\cdot t)) \\
\dot{y}&=& -\omega \cdot x+\lambda\cos(\lambda\cdot t)
\end{array}\right\}
\end{equation}
whose solution is given by Eq.(\ref{eq:L2D4}):
\begin{equation}
\label{eq:L2D4}
\left. \begin{array}{lcl}
x& = &x_0 \cdot \cos(\omega \cdot t) + y_0 \cdot \sin(\omega \cdot t) \\
y &= &y_0 \cdot \cos(\omega \cdot t) - x_0 \cdot \sin(\omega \cdot t)\\
& & +\sin(\lambda \cdot t)
\end{array}\right\}
\end{equation}
\end{example}
\sml
Replace the forcing functions with two constants (i.e., focusing on the local solution), solving the equation to get the IDE and then replacing the constants with their appropriate periodic functions gives
\begin{equation}
\label{eq:L2D5}
\left.
\begin{array}{lcl}
x_{n+1} &=& ((x_n - cn) \cdot \cos(\omega \cdot h) + (y_n - sn) \cdot \sin(\omega \cdot h)) + cn\\
y_{n+1} &=& ((y_n - sn) \cdot \cos(\omega \cdot h) - (x_n - cn) \cdot \sin(\omega \cdot h)) + sn
\end{array}\right\}
\end{equation}
where \[sn = \sin(\lambda \cdot t) \hspace{12pt} {\rm and} \hspace{12pt} cn = \lambda \cdot \cos(\lambda \cdot t) / \omega\]
Note that the IDE, Eq. (\ref{eq:L2D5}), differs significantly in form from the exact solution, Eq. (\ref{eq:L2D4}), while retaining its morphology and even a high degree of time series accuracy.
\vs6\noi Figure \ref{fg:IDE1} illustrates the morphology where the step size is very large relative to an integration algorithm, $h=0.5$. Additional data are as follows:
\[\omega=1 \hspace{12pt} \lambda=2.0 \hspace{12pt} x_0=1.0 \hspace{12pt} y_0=0\]
\vs6\noi For a slightly smaller step size the two time series are nearly identical.
\begin{figure}[htbp]
\includegraphics[height=2.453in,width=1.95in,angle=0]{C:/Research/Book/Figures/eps/Morph.eps}
\caption{Morphological Equivalence and Topological Equivalence}
\label{fg:IDE1}
\end{figure}
\vs6\noi At this time there is no rigorous definition of {\em Morphological Equivalence}. It is a term of art for applied science. If there were, it could be programmed into a computer and used as a diagnostic tool, replacing a doctor's observation with an automated diagnosis in many cases. However, there are related rigorous mathematical concepts: morphological equivalence includes topological equivalence \cite{bi:jk}, page 87; topological conjugacy \cite{bi:cr}, page 113; and morphological equivalence includes differential conjugacy \cite{bi:cr}, page 332. However, it is not possible to declare two IDEs morphologically equivalent merely because there is a subset on which they are morphologically equivalent, since a seizure would satisfy that criterion.
\vs6\noi It is the empirical appearance of very short term complexity (such as a seizure) that poses such difficulties in deriving any model that is to be used for prediction of local and short term complexity. In what follows, it will be seen that it is the transition between states that can account for short term complexity such as heart attacks, tornados and seizure. IDEs are specifically formulated to reveal transitions between states in their algebraic form.
\vs6\noi Serendipitously, the Hirsch Conjecture brings attention to the need to analyze the source of state transitions in order to make vital predictions about short term dynamics and to make the relevant design, management and prescription decisions based on those predictions.
%02
%======================================================NOTATION=======================
\chapter{Notation, Special Functions and Concepts Used in this Book}
\label{ch:note}
\begin{center}
\parbox{3.5in}{\em The Theory of Infinitesimal Diffeomorphism Equations requires a set of special functions and analytical tools in order to efficiently formulate and solve IDEs in the fields of technology, engineering, biological and social sciences and modeling.}
\end{center}
\vs6\noi This chapter presents preliminaries that are needed throughout the remainder of the book. The topics are definitions and notation, Sec. \ref{sc:note}; background on the use of polar coordinates in IDE theory, Sec. \ref{sc:polar}; an initial classification of IDEs, Sec. \ref{sc:clas}; a number of special functions that are needed to assure that an IDE can be represented in closed form in terms of elementary functions, Sec. \ref{sc:sf}; the presentation of logic gates that will be used to fuse two IDEs, Sec. \ref{sc:gates}; methods of visualization and standardization that are useful for engineering systems, Sec. \ref{sc:vis}; a method of lifting a non invertible one-dimensional map to an invertible map in two dimensions as well as lifting maps to higher dimensions generally, Sec. \ref{sc:lift}; the use of the harmonic oscillator as a clock to transform non-autonomous systems into autonomous systems in higher dimensions, Sec. \ref{sc:hoc}; the definition of standard almost periodic systems that will be used to drive nonlinear systems to form complexity, Sec. \ref{sc:aps}; a discussion of the concept of fusion to be used to link two IDEs, Sec. \ref{sc:fus}; transition functions that are used to fuse two IDEs, Sec. \ref{sc:transfn}; and, the definition of time-one and first return maps, Sec. \ref{sc:t1fr}.
\section{\sml Definitions and Notation}
\label{sc:note}
$\Rl$ is the set of real numbers and $\Cx$ is the set of complex numbers. Throughout this book $\X\in \Rl^n$ or $\X\in \Cx^n$ where $\Rl^n$ is n-dimensional Euclidean space and $\Cx^n$ is n-dimensional complex space. $\M_n(\Rl)$ is the set of all $n\times n$ real matrices in $\Rl^n \times \Rl^n$ and $\M_n(\Cx)$ is the set of all $n\times n$ complex matrices in $\Cx^n \times \Cx^n$. $\F(\X),
\G(\X)$ are functions on $\Rl^n$ or $\Cx^n$ as appropriate which are either Lebesgue measurable or have continuous derivatives as necessary for each proof. $\A(\X)$ is a matrix function on $\Rl^n \times \Rl^n$ or $\Cx^n \times \Cx^n$ which is either Lebesgue measurable or has continuous derivatives as necessary for each proof.
For $u \in \Rl$
\[\sgn(u) \dff {\rm sign}(u)\]
\[\sg(u) \dff 0.5\,(1+\sgn(u))\]
\vs6\noi Vectors of the form $\A(\X)\, \X$ where $\A(\X)=(a_{i\,j}(\X)) \in \M_n(\Cx)$ will occur frequently in this text. Therefore there is a need for a method of computing their Jacobian derivative directly.
\begin{definition}
\label{df:Dder}
Let $\A(\X)\in \M_n(\Cx)$ be any matrix of functions and $\Y\in \Rl^n$ any vector of functions. Then
\[ \D_{x_k} \A(\X) \dff \frac{\partial}{\partial \, x_k} \, \A(\X)= \l(\frac{\partial a_{i\,j}(\X)}{\partial x_k}\r)\]
and
\[\D(\A(\X)\,\Y) \dff \l(\D_{x_k}(\A(\X))\, \Y \r)\]
\end{definition}
\vs6\noi $\D(\A(\X)\,\Y)$ is not the Jacobian; but, like the Jacobian, it produces a matrix from a vector of functions.
\begin{lemma}
\label{lm:Dder}
Let $\J(\X), \;\; \J(\Y)$ be the Jacobian matrices of the vector functions $\X, \;\; \Y$.
\vs6\noi Then (1)
\[\J(\A(\X)\Y)=\D(\A(\X)\,\Y)+\A(\X)\J(\Y)\]
If, $\X=\Y$ are the coordinate functions, $\J(\X)= \I$
and
\[\J(\A(\X)\X)=\D(\A(\X)\,\X)+\A(\X)\]
and (2)
\[\J(f(\X)\, \F(\X))=f(\X)\, \J(\F)+ \F\, (\nabla f)^{\rm T}\]
\end{lemma}
\pf Direct computation using definition \ref{df:Dder} \rl
\vs6\noi The following vector will be useful:
\begin{definition}
\[{\bf 1}^n\dff (1,1,1,1,...,1)\in \Rl^n\]
$\Ix_\X$ is the matrix having the vector components of $\X$ as its diagonal entries with all off-diagonal components being zero.
\vs6\noi $\e_i$ are the basis vectors for $\Rl^n$ or $\Cx^n$.
\end{definition}
\begin{lemma}
\[{\bf 1}^n=\sum_i \, \e_i\]
\end{lemma}
\begin{lemma}
\[\Ix_{\X} \, {\bf 1}=\X\]
\end{lemma}
\pf Direct computation \rl
\vs6\noi When there is no confusion, all subscripts will be omitted. The parameter $h$ is significant and so it will be indicated as a subscript when it is useful to do so.
\vs6\noi The integer part of a number, $x$ will be symbolized as $[x]$ and the fractional part of $x$ is $x-[x]$ and will be denoted as $\{x\}$.
\begin{definition} {\bf Generalized Curl}
\vs6\noi
Let $\A \in \M_n(\Cx)$.
\[ \A_\D \dff \mbox{ \rm diagonal of } \A\]
\[\A_\C \dff \A-\A_\D\]
$\A_\C$ is the {\em Generalized Curl} of $\A$.
\end{definition}
\vs6\noi When there is no confusion $\A_\C$ will be referred to as the curl of $\A$ instead of the generalized curl.
\begin{definition}{\bf Projection and Idempotent}
\vs6\noindent Let $\A\in \M_n(\Cx)$
\[\mbox{ If }\A^2=\A \dff \A \mbox{ is an idempotent}\]
\vs6\noi If $\P\in \M_n(\Cx)$ is an idempotent and
\[\P_i \, {\bf 1}^n=\e_i\]
then $\P$ is a projection.
\end{definition}
\begin{example} {\sml Projections in $\Rl^2$}
\label{ex:pro01}
For $\Rl^2$
\[\P_1=\l(\mtx 1.0.0.0\par\r)\]
and
\[\P_2=\l(\mtx 0.0.0.1\par\r)\]
\end{example}
\begin{definition}
A matrix $\N\in \M_n$ is nilpotent if $\N^n=\0$.
\end{definition}
\begin{example}
\label{ex:nil01}
For $\Rl^2$
\[\N_1=\l(\mtx 0.1.0.0\par\r)\]
and
\[\N_2=\l(\mtx 0.0.1.0\par\r)\]
\end{example}
\begin{lemma}
For any projection $\P$
\[\exp(h \P)=\I+(\exp(h)-1)\P\]
\end{lemma}
\pf
Direct computation \rl
\begin{lemma}
For any nilpotent matrix $\N$
\[\exp(h \N)=\I+h\, \N+\cdots+h^{n-1}\,\N^{n-1}/(n-1)!\]
\end{lemma}
\pf
Direct computation \rl
\begin{definition} {\bf Involution}
\vs6\noi
A matrix $\A\in \M_n$ is an involution if $\A^2=\I$.
\end{definition}
\noi Involutions often occur in chaotic systems.
\begin{example} {\bf Involution}
Let
\[\A=\l(\mtx 0.1.1.0\par\r)\]
then $\A^2=\I$ and
\[\exp(h\, \A)=\l(\mtx \cosh(h). \sinh(h).\sinh(h).\cosh(h)\par\r)\]
\end{example}
\begin{definition} {\bf Orthogonal Matrix}
\vs6\noi An orthogonal matrix is defined by $\B^{-1}=\B^T$.
\end{definition}
\begin{definition} {\bf Skew Symmetric}
\vs6\noi A skew-symmetric is defined by $-\B=\B^T$.
\end{definition}
\vs6\noi A class of skew symmetric orthogonal Matrices occurs in IDE theory frequently.
\begin{lemma}
If $\B$ is skew-symmetric and orthogonal, then
\[\B^{-1}=-\B \mbox{ and } \B^2=-\I\] and
\[\exp(h\, \B)=\cos(h)\, \I +\sin(h)\B\]
\end{lemma}
\pf
Direct computation \rl
\begin{example}
\[ \B= \l(\mtx 0.1.-1.0\par\r)\]
\end{example}
\vs6\noi In this book, the bold face letter $\B$ will only be used for orthogonal, skew-symmetric matrices.
%==================Polar==============
\section{\sml Polar coordinates}
\label{sc:polar}
\begin{eqnarray}
x&=&r\cos(\theta)\\
y&=&r\sin(\theta)
\end{eqnarray}
\begin{eqnarray}
\dot{x}&=& \dot{r}\cos(\theta)-r\sin(\theta)\dot{\theta}\\
\dot{y}&=& \dot{r}\sin(\theta)+r\cos(\theta)\dot{\theta}
\end{eqnarray}
\begq
\l(\vt \dot{x}. \dot{y} \par \r)= \l(\mtx \cos(\theta).-r\sin(\theta) . \sin(\theta). r\cos(\theta)\par \r)\l(\vt \dot{r}. \dot{\theta} \par \r)
\endq
\begq
\l(\vt \dot{r}. \dot{\theta} \par \r)= \frac{1}{r}\l(\mtx r\cos(\theta).r\sin(\theta) . -\sin(\theta). \cos(\theta)\par \r)\l(\vt \dot{x}. \dot{y} \par \r)
\endq
\begq
x^2+y^2=r^2
\endq
\begq
r_x=\frac{\partial r}{\partial x}= \frac{x}{r}\hspace{0.5in} r_y=\frac{\partial r}{\partial y}= \frac{y}{r}
\endq
\begq
\theta_x=\frac{\partial \theta}{\partial x}= -\frac{y}{r^2}\hspace{0.5in} \theta_y=\frac{\partial \theta}{\partial y}= \frac{x}{r^2}
\endq
\begq
x\theta_y-y\theta_x=1
\endq
\begin{proposition}
\label{pr:vf}
Every two-dimensional IDE can be put into the form
\[\T_h=\exp(h\, \frac{\dot{r}}{r}{\bf I})\exp(h \dot{\theta}\, \B)\]
\end{proposition}
\pf
All two-dimensional vector fields can
be put into
the form:
\[\l(\vt \dot{x}. \dot{y} \par\r)=\l(\frac{\dot{r}}{r}{\bf I}
+\dot{\theta} {\bf B}\r)\l(\vt x. y \par \r) \]
where {\bf I} is the identity matrix and {\bf B} is the matrix
\[\l(\mtx 0. 1. -1. 0 \par
\r)\]
Apply proposition \ref{pr:fp} and the fact that $\I \, \B=\B \,\I$.
\rl
%===================Classification of IDEs==============
\section{\sml Classification of IDEs}
\label{sc:clas}
\vs6\noi IDEs are classified as linear, nonlinear, autonomous and nonautonomous. Here are examples:
\vs6\noi {\bf Linear:}
\[\T(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)\]
where $\A$ is a matrix of constants.
\vs6\noi {\bf Nonlinear:}
\[\T(\X)=\exp(h\, \A(\X))(\X-\F(\X))+\F(\X)\]
where $\A(\X)$ is a matrix function of $\X$. Both examples above are autonomous.
\vs6\noi A {\bf nonautonomous} IDE is of the form
\[\X_{n+1}=\exp(h\, \A(\X_n))(\X_n-\F(\X_n, n\,h))+\F(\X_n,n\,h)\]
\vs6\noi The above examples imply that there are two equivalent alternate forms of the IDE used. For example:
\[\T_h(\X)=\exp(h \A(\X))(\X-\F(\X))+\F(\X)\]
is equivalent to finite difference equation
\[\X_{n+1}=\exp(h\, \A(\X_n))(\X_n-\F(\X_n))+\F(\X_n)\]
The second form will be useful when deriving IDEs from non autonomous ODEs. For example, the second form will be used to derive IDEs from ODEs of the form
\[\dot{\X}= \A(\X)\X+\F(\X,t)\]
In this case the IDE may be written as
\[\X_{n+1}=\exp(h\, \A(\X_n))(\X_n-\G(\X_n,t_n))+\G(\X_n, t_n)\]
where $\G$ is a function of $\F$ and $t_n=n\,h$.
\vs6\noi $h$ is the infinitesimal parameter when $h$ is small. Typically, $h$ will be chosen as 0.001. However, $h$ may take on any value that makes sense for the application. The infinitesimal parameter can take on important physical meaning such as stretching. In Chapter \ref{ch:trans}, dynamical transitions as $h$ varies from small such as 0.001 to large such as 6.0 will be studied. These transitions will shed some light on the difference between infinitesimal chaos as in Lorenz and discrete chaos such as in H\'{e}non.
\begin{definition} {\bf The cdot Convention}
\vs6\noi Let $\A(\X)$ be a matrix function of a vector value, $\X$.
\[\A(\cdot)\, \X \dff \A(\X)\]
\end{definition}
\vs6\noi The value of this notation will become apparent as a short-hand when IDEs are nonlinear and the specific value on which the IDE is evaluated is not important. An example of its use is as follows:
\begin{example}
\vs6\noi Let $\T_h =\exp(h\, \A(\cdot))$. Then
\[\T(\X)=\exp(h \A(\cdot))\, \X=\exp(h \A(\X))\, \X\]
\end{example}
%====================================================================Concepts===============================
%===========================================Special functions==================================
\section{\sml Special Functions Used in IDE Theory}
\label{sc:sf}
This section presents several useful special functions essential to construct IDEs. Many are functions of $\tanh$. Due to the prevalence of $\tanh$ in many computations, the following reference table for the hyperbolic tangent is provided; see Fig. \ref{fg:tanh}:
\begin{figure}[htpb]
\includegraphics[height=2.41in,width=2.027in,angle=0]{C:/Research/Book/Figures/eps/Tanh.eps}
\caption{\sml $\Ci$ Table of hyperbolic tangents having various derivatives at 0.0}
\label{fg:tanh}
\end{figure}
\vs6\noi Let $u \in \Rl$
\[\sg_\beta(u)\dff 0.5(1+\tanh(\beta\,u))\]
$\sg_\beta (u)$ is a $\Ci$ analog of the Heaviside function for very large $\beta$. Choosing $\beta= 200$ will suffice. See Fig. \ref{fg:heaviside}.
\begin{figure}[htpb]
\includegraphics[height=1.13in,width=2.073in,angle=0]{C:/Research/Book/Figures/eps/Heaviside.eps}
\caption{\sml $\Ci$ Analog of the Heaviside Function}
\label{fg:heaviside}
\end{figure}
%*****
\begin{figure}[htpb]
\includegraphics[height=2.337in,width=4.513in,angle=0]{C:/Research/Book/Figures/eps/HS.eps}
\caption{\sml Using the Heaviside Function to Form Pulses}
\label{fg:hs}
\end{figure}
%\footnotesize
\begin{equation}
\left.
\label{eq:hs}
\begin{array}{lcl}
&&\mbox{Equations for Fig \ref{fg:hs}}\\
A: y &=& \sin^2(\pi \cdot x) \cdot 0.25 \cdot (\sgn((x)) - \sgn((x - 1))) \cdot (1 + \sgn(1 - x))\\
B: y &=& \sin^ 2 (2 \cdot \pi \cdot x) \cdot 0.25 \cdot (\sgn((x)) - \sgn((x - 0.5))) \cdot (1 + \sgn(0.5 - x)) \\
C: y &=& \sin^ 2 (2 \cdot \pi \cdot x) \cdot 0.25 \cdot (\sgn((x - 0.5)) - \sgn((x - 1))) \cdot (1 + \sgn(1 - x))\\
D: y &=& \sin^ 2 (4 \cdot \pi \cdot x) \cdot 0.25 \cdot (\sgn((x - 0.25)) - \sgn((x - 0.5))) \cdot (1 + \sgn(0.5 - x))\\
E: y &=& \sin^ 2 (4 \cdot \pi \cdot x) \cdot 0.5 \cdot (\sgn((x - 0.5)) - \sgn((x - 0.75)))\\
F: y &=& \sin^ 2 (4 \cdot \pi \cdot x) \cdot 0.5 \cdot (\sgn((x - 0.5)) - \sgn((x - 0.75)))\\
G: y &=& \sin^ 2 (4 \cdot\pi \cdot x) \cdot 0.25 \cdot (\sgn((x - 0.75)) - \sgn((x - 1))) \cdot (1 + \sgn(0.25 - x))\\
H: y &=& \sin^ 2 (8 \cdot \pi \cdot x) \cdot 0.25 \cdot (\sgn((x)) - \sgn((x - 0.5))) \cdot (1 + \sgn(0.125 - x))
\end{array} \right\}
\end{equation}
\sml
\begin{figure}[htpb]
\includegraphics[height=2.027in,width=2.067in,angle=0]{C:/Research/Book/Figures/eps/sgn.eps}
\caption{\sml $\Ci$ analog of $\sgn(u)$}
\label{fg:sgn}
\end{figure}
\begin{figure}[htpb]
\includegraphics[height=2.37in,width=3.973in,angle=0]{C:/Research/Book/Figures/eps/Abs.eps}
\caption{\sml $\Ci$ analog of $|u|$}
\label{fg:abs}
\end{figure}
\begin{figure}[htpb]
\includegraphics[height=2.027in,width=3.5in,angle=0]{C:/Research/Book/Figures/eps/Abs01.eps}
\caption{\sml $\Ci$ analog of $|u|$}
\label{fg:abs01}
\end{figure}
\vs6\noi
\[\xmod1(u) \dff u-0.5\, (1-\tanh(0.5\,(1-u)))\]
$\xmod1(u)$ is a $\Ci$ analog of $x\, \mod(1)$ on the interval [0, 1]. To extend $\xmod1(u)$ to a finite interval [1, n] compose it n times. This can be done efficiently in a loop to avoid having to write out n-compositions by hand.
\vs6\noi
\begq
\label{eq:xmod}
\left.
\begin{array}{lcl}
&&\mbox{\sml The following code extends $\xmod1$ to the interval [1, 10]}\\
&&\mbox{See Fig. \ref{fg:xmod1}}\\
For\;\; i &= &1 \;\;To\;\; 10\\
u& = &u - 0.5 \cdot (1 - \tanh(0.5 \cdot (1 - u)))\\
Next \;\;i&&\\
\xmod1& =& u - 0.5 \cdot (1 - \tanh(0.5 \cdot (1 - u)))
\end{array}\right\}
\endq
\begin{figure}[htpb]
\includegraphics[height=1.373in,width=3.96in,angle=0]{C:/Research/Book/Figures/eps/xmod1.eps}
\caption{\sml $\Ci$ analog of $x\, \mod(1)$}
\label{fg:xmod1}
\end{figure}
\vs6\noi In order to make $2\, u \mod(1)$, evaluate $\xmod1$ at $2 \, u$. See Fig. \ref{fg:2xmod1}
\begin{figure}[htpb]
\includegraphics[height=1.357in,width=2.4in,angle=0]{C:/Research/Book/Figures/eps/2xmod1.eps}
\caption{\sml $\Ci$ analog of $2\,x\, \mod(1)$}
\label{fg:2xmod1}
\end{figure}
\vs6\noi Further functions may be constructed from $\xmod1$. Consider
\[\xmod1(u^2) = u^2-0.5\, (1-\tanh(0.5\,(1-u^2)))\]
See Fig. \ref{fg:xmod12}
\begin{figure}[htpb]
\includegraphics[height=1.383in,width=2.013in,angle=0]{C:/Research/Book/Figures/eps/xmod12.eps}
\caption{\sml $\Ci$ analog of $u^2\, \mod(1)$}
\label{fg:xmod12}
\end{figure}
\noi As will be seen in Sec. \ref{sc:single}, such functions play a significant role in analyzing the level of complexity or chaos in three-dimensional systems.
\begin{figure}[htpb]
\includegraphics[height=2.22in,width=2.43in,angle=0]{C:/Research/Book/Figures/eps/Integerx.eps}
\caption{\sml $\Ci$ Analog of Integer Part of x}
\label{fg:integerx}
\end{figure}
\vs6\noi {\bf Boolean Functions}
\vs6\noi $\Ci$ analogs of Boolean operations will be needed in later sections as well. In particular, Boolean functions will be used to isolate dimensions. Consider the Boolean function
\[f(x,y)=0.5\cdot(1+\sgn(y-(x+b)))\]
$f(x,y)=1$ when $\sgn(y-(x+b))=1$ and 0 otherwise. The problem of $\sgn(0)$ is resolved by making an arbitrary designation of 1 or 0 for $\sgn(0)$. Multiply $f(x,y)$ with $g(z)$ where
$g(z)=0.5\cdot (1-\sgn(z))$. Now the product $f(x,y)g(z)$ can be used to switch between the $x-y$ plane and the $x-z$ plane. This occurs in the R\"{o}ssler equation. Using the $\Ci$ analog of $\sgn$ makes the combination infinitely differentiable and provides a morphological equivalent function that can be made arbitrarily close to the $\sgn$ versions of the product.
\vs6\noi Using these functions provides insight into how chaotic systems work and are part of the program to address the Hirsch Conjecture. For example, if $\T,\,\, \S$ are two IDEs, then
\[f(x,y)\,g(z)\,\T +(1-f(x,y)\,g(z))\, \S\] is an IDE (as will be proven later) that alternates between the dynamics of $\T$ and $\S$. If $\T$ is formulated to only operate in the $x-y$ plane and $\S$ to operate only in the $z$ dimension, then the combined result is a dynamical system that moves through three-dimensional space. By using $\Ci$ versions of $\sgn$, the transition between dimensions is obscured, as occurs in dynamical systems that are derived from ODEs. That fact is part of the reason that the Hirsch Conjecture is so challenging.
\vs6\noi The function $f(x,y)\,g(z)$ provides a {\em transition surface} through which the dynamical system moves from the dynamics near one fixed point, perhaps governed by $\T$, to the dynamics of a separate fixed point, perhaps governed by $\S$.
\vs6\noi In this way it is seen that dynamical systems may be dissected by their fixed points and the transition surfaces that separate the regions of influence of each part of the dynamical system at a given fixed point.
\vs6\noi It will later be seen that the transition surface plays a significant role in the global dynamics of a system that cannot be easily observed if one relies on an analysis of fixed point dynamics alone.
%==========================================================================Logic Gates==================
\section{\sml Logic Gates}
\label{sc:gates}
\subsection{\sml Algebraic Forms of Logical Operations}
When a logic gate is used to construct a dynamical system, it is convenient to be able to convert the logic gate into an algebraic expression that can be differentiated. This section provides useful conversions of logic gates to algebraic expressions.
\begin{lemma}
\label{lm:ar}
Algebraic form of Logical relationships:
\[\begin{array}{lcl}
p \underline{\vee} q &\equiv& x+y-2\cdot x\cdot y\\
p \vee q & \equiv & x+y-x\cdot y\\
p\wedge q & \equiv & x\cdot y \\
\neg p & \equiv & 1-x\\
p \ra q & \equiv & 1-x+x\cdot y
\end{array}\]
\end{lemma}
\pf Use 1 or 0 for $x$ and $y$.
\rl
\vs6\noi Functional variations on these relationships will be needed for real valued functions of a vector variable:
\begq
\left.
\begin{array}{lcl}
f(\X) \vee g(\X) & \equiv & f(\X)+g(\X)-f(\X)\cdot g(\X)\\
f(\X)\wedge g(\X) & \equiv & f(\X)\cdot g(\X) \\
\neg f(\X) & \equiv & 1-f(\X)\\
f(\X) \ra g(\X) & \equiv & 1-f(\X)+f(\X)\cdot g(\X)
\end{array}\right\}
\endq
In particular, the function $\tanh(f(\X))$ will be used in constructing bridges to fuse two IDEs.
\subsection{\sml N-phase Gates}
N-phase gates can be used to construct electronic circuits.
\begin{figure}[htbp]
\includegraphics[height=2.68in,width=2.8in,angle=0]{C:/Research/Book/Figures/eps/Gates01.eps}
\caption{{\bf Example of a 3-phase Gate}}
\label{fg:gates01}
\end{figure}
\vs6\noi The bottom graph in blue is produced by
\[x(t) = 0.5 \cdot (\sgn(t - 2) - \sgn(t - 3))\]
A $C^\infty$ version may be produced by
\[x(t) = 0.5 \cdot (\tanh(\beta\,(t - 2)) - \tanh(\beta\, (t - 3)))\]
In terms of IDE special functions this is
\[x(t) = 0.5 \cdot (\sg_\beta(t - 2) - \sg_\beta(t - 3))\]
\begin{lemma}
\label{lm:gates01}
Let $s_i(t)$ be a family of $n$ positive, real-valued functions such that
\[\sum_i s_i(t)=1\]
and $s_i(t)\cdot s_j(t)=\delta_{i\, j}\, s_i(t)$
then
\[\exp(h\, \sum_i s_i(t)\, \A_i)=\prod_i \exp(h\,s_i(t)\, \A_i)\]
\end{lemma}
\pf \[ s_i(t)\, \A_i\, s_j(t)\,\A_j= s_j(t)\, \A_j\, s_i(t)\,\A_i=\0 \]
\rl
\begin{corollary}
Let
\[\dot{\X}=\l(\sum_i s_i(\X)\, \A_i\r)\, \X\]
then
\[\T_h=\exp(h\, \sum_i s_i(\X)\, \A_i)=\prod_i\, \exp(h\, s_i(\X)\,\A_i)\]
is the associated IDE.
\end{corollary}
\begin{proposition}
\label{pr:fuss}
Let $s(\X)$ be a real valued function on $\Rl^n$ such that $s^2(\X)=s(\X)$,
and Let
\[\T_h=\exp(h\, (s(\X)\, \A(\X)+(1-s(\X))\, \C(\X)))\]
then
\[\T_h=s(\X)\,\exp(h\, \A(\X))+(1-s(\X))\, \exp(h\, \C(\X))\]
\end{proposition}
\pf
Since $s(\X)\cdot (1-s(\X))=0$,
\[\exp(h\, (s(\X)\, \A(\X)+(1-s(\X))\, \C(\X)))=\exp(h\, s(\X)\, \A(\X))\, \exp(h\,(1-s(\X))\, \C(\X))=\]
\[\l(\I+s\,(\exp(h\, \A)-\I)\r)\l(\I+(1-s)\,(\exp(h\, \C)-\I)\r)=\]
\[\I+s\,(\exp(h\, \A)-\I)+(1-s)\,(\exp(h\, \C)-\I)=\]
\[s\,\exp(h\, \A)+(1-s)\, \exp(h\, \C)=\]
\[\T_h=s(\X)\,\exp(h\, \A(\X))+(1-s(\X))\, \exp(h\, \C(\X))\]
\rl
\begin{theorem}
\label{tm:fus}
Let $s_i(t)$ be an infinite family of positive, real-valued functions such that
$s_i(t)\cdot s_j(t)=\delta_{i\, j}\, s_i(t)$
then
\[\exp(h\, \sum_i^\infty s_i(t)\, \A_i(\X))=\sum_i^\infty s_i(t)\, \exp(h\, \A_i(\X))\]
\end{theorem}
\pf Apply proposition \ref{pr:fuss} and mathematical induction since
\[r_{n-1}=\sum_i^{n-1} s_i(t)\;\;\;\mbox{and}\;\;\; s_n \;\;\mbox{ satisfy lemma \ref{lm:gates01}}\]
\rl
\begin{theorem}
Let
\[\dot{\X}=\sum_i^n s_i(t)\, \A_i \, \X \, ; \;\; \X(0)=\X_0\]
where the $\A_i$ are square matrices of constants and the $s_i(t)$ satisfy the conditions of lemma \ref{lm:gates01}.
Then
\[\X(t)=\sum_i^n s_i(t)\,\exp(t\, \A_i)\, \X_0\]
\end{theorem}
\pf Theorem \ref{tm:fus}. \rl
\begin{lemma}
Let $E_i$ be a partition of the domain, $D$, of $\X(t)$ consisting of $N$ sets and let $\mu$ be Lebesgue measure on $\D$ with $\mu(E_i)=h=1/N$ for all $i$. Select a point, $\xi_i$ in each $E_i$ such that
\[\max \|\X(t)-\X(\xi_i)\|< \epsilon/N\]
on $E_i$. then
\[\| \X(t)-\sum _i \Chi_{E_i}(t)\,\X(\xi_i)\| <\epsilon\]
\end{lemma}
\pf Direct computation. \rl
\begin{lemma}
\label{lm:part}
Let $E_i$ be a partition of the domain, $D$, of $\A(\X)$ consisting of $N$ sets and let $\mu$ be Lebesgue measure on $\D$ with $\mu(E_i)=h=1/N$ for all $i$. Select a point, $\xi_i$ in each $E_i$ such that
\[\max \|\A(\X)-\A(\xi_i)\|< \epsilon/N\]
on $E_i$. then
\[\| \A(\X)-\sum _i \Chi_{E_i}(\X)\,\A(\xi_i)\| <\epsilon\]
\end{lemma}
\pf Direct computation. \rl
%\begin{lemma}
%Assume that $(\T\circ \S )(\X)=\X$ is a hyperbolic fixed point. Then $ (\S\circ \T)(\S(\X)=\S(\X)$ and $\S(\X)$ is also hyperbolic.
%\end{lemma}
\begin{proposition}
Assume that $\A$ is as in lemma \ref{lm:part}. Then
\[\|\exp(h\, \A(\X))-\sum_i \Chi_i \exp(h\, \A_i)\|\leq h M\]
\end{proposition}
\begin{proposition}
Assume
\[\|(\A(\X)-\C_i(\X))\, \Chi_i\| \leq \epsilon/N\]
then
\[\|\exp(h\, \A(\X))-\sum_i \Chi_i \exp(h\, \C_i(\X))\|\leq h M\]
\end{proposition}
%================================Iterative Boolean Functions=======================================
\subsection{\sml Iterative Boolean Functions}
\label{sc:boolif}
Typically Boolean expressions are not iterated. However, in IDE theory iterative Boolean expressions do occur. Table \ref{tb:boolif} is an example.
\begin{example}{\bf Iterative Boolean Function}
\label{ex:boolif}
\[s\ra (1-p)\,(s+q-s\cdot q)\]
where $p, \; q$ are functions of $\X$.
\begin{center}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:boolif}
s&s&p&q\\ \hline
0&1&1&1\\ \hline
0&1&1&0\\ \hline
1&1&0&1\\ \hline
1&1&0&0\\ \hline
0&0&1&1\\ \hline
0&0&1&0\\ \hline
1&0&0&1\\ \hline
0&0&0&0\\ \hline
\end{tabular}}
\vs6\noi {\scriptsize The Boolean Logic Table for $s$\\ Treated as an Iterated Boolean Expression}
\end{center}
\vs6\noi In general, $p, \; q$ define two separate boundaries associated to two separate IDEs.
\end{example}
\vs6\noi After one iteration the following table is produced
\begin{center}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:boolif2}
s&s&p&q\\ \hline
0&0&1&1\\ \hline
0&0&1&0\\ \hline
1&1&0&1\\ \hline
1&1&0&0\\ \hline
0&0&1&1\\ \hline
0&0&1&0\\ \hline
1&1&0&1\\ \hline
0&0&0&0\\ \hline
\end{tabular}}
\end{center}
\vs6\noi From this point onward the table is fixed.
\vs6\noi Typically, an {\em algebraic} Boolean function can be constructed from the $\sgn$ function as follows
\[p=0.5\cdot (1+\sgn(f(\X))\]
where $\sgn(0)$ is set to 0. Once an algebraic expression is obtained, it can be made $C^\infty$ by use of the $\tanh$ function as follows
\[p(\X)=0.5\cdot (1+\tanh(\beta\cdot f(\X))\]
where $\beta$ is a large number. This is the IDE special function $\sg_\beta$ defined in Sec. \ref{sc:sf}.
%=======================================================
\subsection{\sml The Sigmoid function}
Sigmoid functions occur in limited growth populations models, neural networks and many other dynamical systems. This suggests that the sigmoid IDE is a fundamental building block of many systems.
\begin{figure}[htbp]
\includegraphics[height=1.29in,width=4.593in,angle=0]{C:/Research/Book/Figures/eps/LG01.eps}
\caption{\sml Two Sigmoid IDEs}
\label{fg:lg01}
\end{figure}
\vs6\noi These two IDEs are associated to the following two ODEs:
\begq
\label{eq:lg01}
\dot{x}= x\cdot (1-x^2)
\endq
\begq
\label{eq:lg02}
\dot{x}= x\cdot (1-x)
\endq
\vs6\noi When limited growth dynamics occur in higher dimensional systems with other forcing factors, the ODEs cannot be solved in closed form in terms of elementary functions. However, limited growth dynamics can be included in the IDE. Typically this would suggest the use of Eq. \ref{eq:lg02}. When this equation occurs in an ODE it does not create an ambiguity. However, the associated IDE can be problematic since the change in sign of $x$ in $\exp(h\, (1-x))$ produces a very different result. This can be solved by using $\exp(h\, (1-x^2))$. To justify this change note that in higher dimensional systems, because they cannot be solved in closed form, the morphology of the two different IDEs is identical as seen in Fig. \ref{fg:lg01}. Further note that the forcing function, Eq. \ref{eq:lg01} has the following rearrangement:
\[1-x^2= (1-x)\, (1+x)= x\, (1-x) + (1-x)\]
The morphology of these two functions is very close because, as Fig. \ref{fg:lg01} reveals, the sigmoid function is very similar to the hyperbolic tangent. In terms of global dynamics, their contributions to a complex system are the same.
%=============================================Visualization and Standardization===============================================
\section{\sml Visualization and Standardization}
\label{sc:vis}
Visualizing one-dimensional maps does not reveal their complexity. The visualization of one-dimensional maps can be improved by plotting a one-dimensional map versus a map that has no complexity. The simplest choice is $y\ra (y+h) \, \mod(1)$ where $h$ is a small step size. This is, in effect, a method of comparing the complexity of a one-dimensional map to a {\em standard} map. An alternate method is to plot $(x_{n+k}, x_n)$. Plotting $(x_{n+k}, x_n)$ for sufficiently large $k$ will produce a two-dimensional graph if the map of interest is chaotic because two points far enough apart will be uncorrelated. These methods are illustrated in Fig. \ref{fg:standard01}. Visualization may also assist in modeling. If a series of measurements of a system are graphed for which no model exists and the graph can be approximately matched to a known visualization of a model, then modeling the unknown system may be accelerated.
\begin{figure}[htbp]
\includegraphics[height=2.46in,width=4.617in,angle=0]{C:/Research/Book/Figures/eps/Standard01.eps}
\caption{\sml Plate A: the Delay plot is $(x_n, x_{(n+123)})$; Plate B: The delay map $(x_n, x_{n+1})$ of $2\, x\, \mod(1)$ is in red; $2\, x\, \mod(1)$ versus $y\ra (y+h) \, \mod(1)$ is in Blue-green}
\label{fg:standard01}
\end{figure}
\begq
\label{cd:std01}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:standard01} Plate B Blue-green is as follows:}\\
h&=&0.001\\
&& \mbox{For i = 1 to 250000}\\
\\
u &=& \exp(\ln(1.9999999)) \cdot x\\
x &=& (u)\, \mod(1)\\
v &=& v +h\\
y &=& (v)\, \mod(1)\\
\\
&&\mbox{\bf Plot Point $(x_1,x)$}\\
x_1&=& x\\
&& \mbox{Next i}
\\
&&\mbox{\bf The code for Fig. \ref{fg:standard01} Plate A is as follows:}\\
\\
&&\mbox{ M}=123.0\\
u &=& \exp(\ln(1.9999999)) \cdot x\\
x &=& (u)\, \mod(1)\\
k &=& (i) \, \mod({\rm M})\\
\\
&&\mbox{If k = 0 Then {\bf Plot Point} $(x_1,x)$}\\
&&\mbox{If k = 0 Then $x_1 = x$}\\
&& \mbox{Next i}
\end{array}\right\}
\endq
\vs6\noi Sec. \ref{sc:t1fr} provides a closer look at the value of this concept.
\vs6\noi For higher dimensional maps the standard {\em foot rule} function is the Harmonic Oscillator Clock. In general, any coordinate of a complex function can be better visualized by plotting the coordinate of the complex system versus a coordinate of the HOC.
%===================================
\section{\sml Code Format}
The VB code used to produce all images has the following format but will not be displayed in the code examples to save space:
\begq
\label{cd:fmt}
\left.
\begin{array}{lcl}
{\rm M}&=& 1000\\
h&=&1/{\rm M}\\
&& \mbox{For j = 1 to N}\\
&& \mbox{For i = 1 to M}\\
&& \mbox{Insert Code Here}\\
&&\mbox{\bf Plot Point for time series}\\
&& \mbox{Next i}\\
&&\mbox{\bf Plot Point for time-one map}\\
&& \mbox{Next j}
\end{array}
\right\}
\endq
This code format has the convenience of being able to plot the time-one map without any further logic gates.
\begin{example}
\scriptsize
\begin{equation}
\label{cd:bc01a}
\left. \begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:bc01} Plate B is as follows:}\\
{\rm M}&=& 1000\\
h&=&1/{\rm M}\\
{\rm N}&=& 10\\
\epsilon &=& 0.00000055\\
\alpha&=&\sqrt{1.3}\, \pi\\
&& \mbox{\rm For j = 1 to N}\\
&& \mbox{\rm For i = 1 to M}\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
\\
q &=& 0.5 \cdot (1 - \tanh(120\,(1 - h - y)))\\
p &=& 0.5 \cdot (1 + \tanh(120\,(1 - h + y)))\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
u_1 &=& \exp(-h \cdot 0.1 \cdot y) \cdot (x - 1) + 1 + \epsilon \cdot w\\
v_1 &=& \exp(h \cdot (0.3 \cdot x )) \cdot (y - 1) + 1 + \epsilon \cdot z\\
\\
u_2 &=& \exp(-h \cdot 0.1 \cdot y) \cdot (x + 1) - 1 + \epsilon \cdot w\\
v_2 &=& \exp(h \cdot (0.3 \cdot x )) \cdot (y + 1) - 1 + \epsilon \cdot z\\
\\
x &=& s \cdot u_1 + (1 - s) \cdot u_2\\
y &=& s \cdot v_1 + (1 - s) \cdot v_2\\
\\
&&\mbox{\bf Plot Point for time series}\\
&& \mbox{\rm Next i}\\
&&\mbox{\bf Plot Point for time-one map}\\
&&\mbox{\rm Next j}
\end{array} \right\}
\end{equation}
\end{example}
%============================One-dimensional maps===============================
\section{\sml Lifting of Systems to Higher Dimensions}
\label{sc:lift}
A non-invertible dynamical system on $\Rl^n$ may be lifted to an invertible dynamical system of dimension $\Rl^{2\, n}$. This technique is particularly useful in dealing with one-dimensional maps such as $2\, x\, \mod(1)$.
%==========================ONE-DIMENSIONAL MAPS===========================================
\subsection{\sml Lifting One-dimensional maps to $\Rl^2$}
\label{sc:OneD}
Every non invertible one-dimensional map can be {\em lifted} to a two-dimensional invertible map.
\begin{definition} {\bf Lifting.}
A map $f:\Rl \ra \Rl$ is said to be lifted to a family of maps,
$T_b: \Rl^2 \ra \Rl^2$, if $T_0$ maps all of $\Rl^2$ onto the
graph of $f$ in $\Rl^2$.
\end{definition}
\vs6\noi In short, the map $f$ induces
the family of maps $T_b$.
\begin{lemma}
\label{lm:lift}
Given any map $f:\Rl\rightarrow \Rl$
can be lifted to an invertible map on $\Rl^2$.
\end{lemma}
\pf Choose three factors as follows:
\begq
\label{eq:lft01}
\begin{array}{lcc}
T_1 \left ( \begin{array}{c}
x\\
y
\end{array} \right )
& =&
\left ( \begin{array}{c}
b\cdot x\\
y
\end{array} \right )
\end{array}
\endq
where $0<b<1$. If $|f'(x_0)|>1$ then $\T$ is hyperbolic at this
fixed point. By requiring that $0<b<1$, hyperbolicity at the fixed
point reduces to the condition $|f'(x_0)|>1$. If $|f'(x_0)|>1$,
then $\mu>3$. Thus when $\mu>3$, $T$ has a hyperbolic
fixed point at $( x_0, (1-b)\,x_0)$, where
$x_0=(\mu-1)/\mu$.
\end{example}
\vs6 \noi
Figure \ref{fg:lgstmp} illustrates the unstable manifold for the lifted logistic map,
\[\l(\vt x.y\par\r) \ra \l(\vt b \cdot x + y. f(b \cdot x + y) - b \cdot x \par \r)\]
where $f(u)=3.91\cdot u\cdot(1-u)$.
\begin{figure}[htbp]
\includegraphics[height=2.34in,width=2.297in,angle=0]{C:/Research/Book/Figures/eps/LgstMp.eps}
\caption{\sml Logistic Map lifted to $\Rl^2$ with $b=0.01$}
\label{fg:lgstmp}
\end{figure}
\begin{example}{\bf A One-sided Shift} Take $f$ as a $\Ci$ analog of the function
$x \bmod(1)$, i.e.,
\[f(x)=x-0.5\,(1-\tanh(0.5\,\beta\,(1-x)))\]
and then evaluate it at $2x$, i.e., use the map $f(2\,x)$.
As $\beta \rightarrow \infty$ this function converges pointwise
to $2x \bmod 1$ except at $x=1$. Figure \ref{fg:Shift} shows the unstable manifold
for this map for $\beta=15$ and $b=0.15$.
\end{example}
\begin{figure}[htbp]
\includegraphics[height=2.197in,width=3.363in,angle=0]{C:/Research/Book/Figures/eps/Shift2Symbols.eps}
\caption{\sml The Unstable Manifold produced by lifting of a One-sided Shift on two symbols to a Two-dimensional map with parameters $\beta=15.0,\;\;b=0.15$}
\label{fg:Shift}
\end{figure}
%=============================n-dimensional Lifts=======================
\subsection{\sml Lifting n-dimensional Maps to $\Rl^{2\,n}$}
Assume that $\X,\; \Y \in \Rl^n$ and $\T:\Rl^n \ra \Rl^n$. Then
\begq
\label{eq:lft05}
\left.
\begin{array}{lcc}
\F_b \left ( \begin{array}{c}
\X\\
\Y
\end{array} \right )
& = &
\left ( \begin{array}{c}
b\, \X+\Y\\
\T(b\,\X+\Y)-b\,\X
\end{array} \right )
\end{array}
\right\}
\endq
is a map on $\Rl^{2\, n}$. As $b\ra 0$ the map converges to the graph of $\T$.
\vs6\noi The technique will be used in Sec. \ref{sec:liftlam} to construct a weighted average of a Bernoulli map and an almost periodic mapping.
%======================Harmonic Oscillator as a Clock=======================
\section{\sml The Harmonic Oscillator as a Clock (HOC)}
\label{sc:hoc}
\vs6\noi The classical harmonic oscillator is given by the ODE
\[\dot{\X}=\omega\, \B\,\X\]
where
\[\B=\l(\mtx 0.1.-1.0\par\r) \mbox{ and } \X=\l(\vt u.v \par\r)\]
The general solution is
\[\X(t)=\exp(\omega\, t\, \B)\,\X_0\]
and the IDE is
\[\T_h=\exp(\omega\, h\, \B)\]
\begin{figure}[htbp]
\includegraphics[height=1.617in,width=2.267in,angle=0]{C:/Research/Book/Figures/eps/Harmonic.eps}
\caption{\sml Harmonic Oscillator as a Clock (HOC)}
\label{fg:Harmonic}
\end{figure}
\vs6\noi The parameter $\omega$ determines the speed at which the clock runs.
\vs6\noi For nonautonomous ODEs in which the nonautonomous term is a periodic function, the variable $\theta=\arctan(u/v)$ replaces the time variable $t$. Doing this increases the dimensionality of the ODEs by two resulting in an autonomous equation with no unbounded time variable.
\vs6\noi Consider the non-autonomous equation
\begin{example}
\[\ddot{x}+x^3=\sin(t)\]
\vs6\noi The conventional way of making this an autonomous equation is to embed this equation in a space of one more dimension as follows:
\begq
\label{eq:na01}
\left.
\l(\begin{array}{l}
\dot{x}\\
\dot{y}\\
\dot{z}
\end{array}\r)
=\l(\begin{array}{l}
y\\
-x^3+\sin(z)\\
1
\end{array}\r)
\right\}
\endq
However, time as an unbounded variable still appears in the solution. Since the nonautonomous term is periodic, a formulation of this equation that avoids an unbounded time variable can be obtained by using the HOC as follows:
\[\l(\begin{array}{l}
\dot{x}\\
\dot{y}\\
\dot{u}\\
\dot{v}
\end{array}\r)
=\l(\begin{array}{l}
y\\
-x^3+\sin(\arctan(v/u))\\
v\\
-u
\end{array}\r)\]
Further simplification gives
\[\l(\begin{array}{l}
\dot{x}\\
\dot{y}\\
\dot{u}\\
\dot{v}
\end{array}\r)
=\l(\begin{array}{l}
y\\
-x^3+v/\sqrt{u^2+v^2}\\
v\\
-u
\end{array}\r)\]
\end{example}
\vs6\noi Consider the finite difference equation
\begin{example}
\[\l(\begin{array}{lcl}
t &\ra & t+h \\
x&\ra & x\, \cos(x+t)
\end{array}\r)
\]
The unbounded time variable may be eliminated by using the HOC as follows:
\[\l(\begin{array}{lcl}
u &\ra & u\,\cos(h)+v\,\sin(h) \\
v &\ra & v\,\cos(h)-u\,\sin(h) \\
x&\ra & x\, \cos(x+\arctan(u/v))
\end{array}\r)
\]
\end{example}
\vs6\noi This technique will be used in later sections to introduce time into diffeomorphisms without using an unbounded time variable.
\vs6\noi Using the HOC simplifies the formation of time-one maps and first-return maps (Poincar\'{e} maps). Specifically choose $h=2\cdot\pi/M$ and then sample the equation when
$(i) \, \mod(M)=0$. Example code using the HOC is as follows:
\begq
\label{eq:hoc01}
\left.
\begin{array}{lcl}
&&\mbox{\bf \scriptsize Code Using the Harmonic Oscillator as a Clock (HOC) in place of a Time Variable}\\
h&=&2\, \pi/M\\
z&=&1\\
w&=&0\\
x&=&1\\
&&\mbox{For i = 1 To M}\\
\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h)\cdot w_1 - \sin(h) \cdot z_1\\
\\
u&= & 2\,\cos^2(x+\sqrt{2}\,\arctan(z / w))-1\\
x &=& u\\
z_1 &=& z\\
w_1 &=& w\\
\\
&&\mbox{\sml Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
Using this method, the time-one map $\theta=2\,k\,\pi$ coincides with the first-return map to the hyperplane $z=0$.
\begin{definition} {\bf Standard HOC}
The standard HOC is the clock for which $z=0.0, \; w=1.0$. In this case $r=\sqrt{z^2+w^2}=1$ and therefore $z=\sin(\theta),\;\; w=\cos(\theta)$.
\end{definition}
%====================================Canonical AP===============================
\section{\sml Canonical Almost Periodic IDE}
\label{sc:aps}
\begin{center}
{\em Canonical Periodic and Almost Periodic IDEs provide Standard IDEs to be used for Baseline Almost Periodic Systems}
\end{center}
\vs6\noi Canonical IDE can be constructed from simple parts:
\[\exp(h\, \N)\exp(h\, \A)\]
where $\N^2=0$ and $\A$ is a block $4 \times 4$ matrix with upper left block $\omega \B$ and the lower right block $\B$. The lower right block is the HOC with period $2\cdot \pi$. The value of $\omega$ determines whether the IDE is periodic or almost periodic.
\begin{figure}[htbp]
\includegraphics[height=1.923in,width=3.42in,angle=0]{C:/Research/Book/Figures/eps/CanonicalAP.eps}
\caption{\sml The Canonical Almost Periodic IDE is formed from the Canonical Periodic IDE seen on this Figure: Plate A $\omega=2.0$; Plate B $\omega=3.0$. To obtain an Almost Periodic IDE take $\omega \not\in \Zx$}
\label{fg:canonicalap}
\end{figure}
\begq
\label{eq:canap}
\left.
\begin{array}{lcl}
&&\mbox{\bf \scriptsize Code for Fig. \ref{fg:canonicalap}}\\
h&=&0.001\\
z_1&=&0\\
w_1&=&1\\
x&=&1\\
&&\mbox{For i = 1 To N}\\
\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h)\cdot w_1 - \sin(h) \cdot z_1\\
\\
z_1 &=& z\\
w_1 &=& w\\
\\
u &=&(x \cdot \cos(\omega\cdot h) + y \cdot \sin(\omega \cdot h))+h\cdot(w\cdot \cos(h)-z\cdot\sin(h))\\
v &=& (y \cdot \cos(\omega\cdot h) - x \cdot \sin(\omega \cdot h)) \\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.043in,width=4.507in,angle=0]{C:/Research/Book/Figures/eps/CanonicalAP02.eps}
\caption{\sml The Canonical Almost Periodic IDEs. Plate A $\omega=2.1$; Plate B $\omega=\sqrt{3}/3$. }
\label{fg:canonicalap02}
\end{figure}
\vs6\noi It is apparent from Fig. \ref{fg:canonicalap02} that the almost periodic IDEs provide the geometry common to many of the classical chaotic systems. The stretching dynamic just makes the geometry a bit random.
%======================================FIX A,B,C=================================
\vs6\noi The model for the canonical almost periodic IDE is the ODE
\[\ddot{x}+x=\sin(a\cdot t)\]
The following is a set of useful substitutions for generating periodic forcing terms:
\[\begin{array}{lcl}
\sin(t)&=&z\\
\sin(2 \cdot t)&=& 2\cdot z\cdot w\\
\sin(3 \cdot t)&=&3\cdot z-4\cdot z^3\\
\sin(4 \cdot t)&=&4\cdot z\cdot w-8\cdot z^3 \cdot w\\
\sin(5 \cdot t)&=&5\cdot z-20\cdot z^3+16\cdot z^5\\
\cos(t)&=& w\\
\cos(2\cdot t)&=& 2\cdot w^2-1\\
\cos(3\cdot t)&=& 4\cdot w^3-3\cdot w\\
\cos(4\cdot t)&=& 8\cdot w^4-8\cdot w^2+1\\
\cos(5\cdot t)&=& 16\cdot w^5-20\cdot w^3+5\cdot w
\end{array}\]
%=========================================Non Invertibility========================================
\section{\sml Relationships Between Attractors, Non Invertibility, and
Non Dissipative Maps}
This section contains lemmas that relate invertibility, dissipation and orientation preserving concepts to complexity.
\vs6
\noi {\bf Chaos and Attractors} Chaos and attractors are independent
concepts, and the most complex forms of chaos occur in
non dissipative systems where KAM island chains are formed. In fact, the presence of dissipation
reduces complexity by destroying KAM island chains and thus reduces the ``level'' of chaos.
\vs6
\noi {\bf Non invertibility and Complexity} Noninvertible systems are inherently more complex than
invertible systems. This is best illustrated by the fact
non invertibility is a sufficient condition for a system to have
positive entropy [Walters]. Noninvertible systems do not directly arise
from solutions of differential equations.
\vs6
\noi {\bf Non orientation Preserving and Complexity} Orientation-preserving maps are those for which the Jacobian
determinant is positive. The significance of this is that
non orientation-preserving maps cannot arise from the solutions
of differential equations. There are many complex maps which are not orientation preserving; most
notably, there are parameter values for which the Jacobian derivative of
the H\'{e}non map is negative.
\begin{lemma}
Any dissipative system that arises from
an ODE can be converted
to a non dissipative system without altering the fundamental
complexity.
\end{lemma}
\pf Let
$T({\bf X})$ be any n-dimensional dissipative system. Since it
arises from an ODE, the
Jacobian determinant, $\det(DT({\bf X}))$, must be positive. The
following mapping ``contains'' $T$ in an obvious sense and is
non dissipative:
\[\l(\vt {\bf X}. z \par \r)\ra \l(\vt T({\bf X}).
z/(\det(DT({\bf X}))) \par
\r)\]
This map expands in the direction of the added coordinate $z$ by
exactly the amount needed to keep the combined map
non dissipative. Also, in this map $T$ remains ``intact.'' The
Jacobian determinant of the combined map is 1. \rl
\begin{lemma}
\label{lm:liftinv} Any noninvertible mapping can be made
invertible by doubling the number of coordinates.
\end{lemma}
\pf See section \ref{sc:OneD}. \rl
\begin{lemma} Any non dissipative map can be made
dissipative in such a way that the original map is an attractor.
\end{lemma}
\pf Lemma \ref{lm:liftinv}.\rl
\begin{lemma}
Any non-orientation-preserving
mapping can be made orientation preserving by increasing the
dimension by one.
\end{lemma}
\pf Add the coordinate
$z \ra -z$. \rl
%======================================Matrix Operator Convention=======================================
\section{\sml The Matrix Operator Convention}
\label{sc:mtxop}
\begin{definition}{\sml \bf The Matrix Operator Convention}
\label{df:mtxop}
\vs6\noi Let $\A(\X)=(a_{ij}(\X))$ be a matrix of real or complex valued functions.
Then
\[\A(*)\, \X \dff \A(\X)\, \X = (\sum_j a_{ij}(\X)\, x_j)\]
\end{definition}
\begin{definition}
\[(a_{ij}(\cdot))\X\dff (\sum_j a_{ij}(x_j))\]
\end{definition}
\begin{example}
\begq
\l(\mtx 0.f(\cdot).0.0 \par \r)\l(\vt x.y \par \r)= \l( \vt f(y).0\par \r)
\label{eq:mtxop}
\endq
\end{example}
%==========================================
\section{\sml Fusion: Going from Local Dynamics to Global Dynamics}
\label{sc:fus}
An IDE is a local approximation to a system or an ODE. To obtain a global IDE it is necessary to fuse local IDEs, see proposition \ref{pr:fus}. This is the fundamental method of combining two IDEs algebraically and dynamically. There are four forms of fusion used in this book. The first form is
\begin{definition} {\bf Scalar Fusion}
Given two IDEs , $\T, \; \S$ and a real or complex number $\lambda$, the scalar fusion of $\T,\; \S$ by $\lambda$ is the IDE
\[\lambda\, \T_h+(1-\lambda)\,\S_h\]
\end{definition}
\begin{example}{\bf Scalar Fusion}
\[0.75\, \T+0.25\, \S\]
\end{example}
\begin{definition} {\bf Functional Fusion}
Given two IDEs , $\T, \; \S$ and a real or complex valued function, $\lambda(\X)$, the functional fusion of $\T,\; \S$ by $\lambda(\X)$ is the IDE
\[\lambda(\X)\, \T_h+(1-\lambda(\X))\,\S_h\]
\end{definition}
\begin{example}{\bf Functional Fusion}
\[\tanh(x)\, \T+(1-\tanh(x))\, \S\]
where $x$ is any real or complex variable.
\end{example}
\begin{definition} {\bf Compound Functional Fusion}
Given two IDEs , $\T(\X), \; \S(\X)$ and a real or complex valued function, $\lambda(\Z)$, the compound functional fusion of $\T,\; \S$ by $\lambda(\Z)$ is the IDE
\[\lambda(\Z)\, \T_h+(1-\lambda(\Z))\,\S_h\]
where $\lambda(\Z)$ is a function of $\,\X$ and additional variables $\Y$.
\end{definition}
\begin{example}{\bf Compound Functional Fusion}
\[\lambda(\Z)\, \T(\X)+(1-\lambda(\Z))\, \S(\X)\]
where $\lambda(\Z)$ is a function of the form
\[\lambda(\Z)= p(x)\cdot(z+q(y)-z\cdot q(y) )\]
and where
$x,\, y,\, z$ are any real or complex scalar variables.
\end{example}
\begin{definition} {\bf IDE Fusion}
Given three IDEs , $\T, \; \S, \; \R$ , the IDE fusion of $\T,\; \S$ by $\R$ is the IDE
\[\R\, \T_h+(1-\R)\,\S_h\]
\end{definition}
\begin{example}{\bf IDE Fusion}
\[\exp(h\, \A)\, \T_h+(\I-\exp(h\, \A))\,\S_h\]
where $\A$ is any square matrix and may be a matrix function $\X$.
\end{example}
\begin{figure}[htbp]
\includegraphics[height=2.27in,width=2.26in,angle=0]{C:/Research/Book/Figures/eps/Fus01a.eps}
\caption{\sml Developing a Global IDE from Four Local IDEs }
\label{fg:fus01a}
\end{figure}
\vs6\noi The form of the global IDE is
\[s_1(\X)\,\T_1 +s_2(\X)\,\T_2 +s_3(\X)\,\T_3+s_4(\X)\,\T_4 \]
where the $s_i$ are transition functions.
\vs6\noi The simplest global IDE arises when the local IDE is global. The second simplest is a global IDE formed from two local IDEs as seen in Fig. \ref{fg:fus01b}.
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/Fus01b.eps}
\caption{\sml Developing a Global IDE from Two Local IDEs }
\label{fg:fus01b}
\end{figure}
\vs6\noi Fusion plays the role of numerical integration where step size is replaced by domain partition size.
\vs6\noi A key function that controls the local dynamics within a partition has the form of Eq. \ref{eq:fus1}.
\begq
\label{eq:fus1}
\left.
\begin{array}{lcl}
q &=& 0.5 \cdot (1 - \tanh(120\,(1 - h - y)))\\
p &=& 0.5 \cdot (1 + \tanh(120\,(1 - h + y)))\\
s &=& p \cdot (s + q - s \cdot q)\\
\end{array} \right \}
\endq
%==================================Transition Functions===============================
\section{\sml Transition Functions}
\label{sc:transfn}
\begin{definition} {\bf Boundary Conditions}
A boundary condition is any function that imposes a limit on the extent of the influence of the dynamics of an IDE.
\end{definition}
\vs6\noi Boundary conditions are used to construct transition functions which are used to fuse two IDEs.
\begin{example} {\bf Boundary Condition}
Let $s(x)=0.5\cdot (1+\tanh(\beta \,x))$. For sufficiently large $\beta$, $\tanh(\beta \, x)\approx \sgn(x)$. Then
\[s(x)\cdot \exp(h \, \A)\]
limits the IDE to being non zero only in the half plane $x \geq -\epsilon$ for some sufficiently small $\epsilon$.
\end{example}
\begin{definition} {\bf Transition functions}
\label{df:trnasfn}
A transition function or surface is a device that separates the effect of two or more dynamical systems and provides a mechanism by which the combined system transitions between the two or more dynamical regions.
\end{definition}
\begin{example} {\bf Transition Functions}
\label{ex:transfn}
\vs6\noi A real valued function $f:\Rl^n\ra \Rl$ is potentially a transition function if $f(\X)=0$ defines a surface in $\Rl^{n-1}$. This surface may be called a {\em transition surface}.
\vs6\noi Let $f(x,y,z)= z-x$. Let $s( \X)=.5\, (1+\sgn(f(x,y,z)))$. Consider two IDEs \ul{fused} together to make a single IDE as follows:
\[\R_h(\X)=s\, \T_h+(1-s)\, \S_h\]
When $s=1$ the dynamics of $\T$ are in play. When $s=0$ the dynamics of $\S$ are in play. The function $s$ provides a means by which the system \ul{transitions} between $\T$ and $\S$.
\end{example}
\begin{definition}{\bf Simple and Compound Transition Functions}
\vs6\noi A simple transition function is of the form $s=f(x,y,z)$. A compound transition function switches between two simple transition functions. This may require an iteration.
\end{definition}
\begin{example}{\bf Compound Transition Function}
\label{ex:trancf}
The Boolean expression in Sec. \ref{sc:boolif}
\[s\ra (1-p)\,(s+q-s\cdot q)\]
where $p, \; q$ are functions of $\X$ is an example of a compound transition function.
\end{example}
\vs6\noi As can be seen from the examples, a transition function is composed of a series of boundary conditions.
\begin{definition}{\bf Time One and First Return Maps for IDEs}
\vs6\noi (1) Given an IDE $\T_h$, $\T_1$ will be referred to as the time one map of the IDE.
\vs6\noi (2) Given two IDEs, $\T,\;\; \S$, connected by a transition function, $f(\X)$,
\[f(\X)\, \T_h+(1-f(\X))\, \S_h\]
The set of points where $f(\T_h(\X))=0$ will be called the first return map for $\T_h$; and, the set of points where $f(\S_h(\X))=0$ will be called the first return map for $\S_h$.
\end{definition}
\vs6\noi Most diffeomorphisms can be realized as the time one map of some IDE but the representation is not unique.
\vs6\noi First return maps (Poincar\'{e} maps) are built into the form of IDEs as {\em transition surfaces}. If $\T, \; \S$ are two IDEs, then the transition between them can be algebraically expressed as
\[\sg(f(\X))\,\T+(1-\sg(f(\X)))\,\S\]
\vs6\noi Time one maps are also used to transition between IDEs. The transition rule is not a surface but a fixed time of transition. The time of transition will be a multiple of some chosen number such as $2\, \pi$. Periodically forced equations provide a ``natural'' time to transition: the period of the forcing. For example
\[\ddot{x}+x^3=\cos(t)\]
has $2\, \pi$ as a convenient time one map. Each occasion that the time parameter reaches a multiple of $2\, \pi$ a point is plotted. In short if $t=2\, n\, \pi$ for some integer $n$, then plot the point.
An algebraic expression of this concept is to plot the point when $t-[t/2\, \pi]=0$.
\vs6\noi {\bf Virtual Fixed Point:} Virtual fixed points are defined for one-dimensional maps which have a discontinuity making it impossible for the function to cross the line $y=x$. See Fig. \ref{fg:virtualfp}, Plate {\bf A}. It is called a virtual fixed point because there exists a $\Ci$ function which differs from the reference function on a set of arbitrarily small measure on their common domain {\bf D}, see Fig. \ref{fg:virtualfp} plate {\bf B}. More precisely, given $f$ and $\epsilon$, there exists a $g$ which is $\Ci$ such that
\[\int_D \|f(x)-g(x)\| dx< \epsilon\]
\begin{example}
\[\begin{array}{lcl}
\mbox{Plate A of Fig. \ref{fg:virtualfp}}&&\\
f(t) &=& 0.5 \cdot (1 + \sgn(0.5 - t)) \cdot (t + 0.5) + 0.5 \cdot (1 - \sgn(0.5 - t)) \cdot (t - 0.5)\\
&&\\
\mbox{Plate B of Fig. \ref{fg:virtualfp}}&&\\
g(t)& = &0.5 \cdot (1 + \tanh(\beta \cdot (0.5 - t))) \cdot (t + 0.5) + 0.5 \cdot (1 - \tanh(\beta \cdot (0.5 - t))) \cdot (t - 0.5)
\end{array}\]
\end{example}
\begin{figure}[htbp]
\includegraphics[height=2.043in,width=3.997in,angle=0]{C:/Research/Book/Figures/eps/VirtualFP.eps}
\caption{ \sml In plate B above $\beta=200$.}
\label{fg:virtualfp}
\end{figure}
%============================Time one and First Return Maps========================================
\small
\section{\sml Time-one Maps and First Return Maps}
\label{sc:t1fr}
The point of time-one maps and first return maps is to reduce the evaluation of the complexity of a continuous time equation to a discrete equation that admits a more direct inspection and evaluation. This section will define and examine these two maps in the context of IDE theory.
\vs6\noi A time-one map requires some method of measuring time. Typically this has been a time parameter explicitly found in ODEs such as non-autonomous equations. However, it is possible to replace an explicit time parameter in an ODE with a means of measuring time by the use of the HOC, see Sec. \ref{sc:hoc}. The method derived for non autonomous equations can also be used for all ODEs and importantly, it is fundamental to the theory of IDEs.
\begin{figure}[htbp]
\includegraphics[height=1.957in,width=2.697in,angle=0]{C:/Research/Book/Figures/eps/TimeOneFR.eps}
\caption{{\sml Plate A is a Time-one map Illustration; Plate B is a First return Map Illustration}}
\label{fg:timeonefr}
\end{figure}
\vs6\noi Note that Plate A of Fig. \ref{fg:timeonefr} is both a time-one map and a first return map.
\vs6\noi In the construction of IDEs by fusion, the HOC will be used to provide boundary conditions to determine when the region of influence of the fused IDE shifts between component IDEs.
\vs6\noi First return maps are a second technique for reducing continuous time systems to discrete systems. The first return map requires a surface to be present which can be used to measure when a system returns to a given area. First return maps are facilitated in some IDEs by the presence of a transition surface which switches control from one component IDE to another.
\vs6\noi Historically, Poincar\'{e} \cite{bi:hp} recognized that the essential dynamics of a system can be captured by only considering when the time series of the system passed through a conveniently chosen fixed surface. Plotting the points at which the time series intersected with this surface is called a {\em Poincar\'{e} map} or a first return map interchangeably.
\vs6\noi Time-one maps and first return maps are separate methods of analysis but sometimes coincide. Figure \ref{fg:timeone} illustrates one way in which these two discrete maps may differ. The most conspicuous difference is that a time-one map has a precise time at which the equation is evaluated which never changes but the time at which the first return map is evaluated generally varies. This difference produces very different results but does not alter the complexity of the system under review.
\vs6\noi Discrete maps are time one maps due to their precise regularity. Thus $2\, x\, \mod(1)$ is a time one map since there is an implicit clock used to evaluate it. Continuous functions which are evaluated when they cross a surface cannot necessarily be assigned the same time stamp due to variations in the surface.
\begin{figure}[htbp]
\includegraphics[height=2.573in,width=4.403in,angle=0]{C:/Research/Book/Figures/eps/TimeOne.eps}
\caption{{\sml Time-one and First Return Maps for $2 \, x\, \mod(1)$}}
\label{fg:timeone}
\end{figure}
\begq
\label{cd:tone01}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:timeone} Plate A, green is as follows:}\\
M&=&1000\\
h&=& 1/M\\
&& \mbox{For i = 1 to 200000}\\
u &=& \exp(\ln(2)\, h) \cdot x\\
v &=& v + h\\
x &=& u \, \mod(1)\\
y &=& v \, \mod(1)\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
To obtain Plate A, red use
\[ k = i\, \mod(M);\;\; \mbox{If $k = 0$ Then Plot Point};\;\; \mbox{If $k = 0$ Then $x_n = x_{n+1}$}\]
The effect of $k = i\, \mod(M)$ is equivalent to using a clock since $M \cdot h=1$, i.e., time one.
\vs6\noi In Plate A, red, a point is plotted as soon as $x$ reaches the line $x=1$. This is a first return map.
\begq
\label{cd:tone}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:timeone} Plate B, blue is as follows:}\\
M&=&1000\\
h&=& 1/M\\
&& \mbox{For i = 1 to 200000}\\
k &=& i \mod(M)\\
u &=& \exp(\ln(2)\, h) \cdot x\\
v &=& v + h\\
x &=& u \\
&& \mbox{If $k = 0$ Then $x = x\, \mod(1)$}\\
&& \mbox{If $k = 0$ Then Plot $(y,x)$}\\
&& \mbox{If $k = 0$ Then $y = x$}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
In Plate B, the value of $x$ is determined only after the ``time'' reaches $M\cdot h=1$. This is a time-one map. Plate B, orange is explained by the use of a standard function to obtain a two-dimensional image of the complexity in the function $2\, x\, \mod(1)$, see Sec. \ref{sc:vis}.
\vs6\noi As will be seen in Sec. \ref{sc:sf}, all Boolean operations such as $if, then$ and functions such as $x \, \mod(1)$ can be replaced by $C^\infty$ functions.
\begin{figure}[htbp]
\includegraphics[height=2.457in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/TimeOneB.eps}
\caption{\sml Plate A: First Return vs $y\ra y+h$: Plate B: The Region $[0,h]$ Expanded}
\label{fg:timeoneB}
\end{figure}
\begin{definition}{\bf Time One and First Return Maps for IDEs}
\vs6\noi (1) Given an elementary IDE $\T_h$, $\T_1$ will be referred to as the time one map of the IDE. For a compound IDE, the HOC must be used to determine time.
\vs6\noi (2) Given two IDEs, $\T,\;\; \S$, connected by a transition function, $f(\X)$,
\[f(\X)\, \T_h+(1-f(\X))\, \S_h\]
The set of points where $f(\T_h(\X))=0$ will be called the first return map for $\T_h$; and, the set of points where $f(\S_h(\X))=0$ will be called the first return map for $\S_h$.
\end{definition}
\vs6\noi Most diffeomorphisms can be realized as the time one map of some IDE but the representation is not unique.
\vs6\noi First return maps (Poincar\'{e}\sp maps) are built into the form of IDEs as {\em transition surfaces} which are transition functions, $f(\X)$, for which $f(\X)=0$. If $\T, \; \S$ are two IDEs, then the transition between them can be algebraically expressed as
\[\sg(f(\X))\,\T+(1-\sg(f(\X)))\,\S\]
Visualizing a first-return map for an IDE is subtle because the time step, $h$, is short. Two successive returns are within $h$ and that defines a very small region. See Fig. \ref{fg:timeoneB}, Plate A. The first return map is plotted in the interval $[0, \, h]$. This is very hard to visualize without rescaling the interval. The rescaling is illustrated in Plate B.
\vs6\noi In addition to plotting the first return vs $y\ra y+h$, two other lines are present in both Plates for comparison. The lines forming the pair $(x_n, x_{n+1})$ nearly coincide with the graph of $y=x$ because two successive iterates are very close together. Both Plates show this fact. In addition, two iterates separated by $1000\cdot h$ are plotted and have a slope of 2. The dynamics on the full interval [0,1] and the subinterval [0,h] are unchanged as both plates illustrate.
\vs6\noi The function $x\ra 2\, x\,\mod(1)$ has been chosen for these illustrations because it is a unilateral shift on two symbols. The dissection of its first-return and time one maps for a shift provides examples that will facilitate recognizing when it is possible to determine the presence of chaos from the form of an equation.
\vs6\noi The first-return map for $2\, x\, \mod(1)$ is a model for the dynamics near an hyperbolic fixed point. Although the example is one-dimensional, it may be thought of as a coordinate of a higher dimensional IDE. The example illustrates the dynamics of a chaotic system near an hyperbolic fixed point.
\vs6\noi Time one maps are also used to transition between IDEs. The transition rule is not a surface but a fixed time of transition. The time of transition will be a multiple of some chosen number such as $2\, \pi$. Periodically forced equations provide a ``natural'' time to transition: the period of the forcing. For example
\[\ddot{x}+x^3=\cos(t)\]
has $2\, \pi$ as a convenient time one map. Each occasion that the time parameter reaches a multiple of $2\, \pi$ a point is plotted. In short if $t=2\, n\, \pi$ for some integer $n$, then plot the point.
An algebraic expression of this concept is to plot the point when $t-[t/2\, \pi]=0$.
%03
%==========================Flow Charts===========================
\chapter{IDE Flow Charts}
\label{ch:flow}
\begin{center}
\parbox{3.5in}{\em Flow Charts will be used to facilitate the conceptualization and design of systems using IDEs. IDE flow charts will use many of the same symbols found in control theory and electrical engineering.}
\end{center}
\vs6\noi To facilitate the analysis of the form of an IDE, a set of flow chart symbols will be used. The point of using flow charts is to improve the ability to {\em algebraically} compare the form of two IDEs without having to resort to analytical examination.
%yy
\vs6\noi This section is divided into three parts. (1) the legend of symbols and examples, Sec. \ref{sc:leg}; (2) flow charts for common boundary conditions. Sec. \ref{sc:bc}; (3) examples of commonly occurring flow charts, Sec. \ref{sc:cfc}. Ideally, the flow chart of every complex IDE should be constructible from simple parts consisting of IDE flows and boundary conditions.
%============legend============
\section{\sml Legend}
\label{sc:leg}
\sml
Figure \ref{fc:legend} illustrates the common symbols used in constructing an IDE flow chart. Table \ref{tb:legend} provides examples.
\begin{figure}[htbp]
\includegraphics[height=2.22in,width=2.09in,angle=0]{C:/Research/Book/Figures/eps/Legend.eps}
\caption{\sml IDE Flow Chart Legend}
\label{fc:legend}
\end{figure}
\begin{table}
\caption{\sml \bf Flow Chart Examples}
\label{tb:legend}
\begin{tabular}{||l||l||}\hline
IDE& $\exp(h\, \A)$\\ \hline
Simple Boundary Condition&$p_1=0.5\,(1+\tanh(1-x))$\\ \hline
Compound Boundary Condition&$p=p_1+p_2-p_1\cdot p_2$\\ \hline
Recursive Boundary Condition& $s\ra p\cdot(s+q-s\cdot q)$\\ \hline
Scalar Fusion&$x= s(\X)\cdot y+(1-s(\X))\cdot z$ \\ \hline
Simple HOC Fusion& $p_c=0.5\cdot (1+\sgn(w_c))$\\ \hline
Recursive Scalar Fusion&$x\ra s(\X)\cdot x+(1-s(\X))\cdot z$\\ \hline
Dynamic IDE Definition&$\X_0=\exp(h\, \A_1)\ra \exp(h\, \A_2)\, (\X-\X_0)+\X_0$\\ \hline
IDE Fusion& $\exp(h\, \A)\, \X+(\I-\exp(h\, \A))\F(\X)$\\ \hline
IDE Recursive Fusion&$\T_h \ra \exp(h\, \A)\, \T_h+(\I-\exp(h\, \A))\S_h$\\ \hline
Delay&$y=y_n$\\ \hline
Decision& $p+q-p\cdot q$\\ \hline
\end{tabular}
\end{table}
\vs6\noi There are two IDEs that will be standardized because of their frequent appearance in flow charts. These are seen in Fig. \ref{fc:side}.
\begin{figure}[htbp]
\includegraphics[height=1.083in,width=1.177in,angle=0]{C:/Research/Book/Figures/eps/SIde.eps}
\caption{\footnotesize Two Standardized IDEs: Yellow is the Standard semi circle; Orange is the HOC}
\label{fc:side}
\end{figure}
\vs6\noi Both of the IDEs in Fig. \ref{fc:side} are used to construct $x \, \mod(1)$ and other functions, which, in turn, are used to replace mod(1) functions wherever they occur in pure mathematical examples such as the baker's transformation.
\vs6\noi In constructing an IDE for a function such as $x\, \mod(1)$ there are two points of interest: (1) $x\, \mod(1)$ is not an elementary function so it is necessary to derive a $C^\infty$ analog to be used in the IDE; (2) When the analog of a non elementary function is used in place of the non elementary function to derive an IDE, the resulting IDE is a perturbation of the non elementary function. This points out that IDEs must necessarily be perturbations of shift operators. It will be shown that IDEs which are perturbations retain all of the complexity of the original function they replace except possibly on a set of arbitrarily small measure.
\vs6\noi The significance of this for the Hirsch Conjecture and the definition of chaos is that very small perturbations of a shift provide as much or more insight than is provided by the Smale-Birkhoff theorem \cite{bi:sb}. The reason for this is that the Smale-Birkhoff theorem states that the diffeomorphism in question is a shift of some order on a Cantor set which may have measure zero. The theorem does not say what that means for the diffeomorphism at large. On the other hand, if an IDE is a small perturbation of a shift, the IDE's complexity extends to the entire domain of definition of the IDE, not just a Cantor set of measure zero. This is particularly significant for applications.
\begin{example} {Replacing a "For" Loop with an HOC}
\begin{figure}[htbp]
\includegraphics[height=2.167in,width=3.0in,angle=0]{C:/Research/Book/Figures/eps/HOCvsLoop.eps}
\caption{\sml "For Loop" vs HOC}
\label{fc:hocloop}
\end{figure}
\begin{example} {Replacing "GoTo" with Fusion}
\begin{figure}[htbp]
\includegraphics[height=2.097in,width=3.0in,angle=0]{C:/Research/Book/Figures/eps/GoToFusion.eps}
\caption{\sml "GoTo" vs Fusion}
\label{fc:gotofusion}
\end{figure}
\end{example}
\vs6\noi Figure \ref{fc:hocloop} illustrates how to replace a "For Loop" with a Harmonic Oscillator Clock. The HOC is used to establish a boundary condition, see Sec. \ref{sc:bc}, that keeps track of the number of iterations of the IDE. The HOC coded boundary condition is
\[p_c = 0.5 \cdot (1 + \sgn(1 - 0.01 \cdot h - w_c))\]
while the analytical boundary condition is
\[p_c = 0.5 \cdot (1 + \sgn(1 - w_c))\]
To arrive at an IDE, which is a perturbation of the boundary condition, replace the $\sgn$ with $\tanh$.
\[p_c = 0.5 \cdot (1 + \tanh(\beta (1 - w_c)))\]
for sufficiently large $\beta$.
\end{example}
\vs6\noi The HOC boundary condition will usually be combined with a separate boundary condition controlled by the IDE. The two BCs are then combined into a single compound boundary condition typically using a Boolean expression. This pattern will occur throughout this text.
\vs6\noi GoTo statements send the computer to a location that is a function. Fusion is the standard method of replacing GoTo statements with an IDE. Fusion usually requires a compound or recursive BC.
%========================================
\section{\sml Boundary Conditions (BC) and Fusion}
\label{sc:bc}
\begin{figure}[htbp]
\includegraphics[height=2.333in,width=3.45in,angle=0]{C:/Research/Book/Figures/eps/BCFlowChart.eps}
\caption{\sml IDE Flow Chart Boundary Conditions}
\label{fc:bc}
\end{figure}
\vs6\noi Flow Chart \ref{fg:bcf} illustrates the use of boundary conditions to fuse two IDEs.
\begin{figure}[htbp]
\includegraphics[height=2.333in,width=3.in,angle=0]{C:/Research/Book/Figures/eps/BCF.eps}
\caption{\sml IDE Flow Chart Fusion using Boundary Condition $p_c=0.5\, (1+\sgn(w_c))$ from the HOC}
\label{fg:bcf}
\end{figure}
\sml
\vs6\noi Boundary conditions are used to link two or more simple IDEs together to form a more complex IDE as seen in flow chart \ref{fg:bcf}. Typically, boundary conditions restrict the area of influence of an IDE or establish the boundary between elliptic and hyperbolic regions.
\vs6\noi The form of boundary conditions can be a Boolean expression as illustrated in Fig. \ref{fc:bc} or an n-phase gate, Sec. \ref{sc:gates}. An n-phase gate is the simplest boundary condition. An n-phase gate can be constructed using the HOC as seen in Fig. \ref{fg:hocg}.
\[\begin{array}{lcl}
&&\mbox{\bf Example Code for Boundary Conditions Plate C of Fig. \ref{fg:bcf} is as follows:}\\
\cdots&=& \cdots \\
{\rm HOC} &=&\exp(h \, 2\, \pi \B)\\
\\
\T_h&=&\exp(h \, \A_1)\\
\S_h&=&\exp(h \, \A_2)\\
\\
p_c &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - w)))\\
p_2 &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - y)))\\
q &=& 0.5 \cdot (1 + \tanh(\beta\,(h - x)))\\
p &=& p_c + p_2 - p_c \cdot p_2\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
\R_h&=&s\, \T_h+(1-s)\,\S_h\\
\end{array}\]
\begin{figure}[htbp]
\includegraphics[height=1.923in,width=4.35in,angle=0]{C:/Research/Book/Figures/eps/HocG.eps}
\caption{\sml Three phase gate using an HOC}
\label{fg:hocg}
\end{figure}
\begq
\label{cd:hocg}
\left.
\begin{array}{lcl}
&&\mbox{\bf Example Code for Plate A of Fig. \ref{fg:hocg} is as follows:}\\
\cdots&=& \cdots \\
{\rm HOC} &=&\exp(-h \, 2\, \pi \B)\\
\cdots \\
pc_1 &=& 0.5 \cdot (1 + \sgn(z - 0.5))\\
pc_2 &=& 0.5 \cdot (1 - \sgn(|z| - 0.5))\\
pc_3 &=& 0.5 \cdot (1 - \sgn(z + 0.5))\\
\\
\R_h&=&pc_1\cdot \T_1 +pc_2\cdot \T_2+pc_3\cdot \T_3\\
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.6in,width=4.5in,angle=0]{C:/Research/Book/Figures/eps/HocG02.eps}
\caption{\sml Three phase gate using an HOC: Plate A uses the gate boundary conditions alone; Plate B supplements the three-phase gate with a recursive boundary condition}
\label{fg:hocg02}
\end{figure}
\vs6\noi Both Plates are fusions of the same three IDEs that are also used to construct the baker's transformation, a bilateral shift. This example demonstrates that the boundary conditions play a pivotal role in the formation of complexity. In particular, Plate A is almost periodic while Plate B is chaotic.
\begq
\label{cd:hocg02}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Plate A of Fig. \ref{fg:hocg02} is as follows:}\\
\\
\alpha&=& 2.0 \, \pi\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
\\
pc_1 &=& 0.5 \cdot (1 + \sgn(z - 0.5))\\
pc_2 &=& 0.5 \cdot (1 - \sgn(|z| - 0.5))\\
pc_3 &=& 0.5 \cdot (1 - \sgn(z + 0.5))\\
\\
u_1 &=& \exp(h\, \ln(2)) \cdot x\\
v_1 &=& \exp(-h\, \ln(2))\cdot y\\
\\
u_2 &=& \exp(h\, \ln(2)) \cdot x\\
v_2 &=& \exp(-h\, \ln(2)) \cdot (y - 1) + 1\\
\\
u_3 &=& x\cdot\cos(2.0\pi\cdot h) + y\cdot\sin(2.0\pi\cdot h)\\
v_3 &=& y\cdot\cos(2.0\pi\cdot h) - x\cdot\sin(2.0\pi\cdot h)\\
\\
\\
x &=& pc_2 \cdot u_1 + pc_1 \cdot u_2 + pc_3 \cdot u_3\\
y &=& pc_2 \cdot v_1 + pc_1 \cdot v_2 + pc_3 \cdot v_3\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begq
\label{cd:hocg02b}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Plate B of Fig. \ref{fg:hocg02} is as follows:}\\
\\
\alpha&=& 2.0 \, \pi\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
\\
pc_1 &=& 0.5 \cdot (1 + \sgn(z - 0.5))\\
pc_2 &=& 0.5 \cdot (1 - \sgn(|z| - 0.5))\\
pc_3 &=& 0.5 \cdot (1 - \sgn(z + 0.5))\\
\\
p &=& pc_1 + pc_2\\
q &=& 0.5 \cdot (1 + \sgn(1 - x))\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
u_1 &=& \exp(h\, \ln(2)) \cdot x\\
v_1 &=& \exp(-h\, \ln(2))\cdot y\\
\\
u_2 &=& \exp(h\, \ln(2)) \cdot x\\
v_2 &=& \exp(-h\, \ln(2)) \cdot (y - 1) + 1\\
y_0 &=& s \cdot v_2 + (1 - s) \cdot y_0\\
\\
u_3 &=& x\cdot\cos(2.0\pi\cdot h) + (y-y_0)\cdot\sin(2.0\pi\cdot h)\\
v_3 &=& (y-y_0)\cdot\cos(2.0\pi\cdot h) - x\cdot\sin(2.0\pi\cdot h)+y_0\\
\\
\\
x &=& pc_2 \cdot u_1 + pc_1 \cdot u_2 + pc_3 \cdot u_3\\
y &=& pc_2 \cdot v_1 + pc_1 \cdot v_2 + pc_3 \cdot v_3\\
\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.367in,width=4.48in,angle=0]{C:/Research/Book/Figures/eps/HocG03.eps}
\caption{\sml Three phase gate using an HOC: Plate A uses the gate boundary conditions alone; Plate B supplements the three-phase gate with a recursive boundary condition}
\label{fg:hocg03}
\end{figure}
\begq
\label{cd:hoc3b}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Plate B of Fig. \ref{fg:hocg03} is as follows:}\\
\\
\alpha&=& 2.0 \, \pi\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
\\
pc_1 &=& 0.5 \cdot (1 + \tanh(\beta\,(z - 0.5)))\\
pc_2 &=& 0.5 \cdot (1 - \tanh(\beta\,(\tanh(z)\cdot z - 0.5)))\\
pc_3 &=& 0.5 \cdot (1 - \tanh(\beta\,(z + 0.5)))\\
\\
p &=& pc_1 + pc_2\\
q &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - x)))\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
u_1 &=& x+h \cdot y\\
v_1 &=& y\\
\\
u_2 &=& x\\
v_2 &=& y+h \cdot x\\
y_0 &=& s \cdot v_2 + (1 - s) \cdot y_0\\
\\
u_3 &=& x\cdot\cos(2.1\pi\cdot h) + (y-y_0)\cdot\sin(2.1\pi\cdot h)\\
v_3 &=& \exp(1.5\,h)\cdot (y-y_0)\cdot\cos(2.1\pi\cdot h) - x\cdot\sin(2.1\pi\cdot h)+y_0\\
\\
\\
x &=& pc_2 \cdot u_1 + pc_1 \cdot u_2 + pc_3 \cdot u_3\\
y &=& pc_2 \cdot v_1 + pc_1 \cdot v_2 + pc_3 \cdot v_3\\
\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
%===================================================================
\section{\sml Common Flow Charts}
\label{sc:cfc}
\begin{figure}[htbp]
\includegraphics[height=0.61in,width=2.053in,angle=0]{C:/Research/Book/Figures/eps/IDEcomposition.eps}
\caption{\sml IDE composition Flow Chart}
\label{fg:idecomposition}
\end{figure}
\vs6\noi Figure \ref{fg:idecomposition} represents proposition \ref{pr:comp}
\begin{figure}[htbp]
\includegraphics[height=0.983in,width=2.533in,angle=0]{C:/Research/Book/Figures/eps/IDEfusion.eps}
\caption{\sml IDE fusion Flow Chart}
\label{fg:idefusion}
\end{figure}
\vs6\noi Figure \ref{fg:idefusion} represents proposition \ref{pr:fus}.
\begin{figure}[htbp]
\includegraphics[height=0.727in,width=2.22in,angle=0]{C:/Research/Book/Figures/eps/IDEdefinesIDE.eps}
\caption{\sml IDE defines another IDE Flow Chart}
\label{fg:idedefide}
\end{figure}
\vs6\noi Figure \ref{fg:idedefide} represents a typical situation such as:
\[\X_0= \T_1(\X)=\exp(\A_1)(\X)\;\;\; \S_h(\X)=\exp(h\, \A_2)\,(\X-\X_0)+\X_0\]
\vs6\noi The IDE that defines $x\, \mod(1)$ is an example.
\begin{figure}[htbp]
\includegraphics[height=0.833in,width=2.607in,angle=0]{C:/Research/Book/Figures/eps/ComplexIDE.eps}
\caption{\sml Complex IDE Flow Chart}
\label{fg:complex}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.677in,width=3.0in,angle=0]{C:/Research/Book/Figures/eps/TransitionFlow01.eps}
\caption{\sml Plate A is a transition between two IDEs; Plate B is an indefinite series of transitions; Plate C is a transition with a loop; Plate D is a recursive transition between two IDEs.}
\label{fg:transflow01}
\end{figure}
\vs6\noi Plate D of Fig. \ref{fg:transflow01} is an example of a transition seen in Fig. \ref{fg:canonical3d}.
\vs6\noi In order to analyze time-one maps, a clock will be needed. This will be the Harmonic Oscillator Clock (HOC) described in Sec. \ref{sc:hoc}. The HOC will contribute to a transition function by defining a boundary condition.
\begin{figure}[htbp]
\includegraphics[height=1.167in,width=2.333in,angle=0]{C:/Research/Book/Figures/eps/AnosovTwistL.eps}
\caption{\sml IDE Flow with }
\label{fg:anasovtwistL}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=1.48in,width=2.35in,angle=0]{C:/Research/Book/Figures/eps/HarmonicClock.eps}
\caption{\sml The Flow for the Harmonic Oscillator Clock when Used to obtain a Time-one map}
\label{fg:harmonicclock}
\end{figure}
\vs6\noi Typically the HOC will be used to form a boundary condition $\S_1$ and then that boundary condition will be used to fuse two IDEs which, in turn, contribute to the formation of a transition function used in a more complex IDE.
\vs6\noi Figure \ref{fg:t1fr} compares the time-one flow with the first return flow.
\begin{figure}[htbp]
\includegraphics[height=3.0in,width=4.567in,angle=0]{C:/Research/Book/Figures/eps/T1FR.eps}
\caption{{\sml Plate A is the Flow Chart for the Time-one Map; Plate B is the Flow Chart for the First Return Map}}
\label{fg:t1fr}
\end{figure}
%04
%===============================The Shift Operations====================================
%=================================================================================================
\chapter{The Unilateral and Bilateral Shifts}
\label{ch:shift}
\begin{center}
\parbox{3.5in}{\em IDE theory will require an understanding of how complex systems are formed, particularly chaotic systems. The core concept for the study of complexity is the shift which comes in two forms: the unilateral (one-sided) shift and the bilateral (two-sided) shift. The shift is the mathematical realization of a {\bf random} coin toss. }
\end{center}
\vs6\noi This chapter will analyze the concept of a shift with a focus on three examples: (1) the unilateral shift; (2) the baker's transformation; and (3) the Anosov transformation. The reason that a shift must be recognizable from its form as an IDE is that if that is not possible, then it is even less possible to recognize the presence of chaos from the form of an arbitrary IDE. In Chapter \ref{ch:chaos}, an IDE is chaotic if there exists an invariant subset on which the system is a function of a shift. In order to resolve the Hirsch Conjecture it will be necessary to determine, from its form, whether a dynamical system has an invariant subset on which it is a function of a shift. This implies that it must be possible to recognize the presence of a function of a shift from the form of a dynamical system. And this, in turn, requires it to be possible to recognize the various algebraic forms in which a shift can appear in an equation. To support this approach, there must be canonical forms in which ODEs may be presented that support the determination of the presence of a function of a shift. The IDE is that canonical form; therefore it is necessary to use the IDE form to expose the presence of functions of a shift.
\vs6\noi To facilitate the recognition of chaos in the form of an IDE, three devices will be used. (A) A visualization schematic of the IDE; (B) The flow chart for the IDE and (C) The IDE itself. Figure \ref{fg:fusion01} illustrates these three devices.
\begin{figure}[htbp]
\includegraphics[height=2.333in,width=3.587in,angle=0]{C:/Research/Book/Figures/eps/Fusion01.eps}
\caption{\sml Plate A is the schematic of the fusion IDE; Plate B is the flow chart for the IDE; Plate C is the {\em fused} IDE}
\label{fg:fusion01}
\end{figure}
\vs6\noi Essential to the derivation of IDEs is the concept of fusion which is a useful device for constructing and de-constructing IDEs, see proposition \ref{pr:fus1}.
\vs6\noi There are six sections to this chapter. (1) What is a shift, Sec. \ref{sc:shift}; (2) analysis of $x\ra x\, \mod(1)$, Sec. \ref{sc:mod1}; (3) the unilateral shift, Sec. \ref{sc:ushift}; (4) the baker's transformation, Sec. \ref{sc:baker}; and (5) the Anosov transformation, Sec. \ref{sc:anosovt}; (6) analysis of the shift mechanism, Sec. \ref{sc:sfa}.
\vs6\noi The results of this chapter will be applied throughout the remainder of this book.
%============================Shift defined========================
\section{\sml The Concept of a Shift}
\label{sc:shift}
The root dynamic of all chaotic systems is the bilateral shift. The simplest algebraic form of a bilateral shift is given in Eq. \ref{eq:bshift01}. This is illustrated in Fig. \ref{fg:shift01}.
\begin{figure}[htbp]
\includegraphics[height=2.207in,width=4.5in,angle=0]{C:/Research/Book/Figures/eps/Shift01.eps}
\caption{\sml The Bilateral Shift on Two Symbols}
\label{fg:shift01}
\end{figure}
\begq
\label{eq:bshift01}
\T\l(\vt x.y\par \r)= \l(\vt k\cdot x-[k\cdot x].(y+[k \cdot x])/ k \par\r)
\endq
The shift concept does not depend on numbers since a complex series of apples and oranges arranged in a row can be used to illustrate the idea of a shift, see Fig. \ref{fg:apples}.
\begin{figure}[htbp]
\includegraphics[height=0.45in,width=2.47in,angle=0]{C:/Research/Book/Figures/eps/apples.eps}
\caption{\sml Apples and Oranges may be used as Symbols}
\label{fg:apples}
\end{figure}
\vs6\noi All that is required is that the sequence be infinite in both directions, have a place holder such as a lighter orange and that the sequence have positive algorithmic complexity. However, numbers are much more convenient.
\vs6\noi Let $\{a_n\}_{-\infty}^\infty$ be any sequence of numbers, letters or objects. Then the mapping
\[f(a_i)=a_{i+1}\]
is a shift of the indexing sequence by one step, thus the name {\em shift}. In the same manner, given a function on $\Rl$, the transformation $\T(f)(x)=f(x+1)$ maps $f$ to a function in which every element in the domain is shifted by one unit.
\vs6\noi The significance of the shift becomes more apparent if the sequence $a_n$ is generated by a coin toss. In this case, a shift is just another coin toss. And, by multiplying $a_k$ with $a_{k+1}$, the resulting sequence is nearly zero. This is a result of the idea that the future of a coin toss sequence is uncorrelated to its past.
\vs6\noi Thinking of a coin toss as a means of generating a complex sequence is just a metaphor. The only formal idea of a complex sequence is that of one having positive algorithmic complexity. A sequence with positive algorithmic complexity is one for which it is impossible to write down any finite algorithm to generate the sequence. Thus, the only way to describe such a sequence is to just write it down, somehow.
\vs6\noi Since sequences that are infinite in both directions are an abstraction, it is useful to think of {\em bi-infinite} sequences as being made up of two sequences that are only infinite in one direction. For example, the following construction may be used in place of a shift on a bi-infinite sequence.
\vs6\noi Consider the two sequences $\{a_n\}_0^\infty$ and $\{b_n\}_0^\infty$. A shift on the combined sequence is
\[(\{a_n\}_0^\infty), (\{b_n\}_0^\infty)\ra (\{a_n\})_1^\infty, (\{a_0,b_n\}_0^\infty)\]
Writing this out more clearly
\[(\{a_0\; a_1\; a_2\; \cdots\},\{b_0\; b_1\; b_2\; \cdots\})\ra ( \{ a_1\; a_2\; a_3 \cdots\},\{a_0\;b_0\; b_1\; b_2\; \cdots\})\]
Since it does not matter what the $a_i,\; b_i$ are this operation on pairs of sequences can be written as
\[(\{0\; 1\; 2\; 3 \cdots\},\{0\; 1\; 2\; 3 \cdots\})\ra (\{1\; 2\; 3 \; 4 \cdots\},\{0\;0\; 1\; 2\; 3 \cdots\})\]
Another iteration gives the result
\[(\{1\; 2\; 3 \; 4 \cdots\},\{0\;0\; 1\; 2\; 3 \cdots\})\ra (\{ 2\; 3 \; 4 \cdots\},\{1\;0\;0\; 1\; 2\; 3 \cdots\})\]
Regardless of the number of iterations of the index sequences, the current state of the original sequence pair can be obtained by substitution for the indexes with the rule that the left coordinate is from the $a_i$ sequence and the right coordinate can be identified as either from the $a_i$ or from the $b_i$ sequence depending on the 0 0 entry. More simply stated, the left coordinate is feeding the right coordinate.
\vs6\noi This is the general mechanism of a bi-lateral shift no matter what the objects are that are labeled by the indices. A unilateral shift would be confined to a single sequence of indices where, as the sequence is shifted to the left, the leading index is simply thrown away.
\vs6\noi The bilateral shift on $k$ symbols on the unit square is as follows (using IDE notation)
\begq
\label{eq:sf01}
\T\l(\vt x.y\par \r)= \l(\vt \exp(\ln(k))\cdot x-\In(\exp(\ln(k))\cdot x).(y+\In(\exp(\ln(k))\cdot x)/k \par\r)
\endq
Using shorthand notation this is
\begq
\T\l(\vt x.y\par \r)= \l(\vt \{k\cdot x\}.(y+[k \cdot x])/ k \par\r)\nonumber
\endq
\begq
\label{cd:sf01}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Eq. \ref{eq:sf01} is as follows:}\\
&& \mbox{For i = 1 to N}\\
u &=& \exp(\ln(k))\cdot x -\In(\exp(\ln(k))\cdot x)\\
v &=& (y + \In(\exp(\ln(k))\cdot x)) \cdot \exp(-\ln(k)) \\
x &=& u\\
y &=& v\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi In Sec. \ref{sc:baker} this form of the bilateral shift will be used to derive IDEs which are approximations to the bilateral shift.
\begq
\T_h (\X)= \l(\vt \exp(h\, \ln(k))\cdot x-\In(\exp(h\, \ln(k))\cdot x).(y-\In(\exp(h\,\ln(k))\cdot x)/k +\In(\exp(h\,\ln(k))\cdot x) \par\r)
\endq
%===========================x mod(1)===============================
\section{\sml The Dynamic of the Function $\ y=x\, \mod(1)$}
\label{sc:mod1}
The most classical examples of shift operators all use the function $x\ra x\, \mod(1)$. Therefore, this section will analyze the function $x\ra x\, \mod(1)$ that will be essential for the remainder of this chapter.
\vs6\noi It is well established that the function $x\ra 2\cdot x \, \mod(1)$ is a unilateral shift. This is proven by use of the binary radix to represent a number in which $x\ra 2\cdot x \, \mod(1)$ is represented by multiplying a binary number by $10$ and discarding any integer to the left of the decimal. I.e., $2\,x $ is multiplication by 10 and mod(1) is dropping the integer.
\vs6\noi In general, in ergodic theory and dynamical systems the shift operation relies on the function $x\ra x\, \mod(1)$. This is apparent in the function $x\ra 2\, x \mod(1)$, but is also present in the baker's transformation and Anosov diffeomorphisms \cite{bi:aa}. However, $x\ra 2\cdot x \, \mod(1)$ does not occur in any algebraic form in dynamical systems that arise in nature. So it is necessary to construct its variants that do arise in nature and thus in equations that model nature. The significance of this for the Hirsch Conjecture is that if it is not possible to recognize a shift by its algebraic form, then there is no possibility of recognizing chaos in more complex algebraic forms.
\vs6\noi There are two algebraic forms of the function $x\, \mod(1)$ in IDE theory. One form uses the IDE $\exp(h\, \pi\, \B)$, called the elliptic form, and the other uses the hyperbolic functions, referred to as the hyperbolic form, see Fig. \ref{fg:xmod1}.
\vs6\noi The $2\cdot x$ factor of $2\cdot x \, \mod(1)$ is relatively transparent and its variants occur in natural hyperbolic systems. What is not transparent is the analog of the $\mod(1)$ dynamic in natural equations. An analysis of this dynamic is presented in the following section.
The function $\ y=x\, \mod(1)$ plays a central role in many dynamical systems but does not occur in Nature. For example, $ 2\, x \mod(1)$ can be expressed in complex variables as a mapping on the unit circle $ \theta \ra \exp(2\, \pi i \, \theta)=\cos(2\, \pi \, \theta)+i\, \sin(2\, \pi \, \theta)$. It is a periodic function and this fact provides the linkage between the abstract mapping $x\, \mod(1)$ and analogous dynamics in nature. When considered in the real line $\Rl$, it has a countable number of discontinuities, see Fig. \ref{fg:xmod1a} Plate A. Also, the $\mod(1)$ function occurs in the definition of an n-dimensional torus. Two important and frequently occurring examples of the occurrence of $\mod(1)$ are
\[x\ra 2\, x\, \mod(1)\]
the unilateral shift, and
\[\l(\vt x.y \par \r)\ra \l(\mtx 2.1.1.1 \par \r)\l(\vt x.y \par \r)\mod(1)\]
the Anosov diffeomorphism on the torus which is a bilateral shift, \cite{bi:yk}.
\vs6\noi The dynamic of $x \, \mod(1)$ occurs in ODEs as a periodic function. Examples are seen in Fig. \ref{fg:xmod1a}, Plates C, D.
\vs6\noi Figure \ref{fg:xmod1} is the $\Ci$ analog of $ x \, \mod(1)$. It is a periodic function. Its sole function is to return a system back to an initial place.
\begin{figure}[htbp]
\includegraphics[height=2.957in,width=3.767in,angle=0]{C:/Research/Book/Figures/eps/xmod1a.eps}
\caption{{\sml Algebraic Variants of $x\, \mod(1)$}}
\label{fg:xmod1a}
\end{figure}
\vs6\noi The operative dynamic in Plates C, D is the same, i.e., they are morphologically equivalent: They simply return the system to a point that is a fixed time or distance from the end state. That is the dynamic of the time-one map. Therefore, the mod(1) function is the basic mechanism of the time-one map. As Fig. \ref{fg:xmod1a} demonstrates, this mechanism can come in many variations, all morphologically equivalent.
\vs6\noi However, in dynamical systems it is more likely that first return dynamics will be supplied by standard periodic functions. This is the point of Fig. \ref{fg:xmod1b} in which the functions of Fig. \ref{fg:xmod1a} have been replaced by the first two terms of their Fourier Series. To produce time-one dynamics only two terms of the Fourier series are needed. The simplest possible time-one dynamics is supplied by a semi-circle and the general form of the semi-circle in IDE form is given by
\[\T_h=\exp(h \B)\]
\begin{figure}[htbp]
\includegraphics[height=2.063in,width=3.793in,angle=0]{C:/Research/Book/Figures/eps/xmod1b.eps}
\caption{\sml Fourier Series Variants of $x\, \mod(1)$}
\label{fg:xmod1b}
\end{figure}
\vs6\noi To produce the shift dynamic is much simpler. The shift is $2\, x=\exp(\ln(2))\, x$ and the simplest IDE form for this dynamic is
\[\S_h=\exp(h \ln(2)\I_1) \]
For $h=1$, $\S_1=\l(\mtx 2.0.0.1/2\par\r)$.
\vs6\noi The fusion IDE, see proposition \ref{pr:fus}, provides a means of connecting a hyperbolic system with a periodic function to produce the potential for chaotic dynamics.
\[f(\X)\, \T_h+(1-f(\X))\, \S_h\]
\begin{figure}[htbp]
\includegraphics[height=2.607in,width=2.037in,angle=0]{C:/Research/Book/Figures/eps/xmod1c.eps}
\caption{\sml A Semi-circle, Plate D, is morphologically equivalent to Plates A, B, C and Provide the same Dynamical Contribution to the System and the same Contribution as $x\, \mod(1)$}
\end{figure}
\sml
\vs6\noi So long as the semi-circle has a radius of 0.5, which assures that it provides the same dynamic as $x\, \mod(1)$, a semi-circle of diameter 1 can function as $x \, \mod(1)$. This is more likely what will be found in a chaotic dynamical system. Being able to recognize this function is a step toward being able to determine the presence of chaos from the form of an IDE.
\vs6\noi The occurrence of a morphologically equivalent dynamic to $x \, \mod(1)$ is not easily implemented or recognized. For example, following the dynamic $x\ra 2\, x$ it is necessary to have a periodic function that subtracts exactly 1. The simplest example would be a semi circle properly centered and dependent on the value of $2\, x$. The IDE needed following $2\, x$ requires remembering $2\, x $. Call this value $x_0$. The necessary IDE to provide the action of the mod function is
\[\exp(h \,\pi\, \B)(\X-\X_0) +\X_0\]
and is dependent on the IDE $\X \ra \exp(h \ln(2)\, \I_1)\, \X$ after it is iterated until it reaches the value $2$.
\begin{definition}{\bf cmod1(\X)}
\[{\rm cmod1}(\X)=\exp(h \,\pi\, \B)(\X-\X_0) +\X_0\]
\end{definition}
\begin{figure}[htbp]
\includegraphics[height=2.75in,width=3.64in,angle=0]{C:/Research/Book/Figures/eps/mod1comp.eps}
\caption{\sml Flow diagram for successive applications of cmod1 operation}
\label{fg:mod1comp}
\end{figure}
\vs6\noi The short story is that in order to recognize the presence of chaos from the form of an IDE requires understanding how a periodic function provides the {\em non trivial} action of $x\, \mod(1)$ in a dynamical system.
\vs6\noi Constructing the canonical shift IDEs for first return and time-one maps is the objective of the next two sections.
\vs6\noi What will come to light is that the time one map required to function as a shift is considerably more complex than the first return map.
%==================================
\vs6\noi{\bf Note on the Formation of Complexity}
The operation $\mod(1)$ acts in opposition to the operation $2\, x$. Figure \ref{fg:cmod1a}, Plate A illustrates this dynamic. However, even if the action of the cmod function has the same orientation as the stretching operation, chaos can still form as will be demonstrated. However, when the cmod function opposes the stretching operation as is seen in the horseshoe paradigm, complexity is more easily identified.
\vs6\noi Figure \ref{fg:cmod1b} uses the $\sgn$ function. The $\sgn$ function is not necessary since it can be replaced by $\tanh(\beta\, u)$ for $\beta$ large. The point of this figure is that complexity also increases when the transition function has a very high derivative. This simulates an impulse transition.
\begin{figure}[htbp]
\includegraphics[height=2.39in,width=3.827in,angle=0]{C:/Research/Book/Figures/eps/cmod1b.eps}
\caption{\sml{\sml Plate A Four orbits with cmod1 reversing the direction of the IDE; Plate B Four orbits with cmod1 orientation the same as the IDE; Both plates use the $\sgn$ }}
\label{fg:cmod1b}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.35in,width=3.84in,angle=0]{C:/Research/Book/Figures/eps/cmod1a.eps}
\caption{\sml{\sml Plate A Four orbits with cmod1 reversing the direction of the IDE; Plate B Four orbits with cmod1 orientation the same as the IDE; Both plates use the $\tanh$ }}
\label{fg:cmod1a}
\end{figure}
\begq
\label{cd:cmod1a}
\left.
\begin{array}{lcl}
&&\hspace{12pt}\mbox{\bf The Code for Fig. \ref{fg:cmod1a}}\\
&&\mbox{HOC}\\
&&\mbox{IDE}\\
&&\mbox{Fig. \ref{fg:cmod1b}}\\
p_c &=& 0.5 \cdot (1 + \sgn(z))\\
&&\mbox{Plate A}\\
\S_h&=&\exp(- h \, \B) (\X-\X_0)+\X_0\\
&&\mbox{Plate B}\\
\S_h&=&\exp( h \, \B) (\X-\X_0)+\X_0\\
\\
&&\mbox{Fig. \ref{fg:cmod1a}}\\
p_c &=& 0.5 \cdot (1 + \tanh(z))\\
&&\mbox{Plate A}\\
\S_h&=&\exp(- h \, \B) (\X-\X_0)+\X_0\\
&&\mbox{Plate B}\\
\S_h&=&\exp( h \, \B) (\X-\X_0)+\X_0
\end{array}\right \}
\endq
%=================================Canonical Shift IDE=====================================================
\section{\sml Unilateral Shifts}
\label{sc:ushift}
As noted in the introduction, Chapter \ref{ch:intro}, the shift operators are idealizations of the concept of a random coin toss. It is for their relationship to the metaphor, {\em random} that Jim Yorke of the University of Maryland coined the term chaos because shifts are actually deterministic operators, but look like random processes. Thus the conclusion, if they are deterministic and yet look random it must be chaos.
\vs6\noi Shifts come in many denominations such as $2\, x\, \mod(1), \;\; 3\, x\, \mod(1),\;\; \cdots k\, x\, \mod(1)$. They provide different levels of complexity based on the concept of entropy, \cite{bi:wp}, p.6. These examples are called left shifts and can be generalized to be bilateral shifts, see the baker's transformation, \cite{bi:aa}. The adjective {\em baker} refers to a cook, not a person named {\em Baker}.
\vs6\noi This section will analyze the unilateral shift and construct IDEs whose time one maps are unilateral shifts. Perturbations of shifts will also be presented.
%===========================================
\subsection{\sml Canonical Unilateral Shift IDE First Return Map}
\label{sc:shiftIDE}
The point of this section is to construct an IDE, in closed form in terms of elementary functions (no Boolean logic statements), that is exactly a unilateral shift. The significance of this example for the Hirsch Conjecture is that if there is any hope of recognizing that a system is chaotic from its form, then the presence of chaos must be transparent in this example based solely on its form.
\vs6\noi Two examples will be given. A unilateral shift on two symbols, $2\, x\, \mod(1)$ and a unilateral shift on three symbols, $3\, x \, \mod(1)$.
\begin{figure}[htbp]
\includegraphics[height=2.357in,width=4.603in,angle=0]{C:/Research/Book/Figures/eps/ShiftIDE.eps}
\caption{\sml Canonical Unilateral Shift on Two Symbols IDE. Plate B is Exactly the First Return map of a Unilateral Shift; Plate A is a Graphical Representation of the Shift Derived from Fig. \ref{fg:timeone} }
\label{fg:shiftide}
\end{figure}
\begq
\label{cd:shfide}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:shiftide} Plate B, is as follows:}\\
M&=&1000\\
h&=& 1/M\\
&& \mbox{For i = 1 to 200000}\\
q &=& 0.5 \cdot (1 + \sgn(x - h))\\
p &=& 0.5 \cdot (1 - \sgn(1 - y))\\
s &=& (1 - p) \cdot (s + q - s \cdot q)\\
\\
u_1 &=& 0\\
v_1 &=& \exp(\ln(2)\cdot h) \cdot y\\
\\
u_2 &=& \cos(\pi \cdot h) \cdot x - \sin(\pi \cdot h) \cdot (y - y_0)\\
v_2 &=& \cos(\pi \cdot h) \cdot (y - y_0) + \sin(\pi \cdot h) \cdot x + y_0\\
\\
x &=& s \cdot u_1 + (1 - s) \cdot u_2\\
y &=& s \cdot v_1 + (1 - s) \cdot v_2\\
\\
&&\mbox{{\bf Plot Point}}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi The formal equations are
\[
\T_h\l(\begin{array}{c}
z\\
x\\
y
\end{array}\r)
=\l( \begin{array}{ccc}
(1-p(x))\,( z+q(y)-z\cdot q(y))&&\\
(1-z)\cdot (x\,\cos(\pi\cdot h)&-&(y-c)\, \sin(\pi\cdot h))\\
z\cdot \exp(\ln(2)\cdot h)&+&(1-z)((y-c)\,\cos(\pi\cdot h)+x\, \sin(\pi\cdot h))+c\\
\end{array} \r)
\]
where $q(x) = 0.5 \cdot (1 + \tanh(50\cdot(x - h)))$ and $p(y) = 0.5 \cdot (1 - \tanh(50\cdot(1 - y)))$.
\vs6\noi The first return map constructed here is a function from the interval $[1, \, 1+h]$ to the interval $[0, \, h]$ and is very hard to observe in a graph. However, the composition maps $[1, \, 1+h]$ onto itself and is a shift. This needs formal proof. In higher dimensions the shift occurs around a fixed point.
\vs6\noi Figure \ref{fg:shiftide} is the projection of this IDE onto a two-dimensional plane. While the figure in Plate B appears very simple, its analytical form is not. The fusion IDE structure is
\[\T_h(\X)=s(\Z)\exp(h \ln(2)\I_1)\, \X+(1-s(\Z))\exp(-h\,\pi\, \B)\, \X\]
where $\|s(\Z)(1-s(\Z))\|\leq 1$ and the clock marks that a full cycle has transpired. This means that $h=1$ and therefore $\exp(\ln(2))=2$.
\vs6\noi At this point the IDE switches to the mod(1) operation (the semi circle of radius 1). When the semi circle has returned to $y=0$, or in terms of the code $q=1$, $s$ switches back to 1 and the process repeats.
\vs6\noi The recursive nature of $s$ is essential to the dynamics of the IDE.
\vs6\noi This IDE has three simple boundary conditions, $p_c,\, p_1,\, q$. There is one compound boundary condition $p=p_c+p_1-p_c \cdot p_1$ and one recursive boundary condition, $s$, involving all boundary conditions. They combine to define the transition function that governs the transition between $2\, x$ and $x\, \mod(1)$. Stated more analytically, $s$ governs the transition from the hyperbolic dynamic to the elliptic dynamic.
\end{center}
\small
%=======================================Canonical Unilateral Time-one Shift IDE ======================================================================================
\subsection{\sml The Canonical Unilateral Shift on Two Symbols Time-one IDE}
As noted previously, for IDE theory first return maps and time-one maps differ significantly in dynamics but not in form. Figure \ref{fg:ushift01} is a time-one map for an IDE that closely duplicates the dynamics of a discrete unilateral shift on two symbols, specifically $2\, x\, \mod(1)$.
\begin{figure}[htbp]
\includegraphics[height=2.403in,width=4.473in,angle=0]{C:/Research/Book/Figures/eps/ushift01.eps}
\caption{\sml Time-one map for the Canonical Unilateral Shift IDE: Plate A is the IDE time series for $h=0.001$; Plate B is the Time-one Delay Map}
\label{fg:ushift01}
\end{figure}
\begq
\label{cd:ushft}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:ushift01} is as follows:}\\
M&=&1000\\
h&=& 1/M\\
&& \mbox{For i = 1 to 200000}\\
z_c &=& \cos(2\, \pi \,h) \cdot z + \sin(2\, \pi \,h) \cdot w\\
w_c &=& \cos(2\, \pi \,h) \cdot w - \sin(2\, \pi \,h) \cdot z\\
z &=& z_c\\
w &=& w_c\\
p_c &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - w)))\\
q &=& 1-p_c\\
\\
u_1 &=& \exp(\ln(2)\cdot h) \cdot x\\
v_1 &=& \exp(-\ln(2)\cdot h) \cdot y\\
\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - x)))\\
p &=& p_1 + p_c - p_1 \cdot p_c\\
s &=& p \cdot (s + q - s \cdot q)\\
x_0 &=& s \cdot (x - 0.5) + (1 - s) \cdot x_0\\
\\
u_2 &=& (x-x_0)\cdot \cos(\pi \cdot h) - y \cdot \sin(\pi \cdot h) +x_0\\
v_2 &=& y \cdot \cos(\pi \cdot h) + (x-x_0) \cdot \sin(\pi \cdot h) \\
\\
x &=& s \cdot u_1 + (1 - s) \cdot u_2\\
y &=& s \cdot v_1 + (1 - s) \cdot v_2\\
\\
&&\mbox{{\bf Plot Point}}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=1.943in,width=3.177in,angle=0]{C:/Research/Book/Figures/eps/ushift01a.eps}
\caption{\sml Flow Chart for $2\, x\, \mod(1)$ IDE}
\label{fg:ushift01a}
\end{figure}
\begin{equation}
\label{eq:time1}
\T:\left\{
\begin{array}{lcl}
\Z_c&\ra& \exp(-2\, \pi \, h\, \B)\, \Z_c\\
z&\ra&p(x)\cdot(z+q(y)-z\cdot q(y))\\
w&\ra& z\cdot (x-0.5)+(1-z) \cdot w\\
\X&\ra& z\cdot \exp(h \, \ln(2)\, \I_1)\, \X+(1-z)\, \exp(-h\cdot \pi \B)\, (\X-\P_1\,\W)+\P_1\,\W
\end{array}\right\}
\end{equation}
where
\begin{equation}
\label{eq:time1a}
\left.\begin{array}{lcl}
\W&=&\l(\vt w.z\par\r)\\
p_c &=& 0.5 \cdot (1 + \sgn((1 - w_c)))\\
p_1 &=& 0.5 \cdot (1 + \sgn((1 - y)))\\
q &=& 1-p_c\\
p &=& p_1 + p_c - p_1 \cdot p_c\\
\end{array}\right\}
\end{equation}
\vs6\noi Equation \ref{eq:time1} is the fusion of $\exp(h\, \ln(2)\I_1)$ and $ \exp(-h\cdot \pi \B)\, (\X-\P_1\,\W)+\P_1\,\W$, a semi circle of diameter 1.
\begin{theorem}
\label{tm:time1}
For the initial condition $(x,\,0)$, the time-one map for Eq. \ref{eq:time1} with boundary conditions Eq. \ref{eq:time1a} is $2\, x\, \mod(1)$.
\end{theorem}
\pf
Starting with the initial condition $(x, \, 0)$, $\T$ is iterated until $p=0$. At the end of this iteration, $h=1$ and therefore $\T(x, \, 0)=(2\, x, \, 0)$ with $x>1$. Designate this value of $x$ as $x_0$ since, for $z=0$, it is a constant. For $ z=0$, $\T= \exp(-h\cdot \pi \B)\, (\X-\P_1\,\W)+\P_1\,\W$, where $\W=(x_0, \,0)$. Starting from $z=0$, $\T$ is iterated until $p_c=1$ at which time $\T(\X)=(x_0-1, \, 0)$ which is $x\, \mod(1)$. Therefore, the composition of these two maps is $2\, x\, \mod(1)$.
\rl
\begin{equation}
\label{eq:time1b}
\left.\begin{array}{lcl}
\W&=&\l(\vt w.z\par\r)\\
p_c &=& 0.5 \cdot (1 + \tanh((1 - w_c)))\\
p_1 &=& 0.5 \cdot (1 + \tanh((1 - y)))\\
q &=& 0.5 \cdot (1 + \tanh((h - x)))\\
p &=& p_1 + p_c - p_1 \cdot p_c\\
\end{array}\right\}
\end{equation}
\begin{theorem}
\label{tm:time1a}
Let $\S$ be the IDE defined by Eq. \ref{eq:time1} with the initial condition $(x,\,0)$ and boundary conditions Eq. \ref{eq:time1b} and Let $\T$ be the IDE defined by Eq. \ref{eq:time1} with the initial condition $(x,\,0)$ and boundary conditions Eq. \ref{eq:time1a}. Then
\[\|\T-\S\|\leq \epsilon(\beta,h)\]
and as $\beta\ra \infty, \;\; \epsilon\ra 0$.
\end{theorem}
\pf The difference between $\T $ and $\S$ is the maximum of $\|\tanh(\beta \cdot u)-\sgn(u)\|$ over their domain, which can be made arbitrarily small as $\beta\ra \infty$.
\rl
\vs6\noi Figure \ref{fg:ushift01}, Plate B, illustrates that the delay map is precisely $2\, x\, \mod(1)$. If the delay plot is separated by two cycles the delay map will have points from, in addition to $2\, x \mod(1)$, $4\, x \mod(1)$, $8\, x \mod(1)$, etc. This is the simplest IDE that can be formed to produce the time-one map of $2\, x\, \mod(1)$.
\vs6\noi To determine if an IDE can produce chaos based on its form, it is necessary to find a stretching dynamic, the exponential, a folding dynamic the semi circle or any periodic function, and a transfer function that links these two dynamics. When these three elements are present in a system there is the potential for the system to produce chaos depending on how stretching and folding are fused. It is possible to fuse stretching and folding so that the result is periodic. This is abundantly illustrated by Fig. \ref{fg:spiderweb}.
%===================================================Perturbations of 2 x mod 1=======================================
\subsection{\sml Perturbations of the Unilateral Shift IDE on Two Symbols}
It is now possible to look at small perturbations of the shift IDE for the purpose of being able to recognize from its form that small perturbations of a shift are chaotic.
\begin{figure}[htbp]
\includegraphics[height=2.667in,width=2.667in,angle=0]{C:/Research/Book/Figures/eps/Shift02.eps}
\caption{\sml Unilateral Shift as an Attractor}
\label{fg:shift02}
\end{figure}
\vs6\noi The first and simplest perturbation is to make the shift an attractor. Figure \ref{fg:shift02} provides a detailed diagram of the unilateral shift IDE demonstrating that the unilateral shift IDE has a basin of attraction of positive Lebesgue measure.
\begin{figure}[htbp]
\includegraphics[height=2.35in,width=4.607in,angle=0]{C:/Research/Book/Figures/eps/Shift02a.eps}
\caption{\sml Plate A, The Unilateral Shift IDE as an Attractor; Plate B The Delay Plot for Plate A}
\label{fg:shift02a}
\end{figure}
\vs6\noi Figure \ref{fg:shift02a} illustrates the unilateral shift in IDE form as an attractor. Figure \ref{fg:shift02a} demonstrates that points are rapidly driven to the unstable manifold by the stable manifold. This is the mechanism that produces {\em strange attractors}.
\begin{figure}[htbp]
\includegraphics[height=4.0in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/UnilateralShift.eps}
\caption{\sml Time-one map for three perturbations of the Canonical Unilateral Shift IDE}
\label{fg:ushift}
\end{figure}
\vs6\noi Figure \ref{fg:ushift} Plate A shows that small perturbations of the unilateral shift can account for rotation/curl in three dimensions. Plates B and C provide two additional variations of the shift. In particular, Plate C shows that the single scroll, Fig. \ref{fg:scroll}, is a perturbation of the shift.
\begq
\label{cd:ushf}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Plate C of Fig. \ref{fg:ushift} is as follows:}\\
M&=&1000\\
h&=& 1/M\\
\alpha &=&1.7\cdot \pi\\
\beta&=&5.0\\
s&=&1\\
z&=& 0.0\\
\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z+h\\
u_1 &=& x\cdot\exp(\ln(0.2)\cdot h)\\
v_1 &=& y\exp(\ln(2)\,h) \tanh(3\cdot y)\\
\\
u_2 &=& \exp(\ln(0.2) \cdot h)(x \cdot \cos(\alpha \cdot h) + (y-y_0) \cdot \sin(\alpha \cdot h ))\\
v_2 &=& \exp(\ln(0.2) \cdot h)( (y -y_0) \cdot \cos(\alpha \cdot h) - x \cdot \sin(\alpha \cdot h))+y_0\\
\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta(1 - z)))\\
p_2 &=& 0.5 \cdot (1 + \tanh(\beta(1 - y)))\\
q &=& 0.5 \cdot (1 + \tanh(\beta(h - x)))\\
p &=& p_1 + p_2 - p_1 \cdot p_2\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
{\rm If} s &=& 1 \mbox{ Then $y_0 = v_1 - 0.5$}\\
{\rm If} k &=& 0 \mbox{ Then $z = z \mod(1)$}\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi The form of the perturbations of the unilateral shift time-one IDE is as follows:
\[\T_h(\X)=s(\Z)\exp(h \ln(\D))\, \A(\X)\, \X+(1-s(\Z))\exp(h \ln(0.5)\I)\,\exp( h\,\pi\, \B)\, \X\]
% NOTE(review): this line is garbled -- prose ("where $\|s(\Z)(1-s(\Z))\|\leq 1$ ...") has been
% merged with a code fragment ("then GoTo line2:"), and the opening of the code display
% (\begq ... \begin{array}) appears to be missing. Reconstruct from the original manuscript.
where $\|s(\Z)(1-s(\Z))\|1$ then GoTo line2:}\\
&& \mbox{Next i}\\
&&\mbox{GoTo line1:}\\
r&=&\sqrt{x ^ 2 + y ^ 2}\\
\\
&& \mbox{line2:}\\
x_0&=&x/r \, (r-0.5)\\
y_0&=&y/r \, (r-0.5)\\
&& \mbox{For i = 1 to M}\\
u &=& (x-x_0) \cdot \cos(\omega \, h) + (y-y_0) \cdot \sin(\omega \, h)+x_0\\
v &=& (y-y_0) \cdot \cos(\omega \, h) - (x-x_0) \cdot \sin(\omega \, h)+y_0\\
x &=& u\\
y &=& v\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}\\
&&\mbox{GoTo line1:}
\end{array}\right \}
\endq
\vs6\noi The code for Fig. \ref{fg:anosovtwist04} provides the time series for the Anosov shift, but it is not in closed form in terms of elementary functions. By observing that the factor $(1 + h\, \theta / (r\, \omega \, ))$ plays the role of multiplying by 2, it can be replaced by $\exp(h\, \ln(2))$. Doing this produces the IDE in Fig. \ref{fg:anot}.
\begin{figure}[htbp]
\includegraphics[height=2.397in,width=4.567in,angle=0]{C:/Research/Book/Figures/eps/AnoT.eps}
\caption{\sml Plate A is the Time Series for the Modified Anosov Twist in the Plane; Plate B is the Time-one map for the Modified Anosov Twist}
\label{fg:anot}
\end{figure}
\begq
\label{cd:anot}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:anot}}\\
\alpha&=& 2\,\pi\\
\beta&=&200\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - 0.01 \cdot h - w)))\\
q&=& 1-p_c\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
u &=& x \cdot \cos(h \cdot r) - y \cdot \sin(h \cdot r)\\
v &=& y \cdot \cos(h \cdot r) + x \cdot \sin(h \cdot r)\\
u_1 &=& \exp( h\,\ln(2))\cdot u\\
v_1 &=& \exp( h\,\ln(2))\cdot v\\
r &=& \sqrt{u_1 ^ 2 + v_1 ^ 2}\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta \cdot(1 - r)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot (u_1 / r) \cdot (r - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (v_1 / r) \cdot (r - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin(\pi \cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi \cdot h) - (x-x_0)\cdot\sin(\pi \cdot h)+y_0\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{problem}
Derive the IDE for the Anosov shift.
\end{problem}
%==================================Perturbations============================
\subsection{\sml Perturbations of the Anosov Shift}
This section presents a perturbation of the Anosov shift which retains the complexity of the Anosov shift.
\begin{figure}[htbp]
\includegraphics[height=2.427in,width=4.547in,angle=0]{C:/Research/Book/Figures/eps/Anosov04.eps}
\caption{\sml Plate A is the Anosov Twist in the Plane; Plate B is the time-one map for the planar Anosov map }
\label{fg:anosov04}
\end{figure}
\begin{equation}
\label{cd:an04}
\left. \begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:anosov04} Plate A is as follows:}\\
M&=&1000\\
h&=&1/M\\
\\
&& \mbox{For i = 1 to M}\\
z &=&z_1\, \cos(2\, \pi \, h) - w_1\,\sin(2\, \pi \, h )\\
w &=& w_1\,\cos(2\, \pi \, h) + z_1\,\sin(2\, \pi \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &= & 0.5 \cdot (1 + \sgn(z))\\
q&=&1-p_c\\
r_0&=&\sqrt{u_1 ^ 2 + v_1 ^ 2}\\
p_1 &=& 0.5 \cdot (1 + \sgn(1 - r_0))\\
p &=& p_1 + p_c - p_1 \cdot p_c\\
\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot (x / r_0) \cdot (r_0 - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (y / r_0) \cdot (r_0 - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0) \cdot \cos(\pi \, h) -(y-y_0) \cdot \sin(\pi \, h)+x_0\\
v_2 &=& (y-y_0) \cdot \cos(\pi \, h) + (x-x_0) \cdot \sin(\pi \, h)+y_0\\
\\
x &=& s \cdot u_1 + (1 - s) \cdot u_2\\
y &=& s \cdot v_1 + (1 - s) \cdot v_2\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right\}
\end{equation}
\begin{figure}[htbp]
\includegraphics[height=2.057in,width=4.597in,angle=0]{C:/Research/Book/Figures/eps/Sum10.eps}
\caption{\sml Plate A is the Time Series for Code \ref{cd:an04} where $r$ is replaced by $f(r)=(r+\epsilon/r)\cdot \omega$, with $\omega=\sqrt{2}/5$; Plate B is the Time-one Map}
\label{fg:sum10}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.377in,width=4.43in,angle=0]{C:/Research/Book/Figures/eps/Sum11.eps}
\caption{\sml Figure \ref{fg:sum10} Rotated in $\Rl^3$ }
\label{fg:sum11}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.28in,width=4.49in,angle=0]{C:/Research/Book/Figures/eps/Sum14.eps}
\caption{\sml Figure \ref{fg:sum10} with $\omega=\sqrt{2}/3$ }
\label{fg:sum14}
\end{figure}
%=========================Bilateral Shift Analysis================
\section{\sml Generic Bilateral Shift Analysis}
\label{sc:sfa}
Examination of the proof that the baker's transformation is a bilateral shift reveals that three components are present: (1) a unilateral shift such as $x\ra 2\, x\, \mod(1)$ which is the unstable manifold of a hyperbolic fixed point; (2) a stable manifold of a hyperbolic fixed point; and, (3) a method of linking the stable and unstable manifolds. Simply having a hyperbolic fixed point is not sufficient. There must be some mechanism that links the stable and unstable manifolds to generate a bilateral shift. The purpose of this section is to derive the simplest algebraic formulation of a bilateral shift. The formulation will not be in terms of elementary functions. The following result follows from the proof that the baker's transformation is a bilateral shift.
\begin{theorem}
Equation \ref{eq:bshift} is a generic bilateral shift on k symbols on the unit square.
\begq
\label{eq:bshift}
\T\l(\vt x.y\par \r)= \l(\vt \{k\cdot x\}.(y+[k \cdot x])/ k \par\r)
\endq
\end{theorem}
\pf
Let $(x,y)=(0.a_1a_2a_3\cdots,\,0.b_1b_2b_3\cdots) \in [0,\,1)\times [0,\,1)$ in radix $k$. Note that $k\, x$ is a left shift and that $y/k$ is a right shift. Organize the coordinates as in the proof that the baker's transformation is a bilateral shift.
$k \, x \, \mod(1)=0.a_2a_3a_4\cdots$. The integer part of $k\, x$ is $a_1$. This must be inserted as the first entry in $y$, i.e., form the sum $y+[k\, x]= 0.b_1b_2b_3\cdots + a_1.000\cdots=a_1.b_1b_2b_3\cdots$. Dividing by $k$ (a right shift) gives
$0.a_1b_1b_2b_3\cdots$. The combined action is a bilateral shift. \rl
\begin{corollary}
Equation \ref{eq:bshifta} is a bilateral shift on 2 symbols on the unit square.
\begq
\label{eq:bshifta}
\T\l(\vt x.y\par \r)= \l(\vt \{2\cdot x\}.(y+[2 \cdot x])/ 2 \par\r)
\endq
\end{corollary}
\vs6\noi The transformation of Eq. \ref{eq:bshifta} consists of an unstable manifold $2\, x$; a stable manifold $y/2$ and a method of linking the two by adding $[2\, x]$ to $y$. Intuitively, the stable and unstable manifolds have intersected transversally through $[2\, x]$.
\vs6\noi Transcribing the bilateral shift into a {\em formula} gives Eq. \ref{eq:sf02}.
\begq
\label{eq:sf02}
\T\l(\vt x.y\par \r)= \l(\vt \exp(\ln(2))\cdot x-\In(\exp(\ln(2))\cdot x).(y+\In(\exp(\ln(2))\cdot x)/2 \par\r)
\endq
Coding Eq. \ref{eq:sf02} in VB and plotting a single orbit gives Fig. \ref{fg:sf02}.
\begin{figure}[htbp]
\includegraphics[height=2.353in,width=2.407in,angle=0]{C:/Research/Book/Figures/eps/sf02.eps}
\caption{\sml One orbit of Eq. \ref{eq:sf02}}
\label{fg:sf02}
\end{figure}
\vs6\noi The time one orbit of a shift operator does not contain any KAM regions, although it has a dense set of periodic points. The appearance of KAM island chains is the product of the particular perturbation of the shift.
\vs6\noi The time-one map may be written in shorthand as
\begq
\label{eq:sf02e}
\T_1\l(\vt x.y\par \r)= \l(\vt 2\, x- [2\, x].(y+[2\, x])/2) \par\r)
\endq
\begin{figure}[htbp]
\includegraphics[height=2.45in,width=4.563in,angle=0]{C:/Research/Book/Figures/eps/sf02f.eps}
\caption{\sml Plate A is the time one map for Eq. \ref{eq:sf02e}; Plate B is a time-one map for Eq. \ref{eq:sf02f}}
\label{fg:sf02f}
\end{figure}
\begq
\label{eq:sf02f}
\T_1\l(\vt x.y\par \r)= \l(\vt 2\, x- [2\, x].(y-[2\, x])/2+[2\, x]) \par\r)
\endq
Equation \ref{eq:sf02f} is a large perturbation of a shift, Eq. \ref{eq:sf02e}. However, the characteristic {\em random} appearance of the points of the orbit is retained. This example demonstrates that even very large perturbations of a shift remain complex.
%===========================================Transitions======================================
\section{\sml Transition of the Abstract Bilateral Shift to an IDE}
Using previously employed methods, it is now possible to derive approximations to the bilateral shift in closed form, in terms of elementary functions that are IDEs. See Fig. \ref{fg:sf02a} and Eq. \ref{eq:sf02a}
\begin{figure}[htbp]
\includegraphics[height=2.397in,width=4.59in,angle=0]{C:/Research/Book/Figures/eps/sf02a.eps}
\caption{\sml Plate A is the time series for Eq. \ref{eq:sf02a}; Plate B is a time-one orbit.}
\label{fg:sf02a}
\end{figure}
\begq
\label{eq:sf02a}
\T\l(\vt x.y\par \r)= \l(\vt {\rm \cmod1}(\exp(h\, \ln(2))\, x).{\rm \cmod1}(\exp(-h\, \ln(2))(y-x)+x) \par\r)
\endq
\begin{equation}
\label{cd:sf02a}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for both Plates of Fig. \ref{fg:sf02a} is as follows:}\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &=& 0.5 \cdot (1 + \sgn(1 - 0.0001\cdot h - w))\\
q &=& 1 - p_c\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta\cdot(1 - x)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
u_1 &=& \exp(\ln(2) \cdot h)\cdot x \\
v_1 &=& \exp(-\ln(2) \cdot h)\cdot (y+x)-x\\
\\
x_0 &=& s \cdot (u_1 - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot v_1 + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin(\pi\cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi\cdot h) + (x-x_0)\cdot\sin(\pi\cdot h)+y_0\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right\}
\end{equation}
\vs6\noi with the following modification to Eq. \ref{eq:sf02a}, Fig. \ref{fg:sf03} is obtained.
\begin{figure}[htbp]
\includegraphics[height=2.427in,width=4.577in,angle=0]{C:/Research/Book/Figures/eps/sf03.eps}
\caption{\sml Modification of Eq. \ref{eq:sf02a}}
\label{fg:sf03}
\end{figure}
\[\begin{array}{lcl}
u_1 &=& \exp(\ln(2) \cdot h)\cdot x \\
v_1 &=& \exp(-\ln(2) \cdot h)\cdot (1.0005 \cdot y+0.25\cdot x)-0.25 \cdot x
\end{array}\]
\vs6\noi The general perturbation of a shift is
\begq
\label{eq:sf02c}
\T_h\l(\vt x.y\par \r)= \l(\vt \exp(h\, \ln(2))\, x-f(\exp(h\, \ln(2))\, x).\exp(-h\, \ln(2))(y-g(\exp(h\, \ln(2))\, x)) \par\r)
\endq
To make this perturbation an IDE $g(\exp(h\, \ln(2))\, x)$ must be added to the second coordinate of Eq. \ref{eq:sf02c}. This addition retains the link between the stable and unstable manifolds. Also, the HOC must be added and its relevant boundary conditions. This is needed to assure that the IDE is bounded as required by the IDE axioms.
\begq
\label{cd:str}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:str01} Plate B is as follows:}\\
&&\mbox{For i = 1 to N}\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
\theta &=& \arctan(y/ x)\\
\\
u &=& \exp(\ln(2)) \cdot r - [\exp(\ln(2)) \cdot r]\\
v &=& \exp(-\ln(2))\cdot (\theta + \exp(\ln(2)) \cdot r)\\
\\
x &=& u \cdot \cos(2 \cdot \pi \cdot v)\\
y &=& u \cdot \sin(2 \cdot \pi \cdot v)\\
\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.563in,angle=0]{C:/Research/Book/Figures/eps/str01.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:sf02e}; Plate B is derived from Eq. \ref{eq:sf02f}}
\label{fg:str01}
\end{figure}
\vs6\noi The difference between Plate A and Plate B is that $[\exp(\ln(2)) \cdot r]$, an integer, has been replaced by $ \exp(\ln(2)) \cdot r$. Note that the integer part of a number $[n]$ can be replaced by a $C^\infty$ special IDE function.
\begq
\label{eq:str01}
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(\ln(2))\cdot r -\xint(\exp(\ln(2))\cdot r). \exp(-\ln(2))\cdot(\theta+\xint(\exp(\ln(2))\cdot r)\par \r)
\endq
\begin{figure}[htbp]
\includegraphics[height=2.35in,width=4.573in,angle=0]{C:/Research/Book/Figures/eps/str02.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:str01}; Plate B is derived from Eq. \ref{eq:str02}}
\label{fg:str02}
\end{figure}
\begq
\label{eq:str02}
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(\ln(2))\cdot r -\xint(\exp(\ln(2))\cdot r). \exp(-\ln(2))\cdot(\theta+(\exp(\ln(2))\cdot r))-\exp(\ln(2))\cdot r\par \r)
\endq
In Eq. \ref{eq:str02}, $\xint(\exp(\ln(2))\cdot r)$ has been replaced by $\exp(\ln(2))\cdot r $ in the second coordinate and $\exp(\ln(2))\cdot r$ has been subtracted from the second coordinate in order to align Eq. \ref{eq:str02} with the time-one map of an IDE.
\vs6\noi In Eq. \ref{eq:str03}, the exponentials in the second coordinate are dropped and the orbit is morphologically unchanged.
\begq
\label{eq:str03}
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(\ln(2))\cdot r -\xint(\exp(\ln(2))\cdot r). \exp(-\ln(2))\cdot(\theta+ r)-r \par\r)
\endq
\begin{figure}[htbp]
\includegraphics[height=2.41in,width=4.593in,angle=0]{C:/Research/Book/Figures/eps/str03.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:str03}, $\beta=2.4$; Plate B is derived from Eq. \ref{eq:str03}, $\beta=2.34$}
\label{fg:str03}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.28in,width=4.497in,angle=0]{C:/Research/Book/Figures/eps/str04.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:str03}, $\beta=2.2$; Plate B is derived from Eq. \ref{eq:str03}, $\beta=2.3$}
\label{fg:str04}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.274in,width=4.387in,angle=0]{C:/Research/Book/Figures/eps/str05.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:str05}, $h=1$; Plate B is derived from Eq. \ref{eq:str05}, $h=0.001$}
\label{fg:str05}
\end{figure}
\begq
\label{eq:str05}
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(h\,\ln(2))\cdot r -[\exp(h\,\ln(2))\cdot r]. \exp(-h\,\ln(2))\cdot(\theta+ [\exp(h\,\ln(2))\cdot r]) \par\r)
\endq
\begin{figure}[htbp]
\includegraphics[height=2.347in,width=4.503in,angle=0]{C:/Research/Book/Figures/eps/str05.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:str05}, $h=0.001$; Plate B is derived from Eq. \ref{eq:str07}, $h=0.001$}
\label{fg:str06}
\end{figure}
\begq
\label{eq:str06}
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(h\,\ln(2))\cdot r -[\exp(h\,\ln(2))\cdot r]. \exp(-h\,\ln(2))\cdot(\theta+ [\exp(h\,\ln(2))\cdot r])-[\exp(h\,\ln(2))\cdot r] \par\r)
\endq
\begq
\label{eq:str07}
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(h\,\ln(2))\cdot r -[\exp(h\,\ln(2))\cdot r]. \exp(-h\,\ln(2))\cdot(\theta+ \exp(h\,\ln(2))\cdot r)-\exp(h\,\ln(2))\cdot r \par\r)
\endq
\vs6\noi The symbolic dynamics for Eq. \ref{eq:str06} are
\[(0.a_1a_2a_3a_4\cdots,\, 0.b_1b_2b_3\cdots)\ra(0.a_2a_3a_4\cdots,\,( 0.a_1b_1b_2b_3\cdots)-a_1)\]
The second component of this vector is still in $[0,1)$ and this explains why points are concentrated near the origin. In particular,
\begin{lemma}
\label{lm:str06}
If ${\bf a}$ has positive algorithmic complexity, and ${\bf b}$ has zero algorithmic complexity then
\[{\bf a}+{\bf b}\]
has positive algorithmic complexity
\end{lemma}
\pf Direct computation. \rl
\begin{corollary}
If ${\bf b}= 0.b_1b_2b_3\cdots$ has positive algorithmic complexity, then $( 0.a_1b_1b_2b_3\cdots)-a_1$ also has positive algorithmic complexity.
\end{corollary}
\pf lemma \ref{lm:str06} \rl
\begin{figure}[htbp]
\includegraphics[height=2.203in,width=4.39in,angle=0]{C:/Research/Book/Figures/eps/str07.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:str07}, $h=0.001$; Plate B is derived from Eq. \ref{eq:str08}, $h=0.001$}
\label{fg:str07}
\end{figure}
\begq
\label{eq:str08}
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(h\,\ln(2))\cdot r -[\exp(h\,\ln(2))\cdot r]. \exp(-h\,\ln(2))\cdot(\theta+ r)- r \par\r)
\endq
\begin{figure}[htbp]
\includegraphics[height=2.407in,width=4.51in,angle=0]{C:/Research/Book/Figures/eps/str08.eps}
\caption{\sml Plate A is derived from Eq. \ref{eq:str09}, $h=0.001$ and $\cmod$ rotation is $\pi$; Plate B is derived from Eq. \ref{eq:str09}, $h=0.001$ in which $\cmod$ is modified to have rotation $1.34\, \pi$}
\label{fg:str08}
\end{figure}
\begq
\label{eq:str08a}% NOTE(review): renamed -- this was a duplicate of \label{eq:str08} defined above; verify which equation \ref{eq:str08} is meant to point to
\T_1\l(\vt x.y\par \r)= \l(\vt \exp(h\,\ln(2))\cdot r -\xint(\exp(h\,\ln(2))\cdot r). \exp(-h\,\ln(2))\cdot(\theta+ r)- r \par\r)
\endq
Replacing $[\exp(h\,\ln(2))\cdot r]$ with $\xint(\exp(h\,\ln(2))\cdot r)$ for a large $\beta$ does not change the complexity of the orbits, a.e., and taking this a step further to obtain Eq. \ref{eq:str09} also does not change the complexity of the orbits, see Fig. \ref{fg:str08}.
\begq
\label{eq:str09}
\T_1\l(\vt x.y\par \r)= \l(\vt \cmod(\exp(h\,\ln(2))\cdot r). \exp(-h\,\ln(2))\cdot(\theta+ r)- r \par\r)
\endq
\begin{equation}
\label{cd:str09a}
\left.\begin{array}{lcl}
&&\mbox{\bf The Code for Plate A of Fig. \ref{fg:str08} is as follows:}\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &=& 0.5 \cdot (1 + \sgn(1 - 0.0001\cdot h - w))\\
q &=& 1 - p_c\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta\cdot(1 - x)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
\theta &=& \arctan(y/x)\\
u &=& \exp(h\,\ln(2)) \cdot r\\
v &=& \exp(-h\,\ln(2)) \cdot (\theta + r) - r\\
\\
u_1 &=& u \cdot \cos(2 \cdot \pi \cdot v)\\
v_1 &=& u \cdot \sin(2 \cdot \pi \cdot v)\\
\\
x_0 &=& s \cdot (u_1 / r) \cdot (r - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (v_1 / r) \cdot (r - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin(\pi\cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi\cdot h) + (x-x_0)\cdot\sin(\pi\cdot h)+y_0\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\end{equation}
\vs6\noi In order to replace $x\, \mod(1)$ with the IDE $C^\infty$ function $\cmod$ it is necessary to include the HOC. This results in a four-dimensional system projected onto $\Rl^2$. The difference between Plate A and Plate B is the degree to which $(u_2,v_2)$ deviate from functioning like $x\, \mod(1)$.
\vs6\noi In general, a periodic force has the potential to function like $x\, \mod(1)$ and thus contribute to the formation of a shift dynamic on a sub region of the domain of the system.
%========================================Transition to FT====================================
\section{\sml Transition from the Bilateral Shift IDE to a Twist and Flip System}
\label{sc:shiftt}
A particularly important study is the transition of a bilateral shift to a twist system as this system occurs frequently in many contexts. The purpose of this section is to carry out that derivation.
\vs6\noi The shift formulation in Eq. \ref{eq:sf02e} can be considered as dimensionless or coordinate independent. As such, the first entry may be considered as the radius in polar coordinates and the second component as the angle, $\theta$ as in Eq. \ref{eq:bsp01}.
\begq
\label{eq:bsp01}
\T\l(\vt r.\theta\par \r)= \l(\vt \{2\cdot r\}.(\theta-[2 \cdot r])/ 2+[2\cdot r] \par\r)
\endq
\begq
\label{eq:bsp01a}
\T\l(\vt r.\theta\par \r)= \l(\vt \cmod1(2\cdot r).(\theta-2 \cdot r)/2+ 2\cdot r -\cmod1(2\cdot r)/2\par\r)
\endq
Converting to IDE form gives
\begq
\label{eq:bsp01b}
\T\l(\vt r.\theta\par \r)= \l(\vt ex\cdot r. ey \cdot (\theta - ex \cdot r) + ex \cdot r - ey \cdot \{ex \cdot r\}\par\r)
\endq
where
\[ex=\exp(h\, \ln(2))\;\;\; ey=\exp(-h\, \ln(2))\]
For $h<<1$ the following approximation is obtained
\begq
\label{eq:bsp01c}
\T\l(\vt r.\theta\par \r)= \l(\vt \exp(h\, \ln(2))\cdot r. \exp(-h\, \ln(2)) \cdot (\theta - r) - r +\exp(-h\, \ln(2))[\exp(h\, \ln(2)\, r)]\par\r)
\endq
Expanding the exponentials gives
\begq
\label{eq:bsp01d}
\T\l(\vt r.\theta\par \r)= \l(\vt \exp(h\, \ln(2))\cdot r. \exp(-h\, \ln(2)) \cdot (\theta - r) - r +h \cdot \ln(2) \cdot r\par\r)
\endq
\begin{figure}[htbp]
\includegraphics[height=2.363in,width=4.573in,angle=0]{C:/Research/Book/Figures/eps/KAM00.eps}
\caption{\sml Plate A is the Time Series for Eq. \ref{eq:bsp01d}; Plate B is the Time-one map}
\label{fg:kam00}
\end{figure}
\vs6\noi A further approximation is as follows:
\begq
\label{eq:bsp01e}
\T\l(\vt r.\theta\par \r)= \l(\vt \exp(h\, \ln(2))\cdot r. (1 - \ln(2) \cdot h) \cdot (\theta - 2\cdot r)\par\r)
\endq
\begin{figure}[htbp]
\includegraphics[height=2.343in,width=4.533in,angle=0]{C:/Research/Book/Figures/eps/KAM00a.eps}
\caption{\sml Plate A is the Time Series for Eq. \ref{eq:bsp01e}; Plate B is the Time-one map}
\label{fg:bsp01e}
\end{figure}
In general rectangular coordinates this becomes
\begq
\label{eq:bsp02}
\T\l(\vt x.y\par \r)= \l(\vt u\,\cos(v).u\, \sin(v) \par\r)
\endq
Lifting Eq. \ref{eq:bsp02} and using the HOC to provide fusion with $\cmod1$ the first component of the fusion becomes
\begq
\label{eq:bsp03}
\T\l(\vt x.y\par \r)= \l(\vt 2\cdot r \cdot \cos((\theta+[2 \cdot r])/ 2).2\cdot r \cdot\sin((\theta+[2 \cdot r])/ 2) \par\r)
\endq
In IDE form this is
\begq
\label{eq:bsp04}
\T\l(\vt x.y\par \r)= \l(\vt \exp(h\, \ln(2))\cdot r \cdot \cos((\theta+[\exp(h\, \ln(2)) r])/ 2).\exp(h\, \ln(2))\cdot r \cdot\sin((\theta+[\exp(h\, \ln(2)) r])/ 2) \par\r)
\endq
Locally, for very small $h$ this is
\begq
\label{eq:bsp05}
\T\l(\vt x.y\par \r)= \l(\vt \exp(h\, \ln(2))\cdot r \cdot \cos(\theta+h\, r).\exp(h\, \ln(2))\cdot r \cdot\sin(\theta+h\, r) \par\r)
\endq
\vs6\noi Using the polar representation of a bilateral shift produces Fig. \ref{fg:kam01}, Plate A. Plate B is from Eq. \ref{eq:sf02f}.
\begin{figure}[htbp]
\includegraphics[height=2.333in,width=4.5in,angle=0]{C:/Research/Book/Figures/eps/KAM01.eps}
\caption{\sml Plate A is Four orbits of the Twist and Flip Map; Plate B is Three Orbits of the Twist and Flip IDE}
\label{fg:kam01}
\end{figure}
\vs6\noi All further images will use the Twist and Flip IDE.
%=========================================Time one========================
\subsection{Transitions to the Time-one Twist and Flip IDE}
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.563in,angle=0]{C:/Research/Book/Figures/eps/KAM02.eps}
\caption{\sml Plate A is the Spherical Bilateral Shift on Two Symbols; Plate B is The Twist and Flip IDE for HOC $\alpha =\pi$ }
\label{fg:kam02}
\end{figure}
\vs6\noi The following series of images record the transition from Plate B to KAM island formation.
\begin{figure}[htbp]
\includegraphics[height=2.357in,width=4.607in,angle=0]{C:/Research/Book/Figures/eps/KAM03.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =\pi$ }
\label{fg:kam03}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.417in,width=4.52in,angle=0]{C:/Research/Book/Figures/eps/KAM04.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =1.3 \cdot \pi$ }
\label{fg:kam04}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.437in,width=4.563in,angle=0]{C:/Research/Book/Figures/eps/KAM05.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =\sqrt{2} \cdot \pi$ }
\label{fg:kam05}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.44in,width=4.487in,angle=0]{C:/Research/Book/Figures/eps/KAM06.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =1.5 \cdot \pi$ }
\label{fg:kam06}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.533in,angle=0]{C:/Research/Book/Figures/eps/KAM06a.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =1.6 \cdot \pi$ }
\label{fg:kam06a}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.393in,width=4.437in,angle=0]{C:/Research/Book/Figures/eps/KAM07.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =\sqrt{3} \cdot \pi$ }
\label{fg:kam07}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.44in,angle=0]{C:/Research/Book/Figures/eps/KAM08.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =1.75 \cdot \pi$ }
\label{fg:kam08}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.547in,angle=0]{C:/Research/Book/Figures/eps/KAM09.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =1.94 \cdot \pi$ }
\label{fg:kam09}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.547in,angle=0]{C:/Research/Book/Figures/eps/KAM10.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =1.95 \cdot \pi$ }
\label{fg:kam10}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.433in,width=4.263in,angle=0]{C:/Research/Book/Figures/eps/KAM11.eps}
\caption{\sml Plate A is the Time Series for the Twist and Flip IDE; Plate B is The Twist and Flip IDE for HOC $\alpha =2.0 \cdot \pi$ }
\label{fg:kam11}
\end{figure}
%=====================================================First Return==================================
\subsection{\sml Transitions to the Twist and Flip First Return Map}
\begq
\label{cd:km15}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Plate B of Fig. \ref{fg:kam15} is as follows:}\\
\alpha&=& 2\,\pi\\
\beta&=&200\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
\\
p_c &=& 0.5 \cdot (1 + \sgn(1 - 0.0001 \cdot h - w-c))\\
q &=& 1 - p_c\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
\theta &=& \arctan(y/x)\\
\\
u &=& \exp(h\, \ln(2)) \cdot r\\
v &=& \theta + h \cdot r\\
\\
u_1 &=& u \cdot \cos(v)\\
v_1 &=& u \cdot \sin(v)\\
\\
p &=& 0.5 \cdot (1 + \sgn(1 - u))\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot \cos(v) \cdot (u - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot \sin(v) \cdot (u - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& x\cdot\cos(\pi \cdot h) + y\cdot\sin(\pi\cdot h)\\
v_2 &=& y\cdot\cos(\pi \cdot h) - x\cdot\sin(\pi \cdot h)\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.327in,width=4.573in,angle=0]{C:/Research/Book/Figures/eps/KAM12.eps}
\caption{\sml Plate A is the Time Series for the First Return map IDE; Plate B is the First Return Map for $\alpha = \pi$ }
\label{fg:kam12}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.34in,width=4.563in,angle=0]{C:/Research/Book/Figures/eps/KAM13.eps}
\caption{\sml Plate A is the Time Series for the First Return map IDE; Plate B is The First Return Map $\alpha =\sqrt{2} \pi$ }
\label{fg:kam13}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.367in,width=4.567in,angle=0]{C:/Research/Book/Figures/eps/KAM14.eps}
\caption{\sml Plate A is the Time Series for the First Return map IDE; Plate B is The First Return Map $\alpha = 2\, \pi$ }
\label{fg:kam14}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.357in,width=4.553in,angle=0]{C:/Research/Book/Figures/eps/KAM15.eps}
\caption{\sml Plate A is the Time Series for the First Return map IDE; Plate B is The First Return Map $\alpha = 1.65 \pi$ }
\label{fg:kam15}
\end{figure}
\begq
\label{cd:kam15}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:kam15}}\\
\alpha&=& 2\,\pi\\
\beta&=&200\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - h - w)))\\
q&=& 1-p_c\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
u &=& x \cdot \cos(h \cdot r) - y \cdot \sin(h \cdot r)\\
v &=& y \cdot \cos(h \cdot r) + x \cdot \sin(h \cdot r)\\
u_1 &=& \exp( h\,\ln(2))\cdot u\\
v_1 &=& \exp( h\,\ln(2))\cdot v\\
r &=& \sqrt{u_1 ^ 2 + v_1 ^ 2}\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta \cdot(1 - r)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot (u_1 / r) \cdot (r - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (v_1 / r) \cdot (r - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin(\pi \cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi \cdot h) - (x-x_0)\cdot\sin(\pi \cdot h)+y_0\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
%======================
\section{\sml The Bilateral Shift with Complex Boundaries}
\label{sc:shiftcb}
Figure \ref{fg:shiftcb} is an example of a shift with complex boundaries. Within the elliptic boundaries it is conjectured that this is a bilateral shift. Therefore, the following Theorem is conjectured:
\begin{theorem}
Let $\T_h$ be an IDE with a hyperbolic fixed point in an invariant region of positive Lebesgue measure defined by both external and internal boundary conditions as seen in {\rm Fig. \ref{fg:shiftcb}} and having a transverse homoclinic point in that region.
\vs6\noi Then on that region $\T_h$ is a bilateral shift.
\end{theorem}
\begin{figure}[htbp]
\includegraphics[height=2.387in,width=4.46in,angle=0]{C:/Research/Book/Figures/eps/ShiftCB.eps}
\caption{\sml Plate A is the Time one Map for a Discrete Twist and Flip map; Plate B is the associated IDE for plate A}
\label{fg:shiftcb}
\end{figure}
\begq
\label{cd:shcb}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:shiftcb}, Plate B}\\
\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(2\,\pi\, h) - w_1\,\sin(2\,\pi \, h )\\
w &=& w_1\,\cos(2\,\pi \, h) + z_1\,\sin(2\,\pi \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
s &=& 0.5 \cdot (1 + \tanh(200\,(w_1)))\\
\\
r &=& 1 / \sqrt{(x - 0.2) ^ 2 + y ^ 2}\\
r &=& \ln(r) - 2 \cdot r\\
\\
u_1 &=& (x -0.2)\cdot \cos(h \cdot r) + y \cdot \sin(h \cdot r)+0.2\\
v_1 &=& y \cdot \cos(h \cdot r) - (x-0.2) \cdot \sin(h \cdot r)\\
\\
u_2 &=& x\cdot\cos(\pi\cdot h) + y\cdot\sin(\pi \cdot h)\\
v_2 &=& y\cdot\cos(\pi \cdot h) - x\cdot\sin(\pi \cdot h)\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.56in,angle=0]{C:/Research/Book/Figures/eps/ShiftCB1.eps}
\caption{\sml Plate A is the Time Series for a Shift with Complex Boundaries; Plate B Seven orbits for the IDE of Plate A }
\label{fg:shiftcb1}
\end{figure}
\begq
\label{cd:shft}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:shiftcb1}}\\
\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(2\,\pi\, h) + w_1\,\sin(2\,\pi \, h )\\
w &=& w_1\,\cos(2\,\pi \, h) - z_1\,\sin(2\,\pi \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
s &=& 0.5 \cdot (1 + \tanh(200\,(z_1)))\\
\\
r_0 &=& \sqrt{(x - 1) ^ 2 + (y ^ 2)}\\
r &=& 0.7\cdot ((1 / r_0) - 1)\\
\\
u_1 &=& (x -1)\cdot \cos(h \cdot r) + y \cdot \sin(h \cdot r)+1\\
v_1 &=& y \cdot \cos(h \cdot r) - (x-1) \cdot \sin(h \cdot r)\\
\\
u_2 &=& x\cdot\cos(\pi\cdot h) + y\cdot\sin(\pi \cdot h)\\
v_2 &=& y\cdot\cos(\pi \cdot h) - x\cdot\sin(\pi \cdot h)\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi Figure \ref{fg:shiftcb1} is an example of a shift with complex boundaries without KAM island chains.
The significance of this is that a gyration conductance function may create boundaries that are not apparent in the form of the IDE, further complicating the resolution of the Hirsch Conjecture. For example, the boundary condition visible in the code, and thus in the form of the IDE for Fig. \ref{fg:shiftcb1}, is $s = 0.5 \cdot (1 + \tanh(200\,(z_1)))$, whereas in the figure the boundary is outlined in red.
\vs6\noi It is the gyration conductance function, $ r_0 = \sqrt{(x - 1) ^ 2 + (y ^ 2)}\;\;
r = 0.7\cdot ((1 / r_0) - 1) $, that has formed this boundary. This implies that the resolution of the Hirsch Conjecture not only involves the separate IDEs and their implicit boundary conditions but also any functions embedded in the IDEs.
%==========================================
%05
\chapter{The Transition of Systems from Simple to Complex}
\label{ch:tran}
\begin{center}\parbox{3.5in} {\em A system may transition from a simple state to a highly complex state and back to a final simple state by passing through intermediate states of varying degrees of complexity, Fig. \ref{fg:cmplxevol}. IDE theory provides a simple mechanism to model such transitions, Proposition \ref{pr:fus}.}
\end{center}
\begin{figure}[htbp]
\includegraphics[height=1.837in,width=2.75in,angle=0]{C:/Research/Book/Figures/eps/CmplxEvol.eps}
\caption{\sml Time Evolution of Complex Systems }
\label{fg:cmplxevol}
\end{figure}
\sml
\vs6\noi The purpose of this chapter is to study the time evolution of a system which is evolving in the presence of a set of small forces which collectively act like a random force. The model will be the time-one map of a weighted sum of a bilateral shift and a system of interest. Each image sequence in each section may be thought of as a series of snapshots of the combined system evolving in time.
\vs6\noi The chapter is divided into four sections. Three sections examine the combined effect of a weighted average of a random system with a system of interest: 1) A periodic IDE; 2) a twist and translate IDE; 3) a periodically forced twist IDE. In addition, there will be presented a transition from the twist and flip IDE to KAM island chains.
\vs6\noi The formation of KAM island chains, when viewed as part of the time evolution of a system, has physical significance. Looking inside of a hurricane one sees that there is a global dynamic within which there are also pockets of local dynamics. In particular, small vortices may form within the larger system.
\vs6\noi Figure \ref{fg:kamflow} Plate A shows an instantaneous snapshot from the result of combining a rotational flow with a horizontal flow: the formation of KAM island chains. In the presence of dissipation, the islands can become vortices. In particular, the presence of many small forces combining to create a bilateral shift dynamic can combine with twist flows such as the Euler flow to form a complex fluid flow.
\begin{figure}[htbp]
\includegraphics[height=2.34in,width=4.573in,angle=0]{C:/Research/Book/Figures/eps/KAMFlow.eps}
\caption{\sml Plate A is the Time-one Map For KAM Island Chains ; Plate B is The Euler Fluid Flow in Blue Combined with a Horizontal Flow in Green }
\label{fg:kamflow}
\end{figure}
\vs6\noi The form of all equations in this section is
\[s_1 \cdot(s(\X)\cdot \T_h+(1-s(\X))\cdot \S_h)+(1-s_1)\cdot \R_h\]
where
\[s(\X)\cdot \T_h+(1-s(\X))\cdot \S_h\]
is the two-dimensional spherical analog of $2\, \X\, \mod(1)$. For any $\X$ in the unit disk
\[\T_h(\X)=\exp(h\, \ln(2)\,\I_2)\exp(h \, \|\X\| \, \B)\, \X\]
and $\S_h$ is IDE special function $\cmod1(\X)$.
\vs6\noi To view this as an abstract fluid flow (omitting the pressure and density factors), consider $s_1$ a function of time:
\[s_1(t) \cdot(s(\X)\cdot \T_h+(1-s(\X))\cdot \S_h)+(1-s_1(t))\cdot \R_h\]
As such, each image is a frame from the time evolution of the flow. This is the purpose of presenting snapshots for various values of $s_1$ in the following sections.
\vs6\noi The significance of these transitions is that they may be envisioned as an illustration of how an unstable complex system may evolve to a simple stable system. The importance of these transitions is that they contribute to explaining how complex phenomena in nature pass between simple and complex manifestations over the course of a short time span. Two important examples are the transition of a super cell to a tornado followed by dissipation of the storm and the onset and dissipation of epileptic seizures.
%=================Transition to periodic===========================
\section{\sml Transitions from the Bilateral Shift to a Linear Periodic IDE}
This section traces the transition of a bilateral shift to a periodic system. The first illustration is Fig. \ref{fg:transp01}. The significant parameters are found in Table \ref{tb:transp01}.
\begin{figure}[htbp]
\includegraphics[height=2.323in,width=4.57in,angle=0]{C:/Research/Book/Figures/eps/TransP01.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.5$ ; Plate B is The Time-one Map $s_1=0.4$ }
\label{fg:transp01}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transp01}
Figure \ref{fg:transp01}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\lambda$\\ \hline
Plate A& $1.0$ &0.8&1.0\\ \hline
Plate B& $1.0$ &0.65&1.0\\ \hline
\end{tabular}}
\footnotesize
\begq
\label{cd:tr01}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:transp01}, Plate A}\\
\alpha&=& 2\,\pi\\
\beta&=& 200\\
\omega &=& 1\\
s_1 &=& 0.99\\
\lambda &=& 2.0\\
\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - h - w)))\\
q&=& 1-p_c\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
u &=& x \cdot \cos(h \cdot r) - y \cdot \sin(h \cdot r)\\
v &=& y \cdot \cos(h \cdot r) + x \cdot \sin(h \cdot r)\\
u_1 &=& \exp( h\,\ln(2))\cdot u\\
v_1 &=& \exp( h\,\ln(2))\cdot v\\
r &=& \sqrt{u_1 ^ 2 + v_1 ^ 2}\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta \cdot(1 - r)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot (u_1 / r) \cdot (r - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (v_1 / r) \cdot (r - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin(\pi \cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi \cdot h) - (x-x_0)\cdot\sin(\pi \cdot h)+y_0\\
\\
u_3 &=& s \cdot u_1 + (1-s) \cdot u_2\\
v_3 &=& s \cdot v_1 + (1-s) \cdot v_2\\
\\
u_4 &=& x \cdot \cos(\omega \cdot h) + y \cdot \sin(\omega \cdot h)\cdot \lambda\\
v_4 &=& y \cdot \cos(\omega \cdot h) - x \cdot \sin(\omega \cdot h)/ \lambda\\
\\
x &=& s_1 \cdot u_3 + (1 - s_1) \cdot u_4\\
y &=& s_1 \cdot v_3 + (1 - s_1) \cdot v_4\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\sml
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.48in,angle=0]{C:/Research/Book/Figures/eps/TransP02.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.5$ ; Plate B is The Time-one Map $s_1=0.4$ }
\label{fg:transp02}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transp02}
Figure \ref{fg:transp02}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\lambda$\\ \hline
Plate A& $0.5$ &0.85&2.0\\ \hline
Plate B& $0.5$ &0.65&2.0\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.4173in,width=4.277in,angle=0]{C:/Research/Book/Figures/eps/TransP03.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.5$ ; Plate B is The Time-one Map $s_1=0.4$ }
\label{fg:transp03}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transp03}
Figure \ref{fg:transp03}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\lambda$\\ \hline
Plate A& $0.5$ &0.55&2.0\\ \hline
Plate B& $0.5$ &0.35&2.0\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.373in,width=4.507in,angle=0]{C:/Research/Book/Figures/eps/TransP04.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.5$ ; Plate B is The Time-one Map $s_1=0.4$ }
\label{fg:transp04}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transp04}
Figure \ref{fg:transp04}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\lambda$\\ \hline
Plate A& $0.5123$ &0.21&1.1\\ \hline
Plate B& $0.5123$ &0.15&1.1\\ \hline
\end{tabular}}
%========================Transition to Twist and Translate===============
\section{\sml Transitions from the Bilateral Shift to a Twist and Translate IDE}
\begin{figure}[htbp]
\includegraphics[height=2.433in,width=4.493in,angle=0]{C:/Research/Book/Figures/eps/TransT01.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.999$ ; Plate B is The Time-one Map $s_1=0.95$ }
\label{fg:transt01}
\end{figure}
\footnotesize
\begq
\label{cd:trn01}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:transt01}, Plate A}\\
\alpha&=& 2\,\pi\\
\omega &=& 1.0\\
s_1 &=& 0.999\\
\tau&=&0.5\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - h - w)))\\
q&=& 1-p_c\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
u &=& x \cdot \cos(h \cdot r) - y \cdot \sin(h \cdot r)\\
v &=& y \cdot \cos(h \cdot r) + x \cdot \sin(h \cdot r)\\
u_1 &=& \exp( h\,\ln(2))\cdot u\\
v_1 &=& \exp( h\,\ln(2))\cdot v\\
r &=& \sqrt{u_1 ^ 2 + v_1 ^ 2}\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta \cdot(1 - r)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot (u_1 / r) \cdot (r - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (v_1 / r) \cdot (r - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin(\pi \cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi \cdot h) - (x-x_0)\cdot\sin(\pi \cdot h)+y_0\\
\\
u_3 &=& s \cdot u_1 + (1-s) \cdot u_2\\
v_3 &=& s \cdot v_1 + (1-s) \cdot v_2\\
\\
r_1 &=& \sqrt{x ^ 2 + y ^ 2} \cdot \omega\\
\\
u_4 &=& (x \cdot \cos(r_1 \cdot h) + y \cdot \sin(r_1 \cdot h))\\
v_4 &=& (y \cdot \cos(r_1 \cdot h) - x \cdot \sin(r_1 \cdot h)) \\
\\
u_5 &=& u_4 + h \cdot \tau\\
v_5 &=& v_4\\
\\
x &=& s_1 \cdot u_3 + (1 - s_1) \cdot u_5\\
y &=& s_1 \cdot v_3 + (1 - s_1) \cdot v_5\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\sml
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.59in,angle=0]{C:/Research/Book/Figures/eps/TransT02.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.9$ ; Plate B is The Time-one Map $s_1=0.8$ }
\label{fg:transt02}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transt02}
Figure \ref{fg:transt02}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\tau$\\ \hline
Plate A& $\pi/3$ &0.9&0.5\\ \hline
Plate B& $\pi/3$ &0.8&0.5\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.193in,width=4.507in,angle=0]{C:/Research/Book/Figures/eps/TransT03.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.7$ ; Plate B is The Time-one Map $s_1=0.6$ }
\label{fg:transt03}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transt03}
Figure \ref{fg:transt03}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\tau$\\ \hline
Plate A& $\pi\cdot 0.7$ &0.7&0.5\\ \hline
Plate B& $\pi\cdot 0.7$ &0.6&0.5\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.45in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/TransT04.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.5$ ; Plate B is The Time-one Map $s_1=0.4$ }
\label{fg:transt04}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transt04}
Figure \ref{fg:transt04}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\tau$\\ \hline
Plate A& $0.5\cdot \pi$ &0.5&0.5\\ \hline
Plate B& $0.5\cdot \pi$ &0.4&0.5\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.417in,width=4.597in,angle=0]{C:/Research/Book/Figures/eps/TransT05.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.3$ ; Plate B is The Time-one Map $s_1=0.2$ }
\label{fg:transt05}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transt05}
Figure \ref{fg:transt05}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\tau$\\ \hline
Plate A& $1.0$ &0.3&0.5\\ \hline
Plate B& $1.0$ &0.2&0.5\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.103in,width=4.537in,angle=0]{C:/Research/Book/Figures/eps/TransT06.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.1$ ; Plate B is The Time-one Map $s_1=0.01$ }
\label{fg:transt06}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transt06}
Figure \ref{fg:transt06}&&&\\ \hline
Parameter & $\omega$ & $s_1$ &$\tau$\\ \hline
Plate A& $1.0$ &0.1&0.5\\ \hline
Plate B& $1.0$ &0.01&0.5\\ \hline
\end{tabular}}
%===================================
\section{\sml Transitions from the Bilateral Shift to a Time-one IDE\\ of a Forced Twist System}
\label{sc:transft}
\begin{figure}[htbp]
\includegraphics[height=2.423in,width=4.58in,angle=0]{C:/Research/Book/Figures/eps/TransFT01.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft01}
\end{figure}
\footnotesize
\begq
\label{cd:trn}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:transft01}, Plate A}\\
\alpha&=& 2\,\pi\\
\beta&=&200\\
\omega &=& 89\\
s_1 &=& 0.99\\
a &=& 0.9\\
\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &=& 0.5 \cdot (1 + \tanh(\beta\,(1 - h - w)))\\
q&=& 1-p_c\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
u &=& x \cdot \cos(h \cdot r) - y \cdot \sin(h \cdot r)\\
v &=& y \cdot \cos(h \cdot r) + x \cdot \sin(h \cdot r)\\
u_1 &=& \exp( h\,\ln(2))\cdot u\\
v_1 &=& \exp( h\,\ln(2))\cdot v\\
r &=& \sqrt{u_1 ^ 2 + v_1 ^ 2}\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta \cdot(1 - r)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot (u_1 / r) \cdot (r - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (v_1 / r) \cdot (r - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin(\pi \cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi \cdot h) - (x-x_0)\cdot\sin(\pi \cdot h)+y_0\\
\\
u_3 &=& s \cdot u_1 + (1-s) \cdot u_2\\
v_3 &=& s \cdot v_1 + (1-s) \cdot v_2\\
\\
r_0 &=& \sqrt{x ^ 2 + y ^ 2}\\
r_1 &=& \omega \cdot r_0 \cdot (1 - r_0)\\
\\
u_4 &=& (x \cdot \cos(r_1 \cdot h) + y \cdot \sin(r_1 \cdot h))\\
v_4 &=& (y \cdot \cos(r_1 \cdot h) - x \cdot \sin(r_1 \cdot h)) + a \cdot h \cdot w_1\\
\\
x &=& s_1 \cdot u_3 + (1 - s_1) \cdot u_4\\
y &=& s_1 \cdot v_3 + (1 - s_1) \cdot v_4\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\sml
\begin{figure}[htbp]
\includegraphics[height=2.373in,width=4.517in,angle=0]{C:/Research/Book/Figures/eps/TransFT02.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft02}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transft02}
Figure \ref{fg:transft02}&&&\\ \hline
Parameter &$\omega$&$s_1$&a\\ \hline
Plate A&89&0.7&0.9\\ \hline
Plate B&89&0.6&0.9\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.363in,width=4.47in,angle=0]{C:/Research/Book/Figures/eps/TransFT02a.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft02a}
\end{figure}
\vs6\noi Figure \ref{fg:transft02a} is the time series for Fig. \ref{fg:transft02}.
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.603in,angle=0]{C:/Research/Book/Figures/eps/TransFT03.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft03}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transft03}
Figure \ref{fg:transft03}&&&\\ \hline
Parameter &$\omega$&$s_1$&a\\ \hline
Plate A&113&0.5&4.5\\ \hline
Plate B&113&0.3&4.5\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.397in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/TransFT03a.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft03a}
\end{figure}
\vs6\noi Figure \ref{fg:transft03a} is the time series for Fig. \ref{fg:transft03}.
\begin{figure}[htbp]
\includegraphics[height=2.423in,width=4.45in,angle=0]{C:/Research/Book/Figures/eps/TransFT04.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft04}
\end{figure}
{\rm \begin{tabular}{||c||c|c|c||}\hline
\label{tb:transft04}
Figure \ref{fg:transft04}&&&\\ \hline
Parameter &$\omega$&$s_1$&a\\ \hline
Plate A&80&0.1&4.5\\ \hline
Plate B&80&0.0001&4.5\\ \hline
\end{tabular}}
\begin{figure}[htbp]
\includegraphics[height=2.31in,width=4.59in,angle=0]{C:/Research/Book/Figures/eps/TransFT04a.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft04a}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.53in,width=4.593in,angle=0]{C:/Research/Book/Figures/eps/TransFT04b.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft04b}
\end{figure}
\vs6\noi Figure \ref{fg:transft04a} is the time series associated with Fig. \ref{fg:transft04} and Fig. \ref{fg:transft04b} is Fig. \ref{fg:transft04a} rotated in three dimensions with the $z$ coordinate equal zero.
\begin{figure}[htbp]
\includegraphics[height=2.383in,width=4.437in,angle=0]{C:/Research/Book/Figures/eps/TransFT04c.eps}
\caption{\sml Plate A is the Time-one for $s_1=0.99$ ; Plate B is The Time-one Map $s_1=0.9$ }
\label{fg:transft04c}
\end{figure}
\vs6\noi Figure \ref{fg:transft04c} is the time series associated with Fig. \ref{fg:transft04} rotated in three dimensions.
%===================================================Serendipity============================
%00
\section{\sml Serendipity I}
\label{sc:ser01}
This section is a collection of images that were stumbled upon by chance and contain interesting dynamics that may be relevant to other areas of science. The objective of this section is therefore to provide graphic examples that may be used as a guide to reverse engineering a data set to a mathematical model.
\begin{figure}[htbp]
\includegraphics[height=2.5in,width=4.313in,angle=0]{C:/Research/Book/Figures/eps/GenCmplxIDE.eps}
\caption{\sml Generic IDE Form for all Serendipity Examples }
\label{fg:gencmplxIDE}
\end{figure}
{\rm \begin{tabular}{||c||c|c||}\hline
\label{tb:ser01}
&Data for Section \ref{sc:ser01}&\\ \hline
IDEs&Boundary Conditions& Fusion Functions\\ \hline
$\exp(h\,\ln(2)\, \I_2)$ &$p_c=0.5(1+\tanh(\beta \,(1-w)))$ & $s\ra p\cdot(s+q-s\cdot q)$ \\ \hline
$\exp(\pm h\,\pi \B)$ & $q=1-p_c$ & $s_1, \,\, 1-s_1$ \\ \hline
$\exp(\pm h\,2\,\pi \B)$& $p_1=0.5(1+\tanh(\beta(1-r)))$ & \\ \hline
$\exp(\pm h\,r(\cdot) \B)$ & $p_2 = 0.5 \cdot (1 + \tanh(\beta \cdot (b - \tanh(w)))) $ &$p_2, 1-p_2 $ \\ \hline
$h\,\P_i \exp(\pm h\, \B)$ & $ $ & \\ \hline
$ \exp(\pm h\, \A(\cdot))$ & $ $ & \\ \hline
\end{tabular}}
\[\begin{array}{lcl}
&&\mbox{For i = 1 to M}\\
z &=&z_1\cdot \cos(2\, \pi\, h) + w_1\cdot \sin(2\, \pi \, h)\\
w &=& w_1 \cdot \cos(2\, \pi\, h) - z_1 \cdot \sin(2\, \pi \, h) \\
z_1 &=& z\\
w_1 &=& w\\
\\
u &=& \exp(-h \cdot 0.05) \cdot (x \cdot \cos(x \cdot h) + y \cdot \sin(x \cdot h) / x)\\
v &=& \exp(-h \cdot 0.05) \cdot (y \cdot \cos(x \cdot h) - x ^ 2 \cdot \sin(x \cdot h)) + a \cdot w\\
x &=& u\\
y &=& v\\
&&\mbox{Next i}
\end{array}\]
\begin{figure}[htbp]
\includegraphics[height=1.417in,width=4.257in,angle=0]{C:/Research/Book/Figures/eps/EarthQ.eps}
\caption{\sml Crude Comparison of Nonlinear Time-one Map to Idealized Earth Quake Formation }
\label{fg:earthq}
\end{figure}
\vs6\noi The class of ODEs for which there is a partial solution to the Hirsch Conjecture may be derived from fusion theory, Proposition \ref{pr:fus}. There are two forms. The first form is scalar fusion.
\[\lambda(\X) \, \T_h+(1-\lambda(\X)) \S_h\]
where $\lambda$ is a scalar function. The simplest case is when $\lambda$ is either 1 or 0. The associated ODE is given by
\[\dot{\X}=\lambda \frac{d \T_h}{dh} +(1-\lambda) \frac{d \S_h}{dh}\]
The second form is IDE fusion.
\[\R_h\, \T_h+(1-\R_h)\, \S_h\]
The ODE may be obtained by differentiation with respect to $h$. This is the form of the equations of Chua, Lorenz and R\"{o}ssler as well as others.
\vs6\noi
\begin{figure}[htbp]
\includegraphics[height=2.6in,width=3.553in,angle=0]{C:/Research/Book/Figures/eps/Sum01.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum01}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.423in,width=3.623in,angle=0]{C:/Research/Book/Figures/eps/Sum01a.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum01a}
\end{figure}
\vs6\noi In general, the associated ODE will be related to the IDE but may require some analysis to sort out the complexity details.
\begin{figure}[htbp]
\includegraphics[height=2.282in,width=4.493in,angle=0]{C:/Research/Book/Figures/eps/Sum02.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum02}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.193in,width=4.357in,angle=0]{C:/Research/Book/Figures/eps/Sum03.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum03}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.433in,width=4.477in,angle=0]{C:/Research/Book/Figures/eps/Sum04.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum04}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.447in,width=4.53in,angle=0]{C:/Research/Book/Figures/eps/Sum05.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum05}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.353in,width=4.59in,angle=0]{C:/Research/Book/Figures/eps/Sum06.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum06}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.403in,width=4.507in,angle=0]{C:/Research/Book/Figures/eps/Sum07.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum07}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.49in,width=4.447in,angle=0]{C:/Research/Book/Figures/eps/Sum08.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum08}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.437in,width=2.987in,angle=0]{C:/Research/Book/Figures/eps/Sum13.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum13}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.31in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/Sum09.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum09}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.377in,width=4.54in,angle=0]{C:/Research/Book/Figures/eps/Sum12.eps}
\caption{\sml Plate A is the Time-one Map of the Periodically Forced Twist, $a=8.2\;\;\alpha=0.01$; In Plate B, $a=8.1\;\;\alpha=0.01$ }
\label{fg:sum12}
\end{figure}
%06
%=======================The Hirsch Conjecture========================
\chapter{The Hirsch Conjecture}
\label{ch:hirsch}
\begin{center}
\parbox{3.5in}{\em The Hirsch Conjecture is the genesis of the Theory of Infinitesimal Diffeomorphism Equations}
\end{center}
\vs6\noi This chapter will use the results of all previous chapters to examine the problems associated with solving the Hirsch Conjecture. The chapter is divided into two sections. The first section explores systems containing single unilateral and bilateral shifts. The second section examines systems containing compound unilateral and bilateral shifts.
\section{\sml IDEs Containing Single Unilateral or Bilateral Shifts}
\label{sc:hirsch01}
The first example will demonstrate how to determine, informally, from its form, that the Ueda equation can produce chaotic solutions. Ueda's equation is
\[\ddot{x}+0.5 \dot{x} +x^3=7.5 \cos(t)\]
Separate this equation into two parts
\[\ddot{x}+0.5 \dot{x} +x^3 \;\;\;\ \mbox{and}\;\;\; 7.5 \cos(t)\]
\vs6\noi An IDE for
\begq
\label{eq:ueda1a}
\ddot{x}+0.5 \dot{x} +x^3
\endq
is given by
\begq
\exp(-h \ln(1.7)\P_2) \exp(h \A(\X)) \, \X
\endq
An IDE for
\begq
\label{eq:ueda1b}
7.5\, \cos(t)
\endq
is given by
\[\exp(h \ln(1.9)\P_2)\, \exp(2\cdot \pi \, h\, \B)\]
A fusion function is given by
\[s(w)=0.5\, (1+\tanh(\beta\,(b-\tanh(w))))\]
where $w$ is one coordinate of the HOC
\[\exp(.25\, \pi \, h\, \B)\]
The combined result is
\[\T_h(\X)= s(w)\cdot \exp(-h \ln(1.7)\P_2) \exp(h \A(\X)) \, \X +(1-s(w)) \exp(h \ln(1.9)\P_2)\, \exp(2\cdot \pi \, h\, \B) \]
The stable manifold is provided by the IDE for Eq. \ref{eq:ueda1a} and the unstable manifold is provided by the IDE for Eq. \ref{eq:ueda1b}. The crossing of the two manifolds is provided by $s(w)$.
Figure \ref{fg:ueda1a} illustrates the resulting IDE. Plate A is the time series and Plate B is the time-one map.
\begin{figure}[htbp]
\includegraphics[height=2.432in,width=4.387in,angle=0]{C:/Research/Book/Figures/eps/Ueda1a.eps}
\caption{\sml Plate A is the time series for the Ueda IDE; Plate B is the Time-one map for Plate A}
\label{fg:ueda1a}
\end{figure}
\begq
\label{cd:ued}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for Plate A of Fig. \ref{fg:ueda1a} is as follows:}\\
\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(2\, \pi \, h) + w_1\,\sin(2\, \pi \, h )\\
w &=& w_1\,\cos(2\, \pi \, h) - z_1\,\sin(2\, \pi \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
\\
u_1 &=& x \cdot \cos(x \cdot h) + y \cdot \sin(x \cdot h)/x\\
v_1 &=& \exp(-1.7 \cdot h)\,(y \cdot \cos(x \cdot h) - x^2 \cdot \sin(x \cdot h))\\
\\
u_2 &=& (x-z)\cdot\cos(2\, \pi\cdot h) + (y-z)\cdot\sin(2\, \pi\cdot h)+z\\
v_2 &=& \exp(1.9 \cdot h)\,((y-z)\cdot\cos(2\, \pi\cdot h) - (x-z)\cdot\sin(2\, \pi\cdot h))+z\\
\\
t &=& \tanh(w)\\
s &=& 0.5 \cdot (1 + \tanh(1.12 \cdot (0.1 - t)))\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi Deriving Fig. \ref{fg:ueda1a} required some art. The following examples will make clear why this is the present state of affairs.
\vs6\noi Figure \ref{fg:hirsch01} is the spherical {\em bilateral shift} derived in Sec. \ref{sc:shiftt}. While the radius is just the unilateral shift $2\, x\, \mod(1)$, the angular delay plot is two-dimensional. The form of this IDE is
\begq
\label{eq:hirsch01}
s(\X)\,\exp(h \ln(2)\I)\, \exp(h\,r\,\B)\,\X+(1-s(\X))\, \exp(h\, \pi\, \B)(\X-\X_0)+\X_0
\endq
\begin{figure}[htbp]
\includegraphics[height=2.6in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/Hirsch01.eps}
\caption{\sml Plate A is the Delay Map for the Radius of Eq. \ref{eq:hirsch01}: Plate B is the associated Delay Map for $\theta$}
\label{fg:hirsch01}
\end{figure}
\vs6\noi Figure \ref{fg:hirsch02} is produced by eliminating the twist from Fig. \ref{fg:hirsch01}. The form of the IDE for Fig. \ref{fg:hirsch02} is
\begq
\label{eq:hirsch02}
s(\X)\,\exp(h \ln(2)\I)\, \exp(h\,\B)\,\X+(1-s(\X))\, \exp(h\, \pi\, \B)(\X-\X_0)+\X_0
\endq
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.597in,angle=0]{C:/Research/Book/Figures/eps/Hirsch02.eps}
\caption{\sml Plate A is the Delay Map for the Radius of Eq. \ref{eq:hirsch02}: Plate B is the associated Delay Map for $\theta$}
\label{fg:hirsch02}
\end{figure}
\vs6\noi Morphologically the time-one plots of these two IDEs are the same, but their delay plots reveal that Eq. \ref{eq:hirsch02} is a unilateral shift.
\vs6\noi For reference, the code for Fig. \ref{fg:hirsch02} is
\begq
\label{cd:hir02}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Eq. \ref{eq:hirsch02} is as follows:}\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(2\, \pi \, h) + w_1\,\sin(2\, \pi \, h )\\
w &=& w_1\,\cos(2\, \pi \, h) - z_1\,\sin(2\, \pi \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &= & 0.5 \cdot (1 + \sgn(1 - 0.001 \cdot h - w_1))\\
q&=& 1-p_c\\
\\
u &=& x \cdot \cos( 3.13 \cdot h) + y \cdot \sin(3.13 \cdot h)\\
v &=& y \cdot \cos(3.13 \cdot h) - x \cdot \sin(3.13 \cdot h)\\
\\
u_1 &=& \exp( h\,\ln(2))\cdot u\\
v_1 &=& \exp( h\,\ln(2))\cdot v\\
\\
r &=& \sqrt{u_1 ^ 2 + v_1 ^ 2}\\
p_1 &=& 0.5 \cdot (1 + \tanh(100\cdot(1 - r)))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
x_0 &=& s \cdot (u_1 / r) \cdot (r - 0.5) + (1 - s) \cdot x_0\\
y_0 &=& s \cdot (v_1 / r) \cdot (r - 0.5) + (1 - s) \cdot y_0\\
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin( \pi\cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi\cdot h) - (x-x_0)\cdot\sin( \pi\cdot h)+y_0\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 +(1-s) \cdot v_2\\
\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.387in,width=4.397in,angle=0]{C:/Research/Book/Figures/eps/Hirsch03.eps}
\caption{\sml Plate A is the Time Series for a Periodically Forced Unilateral Shift: Plate B is the Time Series for Bilateral Shift used in Plate A}
\label{fg:hirsch03}
\end{figure}
\vs6\noi In particular, a periodic force of $0.01 \cdot w$ from the HOC is added to one component of the twist used in the formation of the bilateral shift time series. Both figures are rotated in three dimensions to provide additional insight into the dynamics.
\vs6\noi By adding a slight periodic force to a bilateral shift, the level of complexity is reduced. This is due to the fact that white noise will have some level of correlation with a periodic force which simplifies the global dynamics. The down side for the Hirsch Conjecture is that periodically forced nonlinear systems work to obscure the presence of the shift dynamics and the source of chaos. In particular, the greater the simplification of the algebraic form of a chaotic system, the more obfuscated the presence of the shift becomes. This is borne out by the complex details of the proof of the Smale Birkhoff Theorem.
\vs6\noi In addition to bilateral and unilateral shifts, the spherical representations of shifts allow for an intermediate level of complexity that appears in ergodic theory. This will be called the semi unilateral shift. This is obtained by changing the absolute value of the eigenvalue of $u_1$ in the code for Fig. \ref{fg:hirsch02} above to 1. Doing this provides the delay plot Plate A of Fig. \ref{fg:hirsch04a}. The delay plot of $\theta$ will be unchanged. Semi unilateral shifts clearly generalize to any number of dimensions. Semi bilateral shifts are similarly defined. In terms of form, the semi unilateral shift is
\[\exp(h\, \ln(2) \P_i)\, \exp(2\, \pi\, h \B)\]
Where the identity matrix is replaced by a projection.
\begin{figure}[htbp]
\includegraphics[height=2.407in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/Hirsch04a.eps}
\caption{\sml Plate A is the Delay Plot for the Radial Component for a Semi Unilateral Shift; Plate B is the associated Angular Delay Plot}
\label{fg:hirsch04a}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.47in,width=4.527in,angle=0]{C:/Research/Book/Figures/eps/Hirsch04b.eps}
\caption{\sml Plate A is the Time Series for the Semi Unilateral Shift; Plate B is the Associated Radial Delay Plot for Comparison to Fig. \ref{fg:hirsch04a}}
\label{fg:hirsch04b}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.433in,width=4.593in,angle=0]{C:/Research/Book/Figures/eps/Hirsch04c.eps}
\caption{\sml Plate A is the Delay Plot for the Radial Component for a Semi Bilateral Shift on Three Symbols; Plate B is the Associated Angular Delay Plot}
\label{fg:hirsch04c}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.377in,width=4.597in,angle=0]{C:/Research/Book/Figures/eps/Hirsch04d.eps}
\caption{\sml Plate A is the Delay Plot for the Radial Component for a Semi Unilateral Shift on Five Symbols; Plate B is the Associated Angular Delay Plot}
\label{fg:hirsch04d}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=1.6in,width=4.553in,angle=0]{C:/Research/Book/Figures/eps/Hirsch04.eps}
\caption{\sml Plate A is the Time Series for a Periodically Forced Semi-Unilateral Shift with $0.025 \cdot w$ forcing; Plate B is the Time Series for the Semi-Unilateral Shift with $0.03 \cdot w$ forcing }
\label{fg:hirsch04}
\end{figure}
\vs6\noi With a slight increase in the HOC forcing term, the time series of the forced semi unilateral shift converges to a periodic solution and all indications of the participation of the unilateral shift are lost, Fig. \ref{fg:hirsch04}.
\vs6\noi IDEs derived from semi-unilateral shifts are more closely related to classical three-dimensional systems such as that of Chua. Demonstrating that a semi unilateral shift is present in a system indicates the potential for chaos based on its form alone.
\vs6\noi The operative factor in the periodically forced semi-unilateral shift is as follows:
\[\begin{array}{lcl}
u &=& x \cdot \cos( 3.13 \cdot h) + y \cdot \sin(3.13 \cdot h)\\
v &=& y \cdot \cos(3.13 \cdot h) - x \cdot \sin(3.13 \cdot h)+a\cdot w\\
\\
u_1 &=& u\\
v_1 &=& \exp( h\,\ln(2))\cdot v
\end{array}\]
\begin{figure}[htbp]
\includegraphics[height=2.37in,width=4.607in,angle=0]{C:/Research/Book/Figures/eps/Hirsch05a.eps}
\caption{\sml Plate A is the Radial Delay Map for a Semi-Bilateral Shift; Plate B is the Angular Delay Map for a Semi-Bilateral Shift }
\label{fg:hirsch05a}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.6in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/Hirsch05.eps}
\caption{\sml Plate A is the Radial Delay Map for a Semi-Bilateral Shift; Plate B is the Angular Delay Map for a Semi-Bilateral Shift }
\label{fg:hirsch05}
\end{figure}
%=========================================Compound=========================
\section{\sml IDEs Formed from Compound Unilateral or Bilateral Shifts}
\label{sc:hirsch02}
This section is divided into two subsections. The first examines compound shifts formed from the abstract shift form. The second examines compound shifts formed from the spherical unilateral and bilateral shifts. In general, a compound unilateral or bilateral shift is when there are two interconnected unilateral or bilateral shifts.
\subsection{\sml Compound Shifts from the Abstract Shift Form}
\vs6\noindent An example of a compound bilateral shift on two symbols is constructed as follows:
\begin{equation}
\label{eq:hirsch01}% NOTE(review): duplicate label -- eq:hirsch01 is already defined in Sec.~\ref{sc:hirsch01}; rename this label (and the \ref's to it below) to avoid ambiguous cross-references
\left. \begin{array}{lcl}
x&\ra& 2\, x-[2\, (x+z)]\\
y&\ra& 2\, y-[2\, y]\\
z&\ra& (z+[2\, y])/2\\
w&\ra& (w+[2\, x])/2
\end{array}\right \}
\end{equation}
\begin{figure}[htbp]
\includegraphics[height=2.6in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/Hirsch06.eps}
\caption{\sml Plate A are the $(x,\,w)$ Coordinates for the Compound Bilateral Shift; Plate B are the $(y,\, z)$ Coordinates. }
\label{fg:hirsch06}
\end{figure}
\vs6\noi The essential code for Eq. \ref{eq:hirsch01} and Fig. \ref{fg:hirsch06} for $k=2.0$ is as follows:
\[ \begin{array}{lcl}
u_1 &=& k \cdot x - \In(k \cdot (x+z))\\
v_1 &=& (k \cdot y - \In(k \cdot y))\\
u_2 &=& (z + \In(k \cdot y)) / k\\
v_2 &=& (w + \In(k \cdot x)) / k\\
\\
x &=& u_1\\
y &=& v_1\\
z &=& u_2\\
w &=& v_2\\
&&\mbox{\bf Plot Point}
\end{array}\]
\vs6\noi Converting to the IDE special function $\xint$ provides Fig. \ref{fg:hirsch06a}
\begin{figure}[htbp]
\includegraphics[height=2.363in,width=4.563in,angle=0]{C:/Research/Book/Figures/eps/Hirsch06a.eps}
\caption{\sml Plate A are the $(x,\,w)$ Coordinates for the Compound Bilateral Shift; Plate B are the $(y,\, z)$ Coordinates. }
\label{fg:hirsch06a}
\end{figure}
\vs6\noi Eliminating $u_2$ and $z$ changes the morphology while providing a three-dimensional system.
\[ \begin{array}{lcl}
u_1 &=& k \cdot x - \In(k \cdot (x))\\
v_1 &=& (k \cdot y - \In(k \cdot a \cdot w)/k)\\
v_2 &=& (w + \In(k \cdot x)) / k\\
\\
x &=& u_1\\
y &=& v_1\\
w &=& v_2\\
&&\mbox{\bf Plot Point}
\end{array}\]
\vs6\noi This provides a system with a single unstable manifold and two stable manifolds.
\vs6\noi Translate Eq. \ref{eq:hirsch01} into an IDE as follows:
% NOTE(review): the display below is identical to the preceding one; the intended IDE translation appears to be missing -- verify against the source notes
\[ \begin{array}{lcl}
u_1 &=& k \cdot x - \In(k \cdot (x))\\
v_1 &=& (k \cdot y - \In(k \cdot a \cdot w)/k)\\
v_2 &=& (w + \In(k \cdot x)) / k\\
\\
x &=& u_1\\
y &=& v_1\\
w &=& v_2\\
&&\mbox{\bf Plot Point}
\end{array}\]
%===============================
\subsection{\sml Compound Shifts from the Spherical Shift Form}
Shifts occurring in Nature and differential equations arise from the spherical shift forms. Commonly, they arise by replacing the $\mod(1)$ dynamic with a hyperbolic IDE. The simplest example of this is the Chua IDE where two copies of a spherical unilateral shift are joined to form a chaotic IDE. The occurrences of spherical shifts without the $\mod(1)$ dynamic are so common that they require a separate designation. Removing the $\mod(1)$ function from the spherical shifts leaves an unbounded hyperbolic source and therefore is not formally an IDE. But these two sources occur so
frequently that a separate definition is justified.
\begin{definition}{\bf Standard Spherical Sources}
\vs6\noi
In these definitions $k$ is any complex number, $\I$ is the n-dimensional identity matrix, $\omega$ is any real number and $\B^2=-\I$.
\[\exp(h\, \ln(k)\I)\exp(h\,\omega \, \B)\]
is the standard n-dimensional linear source (SLS).
\[\X\ra \exp(h\, \ln(k)\I)\exp(h\,\omega \, f(r)\, \B)\, \X\]
where $r=\|\X\|$ is the standard n-dimensional nonlinear source (SNS).
\end{definition}
\vs6\noi Typically $k$ is an integer. For $h=1$, $\exp(h\, \ln(k)\, \I)=k\, \I$.
\vs6\noi In general, wherever a spherical unilateral shift occurs, it can be replaced by a spherical bilateral shift.
\begin{figure}[htbp]
\includegraphics[height=2.41in,width=4.56in,angle=0]{C:/Research/Book/Figures/eps/Hirsch06b.eps}
\caption{\sml Plate A is the Time Series for a Compound Unilateral Shift; Plate B is the Modified Chua IDE }
\label{fg:hirsch06b}
\end{figure}
\begq
\label{cd:hir06}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Plate A of Fig. \ref{fg:hirsch06b} is as follows:}\\
u &=& x - 0.9 \cdot z\\
a &=& \sin(u) + \sin(3 \cdot u) / 3\\
u_1 &=& \exp(h \cdot \ln(1.01)) \cdot ((x - a) \cdot \cos(h \cdot 0.3) + y \cdot \sin(h \cdot 0.3)) + a\\
v_1 &=& \exp(h \cdot \ln(1.01)) \cdot (y \cdot \cos(h \cdot 0.3) - (x - a) \cdot \sin(h \cdot 0.3))\\
v_2 &=& \exp(-h \cdot \ln(1.08)) \cdot (z - 0.8\cdot y) + 0.8 \cdot y\\
\\
x &=& u_1\\
y &=& v_1\\
z &=& v_2
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.41in,width=4.56in,angle=0]{C:/Research/Book/Figures/eps/Hirsch06c.eps}
\caption{\sml The Time Series for a Compound Unilateral Shift}
\label{fg:hirsch06c}
\end{figure}
\begq
\label{cd:hir}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:hirsch06c} is as follows:}\\
u &=& x - 0.9 \cdot z\\
a &=& \sin(u) + \sin(3 \cdot u) / 3\\
u_1 &=& \exp(h \cdot \ln(2)) \cdot ((x - a) \cdot \cos(17 \cdot h) + y \cdot \sin(17 \cdot h)) + a\\
v_1 &=& \exp(h \cdot \ln(2)) \cdot (y \cdot \cos(17 \cdot h) - (x - a) \cdot \sin(17 \cdot h))\\
v_2 &=& \exp(-h \cdot \ln(3)) \cdot (z - 5.0\cdot y) + 5.0 \cdot y\\
\\
x &=& u_1\\
y &=& v_1\\
z &=& v_2
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.333in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/Hirsch06d.eps}
\caption{\sml Formation of the Compound Unilateral Shift}
\label{fg:hirsch06d}
\end{figure}
\vs6\noi In Fig. \ref{fg:hirsch06c} there are two unilateral shifts. The unstable manifolds are morphologically $2\, x\, \mod(1)$. The mod(1) dynamic is replaced by feeding the unstable manifold of one system into the stable manifold of the other forming a compound unilateral shift that is equivalent to a bilateral shift. The two separate unilateral shifts are separated by the line $z=1.111 \, x$. The centers are approximately $x=\pm 0.92$.
\vs6\noi The form of the two hyperbolic components of this IDE is
\begin{eqnarray}
\label{eq:hirsch06c}
\X &\ra & \exp(h \ln(2)\I)\exp(17\, h \B)(\X-\X_0)+\X_0\\
\label{eq:hirsch06c1}
z &\ra & \exp(-h \, \ln(3))(z-5.0\,y)+5.0\,y
\end{eqnarray}
where $\X_0=\pm 0.92$. Eq. \ref{eq:hirsch06c} is the hyperbolic factor of a unilateral shift, the SLS. The $y$ component feeds into the stable manifold of the IDE, Eq. \ref{eq:hirsch06c1}, according to the form of the bilateral shift IDE in the previous chapter:
\[ \exp(-h\, \ln(3))(z-b\, y)+b\, y\]
This combination forms a bilateral shift.
\vs6\noi The SLS can be replaced by the SNS to obtain a more complex example.
\begin{figure}[htbp]
\includegraphics[height=2.357in,width=4.567in,angle=0]{C:/Research/Book/Figures/eps/Hirsch06e.eps}
\caption{\sml Time Series for the Compound Unilateral Shift with $2\,\tanh(\beta\,(x-3 \cdot z))$ Boundary Condition; Plate B $\tanh(\beta \,(x-5\cdot z))$}
\label{fg:hirsch06e}
\end{figure}
\vs6\noi The essential property of the boundary function, $f$ is that $f(-\X)=-f(\X)$. See Fig. \ref{fg:hirsch06e}. This provides the potential for dual centers to appear depending on the slope of the boundary line $z=g(x)$.
%==================================Variations=====================
\section{\sml Variations on Unilateral and Bilateral Shifts: The Single Scroll}
\label{sc:scroll}
One of the most frequently occurring variations on the shift is the single scroll found in the Chua IDE. A single scroll can be formed from either unilateral or bilateral shifts. In general, a scroll is formed by a modification of the boundary conditions defining a shift. Figure \ref{fg:scroll01} is an example of a single scroll formed by modifying the boundary conditions of a unilateral shift.
\begin{figure}[htbp]
\includegraphics[height=2.453in,width=4.573in,angle=0]{C:/Research/Book/Figures/eps/Scroll01.eps}
\caption{\sml Time Series for the Single Scroll from the Unilateral Shift; Plate B is the Delay Plot for Plate A}
\label{fg:scroll01}
\end{figure}
\begq
\label{cd:scr}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for Fig. \ref{fg:scroll01} is as follows:}\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(2\, \pi \, h) + w_1\,\sin(2\, \pi \, h )\\
w &=& w_1\,\cos(2\, \pi \, h) - z_1\,\sin(2\, \pi \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &= & 0.5 \cdot (1 + \sgn(1 - 0.001 \cdot h - w_1))\\
q&=& 1-p_c\\
\\
u &=& x \cdot \cos( 3.13 \cdot h) + y \cdot \sin(3.13 \cdot h)\\
v &=& y \cdot \cos(3.13 \cdot h) - x \cdot \sin(3.13 \cdot h)\\
\\
u_1 &=& \exp( h\,\ln(2))\cdot u\\
v_1 &=& \exp( h\,\ln(2))\cdot v\\
\\
q &=& 0.5 \cdot (1 + \sgn(y + y_0))\\
p &=& 0.5 \cdot (1 + \sgn(1 - (x - 0.235 \cdot x ^ 2)))\\
s &=& p \cdot (s + q - s \cdot q)\\
\\
y_0 &=& s \cdot y + (1 - s) \cdot y_0
\\
u_2 &=& (x-x_0)\cdot\cos(\pi\cdot h) + (y-y_0)\cdot\sin( \pi\cdot h)+x_0\\
v_2 &=& (y-y_0)\cdot\cos(\pi\cdot h) - (x-x_0)\cdot\sin( \pi\cdot h)+y_0\\
\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 +(1-s) \cdot v_2\\
\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi The divergence of Fig. \ref{fg:scroll01} from a unilateral shift on three symbols is slight; the most conspicuous feature is the presence of a slight amount of curvature resulting from the boundary condition $ p = 0.5 \cdot (1 + \sgn(1 - (x - 0.235 \cdot x ^ 2)))$.
\begin{figure}[htbp]
\includegraphics[height=2.413in,width=4.54in,angle=0]{C:/Research/Book/Figures/eps/Scroll01a.eps}
\caption{\sml Time Series for the Single Scroll from the Unilateral Shift using $\tanh(10 \cdot u)$; Plate B is the Delay Plot for Plate A}
\label{fg:scroll01a}
\end{figure}
\vs6\noi The code for Fig. \ref{fg:scroll01a} is the same as Fig. \ref{fg:scroll01} except that $p = 0.5 \cdot (1 + \tanh(10 \cdot (1 - (x - 0.235 \cdot x ^ 2))))$. The hyperbolic tangent with slope 10 at 0 blends the dynamics between the two IDEs obscuring the delay plot.
%=======================================================Sources of nonlinearity=======================
%07
\chapter{Nonlinear Systems in Two Dimensions}
\label{ch:nl}
\begin{center}
\parbox{3.5in}{\em Nonlinearity is a fundamental building block of all complexity and may come in a wide array of degrees of complexity. Two-dimensional systems provide an initial insight into how complexity is formed from nonlinearity and how IDEs capture these differences in nonlinearity. }
\end{center}
\vs6\noi Nonlinearity has two basic forms: measure preserving and non measure preserving.
\section{\sml Measure preserving nonlinear systems in two dimensions}
\label{sc:nl}
\vs6\noi Chapter \ref{ch:hirsch} informally examined the extent to which it is possible to determine whether an IDE has chaotic solutions based solely on its form. The SLS and SNS were integral to the form of the IDEs of the unilateral and bilateral shifts which are the purest form of chaos. This chapter presents five examples that demonstrate that functions of a shift may be present in an IDE without the presence of either the SLS or the SNS. At this time, there are no formal theorems to prove the existence of chaos in these IDEs other than the Smale-Birkhoff theorem.
\vs6\noi The five examples presented provide some insight into the difficulty of resolving the Hirsch Conjecture in that there are no obvious shifts present in the form of the IDEs.
\vs6\noi Nonlinearity is a fundamental building block of complexity. It is the source of stretching in the stretching and folding paradigm. This chapter presents five classes of two-dimensional periodic nonlinear systems that produce different forms of stretching that can contribute to the formation of chaos without an algebraic shift appearing in its form. When combined with folding, each of these systems can produce complexity. However, the degree of the complexity, especially locally, is determined by the type of nonlinearity, which in turn, depends on how the initial conditions appear in the solution of the ODE. Of the five classes, in three cases each orbit will be linear and the distinguishing feature will be angular velocity, nonlinear amplitude and compression and stretching along each orbit. The two cases of nonlinear orbits will be distinguished by the presence or absence of nonzero divergence. Many more cases are possible, but these five will serve to make the point that the degree of complexity depends on the dynamics of the nonlinearity which is related to the curvature of the IDE. In this respect, the five examples suggest that curvature plays a similar role to the SLS and SNS found in the standard spherical shifts.
\begin{figure}[htbp]
\includegraphics[height=1.197in,width=2.617in,angle=0]{C:/Research/Book/Figures/eps/TwoPhaseGate.eps}
\caption{\sml Flow Chart for a Two-Phased Gate Fusing Two IDEs. Each IDE Receives Equal Time}
\label{fg:twopgate}
\end{figure}
\vs6\noi As demonstrated in the previous chapter, a mod(1) operation is essential to define a unilateral or bilateral shift. As was demonstrated, the analog of the mod(1) operation is a semi circle or in terms of an IDE it is
\[\S_h = \exp(\pm h\, \pi\, \B)(\X-\X_0)+\X_0 \]
The choice of the sign depends on the problem.
\vs6\noi The significance of this is that if a nonlinear periodic system is fused with a semi circle, the result may generate complexity in that the semi circle IDE is acting like a mod(1) operation (folding) while the nonlinear periodic IDE is providing stretching. This combination contains within its form a shift that is not immediately obvious. Since fusion will be the algebraic method of linking a nonlinear periodic system to a semi circle, the following pattern will be used extensively:
\[p_c\, \exp(h\, \A(\X))+ (1-p_c)\, \exp(-h\, \pi \, \B)\]
where $p_c(w)=0.5\, (1+\sgn(w))$ and $w$ is a variable in the HOC.
\vs6\noi There are two variations on using $\S_h$. The simplest variation is when the parameter $\X_0$ is constant. The second variation is when $\X_0$ is determined by $\T_h$. An example of the occurrence of the second form is when a simple translation is needed $x\ra x+\tau$. The constant $\tau$ is supplied by a semi-circle of diameter $\tau$.
\vs6\noi IDE theory provides an orderly method of deriving nonlinear periodic systems that can be combined with folding to produce chaos. This will be presented in the following section.
%===========================================================
\subsection{\sml IDE Theory for Two-dimensional Autonomous Nonlinear Systems }
The lemma on which the IDE theory for two-dimensional systems is developed is as follows
\begin{lemma}
(1) Every two-dimensional autonomous ODE can be put into the following form:
\begq
\label{eq:2dvf}\l(\vt \dot{x}. \dot{y} \par\r)=\l(\frac{\dot{r}}{r}{\bf I}
+\dot{\theta} {\bf B}\r)\l(\vt x. y \par \r) \endq
(2) The IDE associated to this form of the ODE is given by
\[\T_h=\exp(h\, (\dot{r}/r)\, \I)\, \exp(h\,\dot{\theta} \,\B) \]
(3) the divergence of (1) is
\[\frac{1}{r}<{\rm X},\nabla \dot{r}>+\frac{\dot{r}}{r}+<{\bf
B}{\rm X}, \nabla \dot{\theta}>=f(\X,r,\theta)\]
\end{lemma}
\pf (1) is a standard result;
(2) follows from the fact that $\I$ commutes with $\B$. The proof of (3) is more involved.
\vs6\noi First some notation.
\begq
\nabla \sbt f \,X=f\nabla\sbt X+X\sbt\nabla f
\label{eq:1}
\endq
\begq
r^2=x^2+y^2, \hspace{10pt}r\dot{r}=x\dot{x}+y\dot{y}, \hspace{10pt} r_x=\frac{x}{r}, \hspace{10pt} r_y=\frac{y}{r}
\label{eq:2}
\endq
Eq. \ref{eq:2dvf} is rearranged as follows
\begq
\l(\vt \dot{x}. \dot{y} \par\r)=\frac{\dot{r}}{r}\l(\vt x. y \par \r)
+\dot{\theta} {\bf B}\l(\vt x. y \par \r)
\label{eq:4}
\endq
\begq
\l(\vt \dot{x}. \dot{y} \par\r)=\frac{\dot{r}}{r}\l(\vt x. y \par \r)
+\dot{\theta}\l(\vt -y. x \par \r)
\label{eq:5}
\endq
\vspace{12pt}
\begq
\nabla \sbt \l(\vt \dot{x}. \dot{y} \par\r)=\nabla\sbt\,\frac{\dot{r}}{r}\l(\vt x. y \par \r)
+\nabla\sbt\, \dot{\theta}\l(\vt -y. x \par \r)
\label{eq:6}
\endq
Equation \ref{eq:6} is divided into two parts, \ref{eq:7} and \ref{eq:8}
\begq
\nabla\sbt\l(\frac{\dot{r}}{r}\l(\vt x. y \par \r)\r)= \frac{\dot{r}}{r}\nabla\sbt\l(\vt x. y \par \r)+\l(\vt x. y \par \r)\sbt \nabla\frac{\dot{r}}{r}
\label{eq:7}
\endq
\begq
\nabla\sbt\l( \dot{\theta}\l(\vt -y. x \par \r)\r) =\dot{\theta} \nabla\sbt \l(\vt -y. x \par \r) + \l(\vt -y. x \par \r)\sbt \nabla\dot{\theta}
\label{eq:8}
\endq
\vspace{12pt}
Now compute:
\begq
\nabla \l(\frac{\dot{r}}{r}\r)
\label{eq:9}
\endq
\begq
\parlx \l(\frac{\dot{r}}{r}\r)= \frac{1}{r}\dot{r}_x-\frac{1}{r^2}\dot{r}r_x
\label{eq:10}
\endq
\begq
\parly \l(\frac{\dot{r}}{r}\r)=\frac{1}{r}\dot{r}_y-\frac{1}{r^2}\dot{r}r_y
\label{eq:11}
\endq
\vspace{12pt}
\noi Combine \ref{eq:10} and \ref{eq:11} to get the second term of \ref{eq:7}
\begq
\l(\vt x. y \par \r)\sbt \nabla\frac{\dot{r}}{r}=x\parlx\l(\frac{\dot{r}}{r}\r)+y \parly\l(\frac{\dot{r}}{r}\r)
\label{eq:12}
\endq
\begq
=x\l(\frac{1}{r}\dot{r}_x-\frac{1}{r^2}\dot{r}r_x\r)+y \l(\frac{1}{r}\dot{r}_y-\frac{1}{r^2}\dot{r}r_y\r)
\label{eq:13}
\endq
\noi and the first term of \ref{eq:7} is given by
\begq
\frac{\dot{r}}{r}\nabla\sbt\l(\vt x. y \par \r)=2\frac{\dot{r}}{r}
\label{eq:14}
\endq
\vspace{12pt}
\noi Now compute \ref{eq:8}. First note
\begq
\dot{\theta} \nabla\sbt \l(\vt -y. x \par \r)=0
\label{eq:15}
\endq
\noi and
\begq
\l(\vt -y. x \par \r)\sbt \nabla\dot{\theta}=x\dot{\theta}_y-y\dot{\theta}_x
\label{eq:16}
\endq
\vs6\noi Combining \ref{eq:15} and \ref{eq:16}, \ref{eq:8} is simplified to equation \ref{eq:17}
\begq
\nabla\sbt\l( \dot{\theta}\l(\vt -y. x \par \r)\r) = \l(\vt -y. x \par \r)\sbt \nabla\dot{\theta} =x\dot{\theta}_y-y\dot{\theta}_x
\label{eq:17}
\endq
\noi Combine \ref{eq:13} and \ref{eq:14} to get for \ref{eq:7}
\begq
x\l(\frac{1}{r}\dot{r}_x-\frac{1}{r^2}\dot{r}r_x\r)+y \l(\frac{1}{r}\dot{r}_y-\frac{1}{r^2}\dot{r}r_y\r)+2\frac{\dot{r}}{r}
\label{eq:18}
\endq
\noi rearrange \ref{eq:18} to get
\begq
\frac{x}{r}\dot{r}_x+\frac{y}{r}\dot{r}_y -\l(\frac{x}{r^2}r_x+\frac{y}{r^2}r_y\r)\dot{r}+2\frac{\dot{r}}{r}
\label{eq:19}
\endq
\noi which is equal to
\begq
\frac{x}{r}\dot{r}_x+\frac{y}{r}\dot{r}_y -\l(\frac{x^2}{r^3}+\frac{y^2}{r^3}\r)\dot{r}+2\frac{\dot{r}}{r}
\label{eq:20}
\endq
\begq
=\frac{x}{r}\dot{r}_x+\frac{y}{r}\dot{r}_y -\l(\frac{x^2+y^2}{r^3}\r)\dot{r}+2\frac{\dot{r}}{r}
\label{eq:21}
\endq
\begq
=\frac{x}{r}\dot{r}_x+\frac{y}{r}\dot{r}_y -\l(\frac{r^2}{r^3}\r)\dot{r}+2\frac{\dot{r}}{r}
\label{eq:22}
\endq
\begq
=\frac{x}{r}\dot{r}_x+\frac{y}{r}\dot{r}_y +\frac{\dot{r}}{r}
\label{eq:23}
\endq
\begq
=r_x\dot{r}_x+r_y\dot{r}_y +\frac{\dot{r}}{r}
\label{eq:24}
\endq
\vs6\noi Combine \ref{eq:7} and \ref{eq:8} to get the divergence of \ref{eq:2dvf} as
\begq
\nabla r\sbt\nabla \dot{r} +\frac{\dot{r}}{r}+x\dot{\theta}_y-y\dot{\theta}_x
\label{eq:25}
\endq
\begq
\nabla r\sbt\nabla \dot{r} +\frac{\dot{r}}{r}+\nabla\dot{\theta}\sbt {\bf B}\l(\vt x. y \par \r)
\label{eq:26}
\endq
Since
\begq
\nabla r = \frac{1}{r}\l(\vt x. y \par \r)
\label{eq:27}
\endq
the divergence is
\begq
\frac{1}{r}\l(\vt x. y \par \r)\sbt\nabla \dot{r} +\frac{\dot{r}}{r}+\nabla\dot{\theta}\sbt {\bf B}\l(\vt x. y \par \r)
\label{eq:28}
\endq
\begq
=\frac{1}{r}\l(\vt x. y \par \r)\sbt\nabla \dot{r} +{\bf B^T}\nabla\dot{\theta}\sbt \l(\vt x. y \par \r)+\frac{\dot{r}}{r}
\label{eq:29}
\endq
\begq
=<\frac{1}{r}\nabla \dot{r},\l(\vt x. y \par \r)> +<{\bf B^T}\nabla\dot{\theta}, \l(\vt x. y \par \r)>+\frac{\dot{r}}{r}
\label{eq:30}
\endq
\begq
=<\frac{1}{r}\nabla \dot{r} +{\bf B^T}\nabla\dot{\theta}, \l(\vt x. y \par \r)>+\frac{\dot{r}}{r}
\label{eq:31}
\endq
\begq
=<\frac{1}{r}\nabla \dot{r} +{\bf B^T}\nabla\dot{\theta}, \hspace{3pt} {\bf X}>+\frac{\dot{r}}{r}
\label{eq:32}
\endq
\begq
=<\frac{1}{r}\nabla \dot{r} -{\bf B}\nabla\dot{\theta}, \hspace{3pt} {\bf X}>+\frac{\dot{r}}{r}
\label{eq:33}
\endq
\rl
\vs6\noi Adding the divergence condition
\[\frac{1}{r}<{\rm X},\nabla \dot{r}>+\frac{\dot{r}}{r}+<{\bf
B}{\rm X}, \nabla \dot{\theta}>=f(\X,r,\theta)\]
provides a general method for deriving examples for which there are no exact solutions. Other partial differential equations may be used.
The only complication arises when $\dot{\theta}$ changes sign causing $\exp(h\,\dot{\theta} \,\B)$ to shift between elliptic and hyperbolic functions. This is handled by fusing the two possibilities using the fusion function $0.5(1+\tanh(\beta \, \dot{\theta}))$.
\vs6\noi The form
\[\T_h=\exp(h\, (\dot{r}/r)\, \I)\, \exp(h\,\dot{\theta} \,\B) \]
is immediately suggestive of the form of the SLS:
\[\R_h=\exp(h\, \ln(2)\, \I)\, \exp(h\,r \,\B) \]
\vs6\noi In particular, when $\dot{\theta}=r$ and $\dot{r}/r=\ln(2)$ an example of the SNS is obtained. However, when $\dot{r}=0$, chaos is still possible. What changes is that the shift is not global, but rather local and dependent of the initial conditions. This is an unaesthetic complication but it is the source of KAM island chains which occur in certain nonlinear systems such as the five presented in this chapter but which do not occur in the shifts. It is this difference, the emergence of KAM island chains, that complicates resolving the Hirsch Conjecture.
\vs6\noi Fusing $\T_h$ with $\exp(\pm h\, \pi \, \B)$, the analog of $ x\, \mod(1)$, using the HOC provides a very general method for forming complex systems in order to model processes where ODEs are made impractical by the need for numerical integration.
\vs6\noi The result of the foregoing construction is a four-dimensional autonomous IDE. The Flow Chart is presented in Fig. \ref{fg:gen2d}
\begin{figure}[htbp]
\includegraphics[height=1.8in,width=2.773in,angle=0]{C:/Research/Book/Figures/eps/Gen2D.eps}
\caption{\sml The General Flow Chart for Complex Four-dimensional Systems Induced by Two-dimensional ODEs}
\label{fg:gen2d}
\end{figure}
\vs6\noi As is noted in a following section, the Gradient of the divergence is a measure of the potential for complexity.
\[\begin{array}{lcl}
\mbox{\bf Example \ref{ex:st}}\\
\dot{r}= 0&&\\
\dot{\theta}=r&&\\
\mbox{IDE}\;\;\; \exp(h r \B)\\
\\
\mbox{\bf Example \ref{ex:tf}}\\
\dot{r}= f(\theta)&&\\
\dot{\theta}=1&&\\
\mbox{IDE}\;\;\;\exp(h\, f(\theta)\, \I)\, \exp(h\,\B) \\
\\
\mbox{\bf Example \ref{ex:je}}\\
\dot{r}= 0&&\\
\dot{\theta}= g(\theta)&&\\
\mbox{IDE}\;\;\;\, \exp(h\,g(\theta) \,\B) \\
\\
\mbox{\bf Example \ref{ex:nlo}}\\
\dot{r}=g(r, \theta)\\
\dot{\theta}=f(\theta)\\
\frac{1}{r}<{\rm X},\nabla g(r,\theta)>+g(r,\theta)/r+<{\bf
B}{\rm X}, \nabla f(\theta)>=0&&\\
\mbox{IDE}\;\;\;\exp(h\, g(r,\theta)\, \I)\, \exp(h\,f(\theta) \,\B) \\
\\
\mbox{\bf Example \ref{ex:nzd}}\\
\dot{r}=g(r, \theta)\\
\dot{\theta}=f(\theta)\\
\frac{1}{r}<{\rm X},\nabla g(r,\theta)>+g(r, \theta)/r+<{\bf
B}{\rm X}, \nabla f(\theta)>\neq 0&&\\
\mbox{IDE}\;\;\;\exp(h\, g(r, \theta)\, \I)\, \exp(h\,f(\theta) \,\B)
\end{array}\]
\vs6\noi Each of the two components of the stretching IDE can be expressed in closed form in terms of elementary functions as follows:
\[\exp(h\, (\dot{r}/r)\, \I)= \l(\mtx e^{h\,\dot{r}/r}.0.0. e^{h\,\dot{r}/r} \par \r) \]
\[\exp(h\, \dot{\theta}\, \B)= \l(\mtx \cos(h\,\dot{\theta}).\sin(h\,\dot{\theta}).-\sin(h\,\dot{\theta}). \cos(h\,\dot{\theta}) \par \r) \]
Combining these two components in a single equation gives
\[\T_h= \l(\mtx e^{h\,\dot{r}/r}.0.0. e^{h\,\dot{r}/r} \par \r) \, \l(\mtx \cos(h\,\dot{\theta}).\sin(h\,\dot{\theta}).-\sin(h\,\dot{\theta}). \cos(h\,\dot{\theta}) \par \r) \]
Writing this out in programmable form gives
\begin{eqnarray}
x &\ra& e^{h\,\dot{r}/r}\, (\cos(h\,\dot{\theta})\cdot x+ \sin(h\,\dot{\theta})\cdot y)\\
y &\ra& e^{h\,\dot{r}/r}\, (\cos(h\,\dot{\theta})\cdot y- \sin(h\,\dot{\theta})\cdot x)
\end{eqnarray}
\vs6\noi As the examples will show, nonlinearity needs folding to create complexity. However, regardless of the folding dynamic, the transitions between stretching and folding are not all created equally. Specifically, as mentioned above, nonlinear processes differ significantly in the {\em type} of nonlinearity. In particular, how the initial conditions enter the solution of an ODE reveals the degree of complexity. The nonlinear occurrence of the initial conditions in complex ways makes clear why it is so difficult to derive ODEs from time series and why nonlinear ODEs are so difficult to solve. IDEs avoid this problem because they are local solutions of ODEs when they arise from an ODE.
\vs6\noi For applications, the type of nonlinearity plays a significant role in predicting local, short-term dynamics such as tornados, earthquakes, structural failures and volcanoes. How the initial conditions occur is directly related to complexity and the ability to predict the short term dynamics. The following example illustrates the difference of the occurrence in the initial conditions of a solution of an ODE between an ODE that is linear and an ODE that is nonlinear.
\begin{example} {\bf Linear versus Nonlinear ODE \cite{bi:hd}}
\[x(t)=\exp(t)\, x_0\]
is the solution of a linear ODE
\[\dot{x}=x\]
The initial condition is $x_0 =\exp(-t)\, x(t)$ demonstrating that the initial condition can easily be solved for in terms of the time-dependent solution.
\vs6\noi In contrast to the linear example,
\[x(t)=\frac{x_0}{ x_0+(1-x_0)\exp(-t)}\]
solves the nonlinear ODE
\[\dot{x}=x-x^2\]
Inverting this equation to solve for the initial condition is a bit more complicated. In very complicated equations, it is necessary to appeal to the inverse function theorem \cite{bi:rcb}, page 281 to prove that inversion is possible.
\end{example}
\vs6\noi Only two forms of folding will be used in this chapter because of their similarity to the mod(1) operation. This will facilitate recognizing when an equation has the potential to be chaotic from its form.
\vs6\noi What will be observed from the examples is that variations in stretching and folding result in very dramatic variations in dynamics, even when the variations are small. These facts make clear why natural phenomena such as occurs in climate and weather, biological systems and human systems are so challenging to predict or even model over the short term: stretching and folding, and their relationship, are varying over time, and even over short time intervals.
\vs6\noi Each example in this chapter can be embedded as a time one map for an IDE (proposition \ref{pr:emb}); each example can be approximated as a first return map for an IDE (proposition \ref{pr:emb02}) that is formed by fusion. This is a result of the fact that every example in this chapter is derived from an ODE and thus the closed form solutions of the associated nonlinear ODEs, $\X(t)$, provide an IDE $\X(h)$. Fusion cannot produce an exact time one map from a given discrete map since the IDE has a step size $h$ whereas a discrete map does not.
\vs6\noi The significance of IDE theory to modeling natural processes is to view the IDE as a snapshot in time that evolves over time as parameters of the system change. By taking snapshots as parameters evolve, a movie can be constructed that replicates a natural process.
%================================================================
\subsection{\sml Flow Charts}
Every example in this chapter will have the form
\[\sg_\beta(\X)\,\exp(h \, \A(\X))+(1-\sg_\beta(\X))\,(\exp(-h\,\pi \,\B)\, (\X-\X_0)+\X_0)\]
See Fig. \ref{fg:nlfusionf}.
$\exp(h \, \A(\X))$ provides the stretching component and $\exp(-h\,\pi \,\B)\, (\X-\X_0)+\X_0$ provides the mod(1), or folding, component. The difference in the two examples is determined by how $\X_0$ is produced.
\begin{figure}[htbp]
\includegraphics[height=2.597in,width=3.0in,angle=0]{C:/Research/Book/Figures/eps/NLFusionF.eps}
\caption{\sml Flow Chart for a Two-Phased Gate Fusing of Two IDEs. Plate A is simple fusion; Plate B is Plate A with $\X_0$ being determined by $\T_h$}
\label{fg:nlfusionf}
\end{figure}
\vs6\noi Plate B requires adding the following line of code to Plate A code:
\[x_0=p_c\, (x-.5)+ (1-p_c)\, x_0;\;\;\; y_0=p_c\, (y-.5)+ (1-p_c)\, y_0 \]
The effect of this line of code is to remember the point where $\T_h$ ended and to fix the center of the semi circle for as long as $p_c=0$. The choice of $0.5$ is to assure that the semi circle has diameter 1 making it the analog of $\mod(1)$ operation.
\vs6\noi In Plate A, $\X_0$ is constant and the semi circle is a 180 degree rotation about $\X_0$ or a {\em flip}. In Plate B, $\X_0$ is determined by $\T_h$ and functions like an affine translation of one unit. This will be illustrated in the following sections.
%=====================================================ONE The Twist Equation====================
\subsection{\sml The Twist Equation}
\label{sc:st}
\begin{example}{\sml \bf The Simple Twist}
\label{ex:st}
\vs6\noi Consider
\[\l(\vt \dot{x}. \dot{y} \par\r)=r\l(\mtx 0. -1. 1. 0 \par
\r)\l(\vt x. y \par \r) \]
where $r=\sqrt{x^2+y^2}$. The time-one map
is the simple twist map.
\[\l(\vt x. y \par\r)\ra \l(\mtx \cos(r). -\sin(r). \sin(r). \cos(r) \par
\r)\l(\vt x. y \par \r) \]
\end{example}
\vs6\noi This equation is derived by
asking the question: What is the simplest way in which a linear
equation can be made nonlinear? Since, in the linear oscillator
the initial conditions determine the amplitude of the system, a
simple step would be to consider the family of curves given by
\[\l(\vt x(t). y(t) \par\r)=\l(\vt r \cos(r\,t+ \theta). r
\sin(r \,t + \theta)\par \r) \]
where $r$ is given above and is a function of the initial
conditions. Since $r$ affects both amplitude and frequency, the
ODE that this system solves must be nonlinear.
The effect of multiplying $t$ by a function of the initial
conditions is to cause neighboring orbits to separate, or to move
at different speeds.
\vs6\noi The orbits of the twist ODE in the phase
plane are identical to those of the linear equation from which it was
derived: a set of concentric circles. What is different is that the angular velocity varies with the orbit so that two observers riding on different orbits that are close will experience a constant separation.
\vs6\noi Composing linear folding in the form of a 180 degree rotation with the twist produced Fig.\ref{fg:tangles}.
\vs6\noi The time one map is $\F \circ \T$ where $\F(\X)=-\X$ and the twist is given by
\[\T(\X)=\exp(r\, \B)(\X-\C)+\C\]
where
\[\C=\l( \vt 1.0 \par \r)\]
\vs6\noi This procedure is very general: Given any linear autonomous ODE in any number of
dimensions there is an infinite family of
nonlinear autonomous ODEs having the same
set of orbits, fixed points, and types of fixed points.
The solutions differ only in that in the nonlinear
case $t$ is multiplied by a function of the initial conditions
that are constant along orbits.
%================================================================Twist and Flip IDE==========================
\subsection{\sml The Discrete Twist and Flip as a Time One Maps of an IDE}
\begin{example}{\sml \bf First Return Map for the Twist and Flip Map, see Fig. \ref{fg:twistft}}
\label{ex:tf}
\[\sg_\beta(\X)\,\exp(h \, \A(\X))+(1-\sg_\beta(\X))\,\exp(h\,\C(\X))\]
where $\A(\X)=r\, \B$ and $\C=\omega \, \B$ and $\sg_\beta=0.5\,(1+\tanh(\beta \,z))$
\end{example}
\begin{figure}[htbp]
\includegraphics[height=2.397in,width=4.46in,angle=0]{C:/Research/Book/Figures/eps/TwistFT01.eps}
\caption{\sml Plate A is the Twist and Flip Map time series; Plate B is the First Return Twist and Flip IDE to the hyper plane $z=0$.}
\label{fg:twistft01}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.413in,width=4.44in,angle=0]{C:/Research/Book/Figures/eps/TwistFT.eps}
\caption{\sml Plate A is the Twist and Flip Map; Plate B is the First Return Twist and Flip IDE to the hyper plane $z=0$. Plate B can only be an approximation as noted above. }
\label{fg:twistft}
\end{figure}
\vs6\noi In order to construct the full four-dimensional example, the HOC must be included to compute $z$. That is reflected in the code for Fig. \ref{fg:twistft}
\begq
\label{cd:twtf}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:twistft} is as follows:}\\
\alpha&=& 2\, \pi\\
\beta &=&200\\
h&=& 1\\
&& \mbox{For i = 1 to 200000}\\
z_1& = &z\cdot\cos(\alpha\, h) + w\cdot\sin(\alpha\, h)\\
w_1 &=& w\cdot\cos(\alpha\, h) - z\cdot\sin(\alpha\, h)\\
z& = &z_1\\
w &=& w_1\\
\\
r &=& \sqrt{(x - 1) ^ 2 + y ^ 2}\\
u_1 &=& (x - 1) \cdot \cos(r) - y \cdot \sin(r) + 1 \\
v_1 &=& y \cdot \cos(r) + (x - 1) \cdot \sin(r)\\
u_2 &=& x \cdot \cos(\pi\,h) - y \cdot \sin(\pi\,h) \\
v_2 &=& y \cdot \cos(\pi\,h) + x \cdot \sin(\pi\,h)\\
\sg_\beta (z)&=&0.5\, (1+\tanh(\beta\, z))\\
x &=& \sg_\beta (z)\cdot \, u_1+(1-\sg_\beta (z))\,u_2\\
y &=& \sg_\beta (z)\cdot \, v_1+(1-\sg_\beta (z))\,v_2\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.33in,width=4.417in,angle=0]{C:/Research/Book/Figures/eps/TwistFT03.eps}
\caption{\sml Plate A is the Twist and Translate Map; Plate B is the First Return Twist and Translate IDE to the hyper plane $z=0$.}
\label{fg:twistft03}
\end{figure}
\vs6\noi By setting the IDE parameter $h=0.001$ and the clock to have frequency 1 the IDE twist and flip flow is obtained, see Fig. \ref{fg:twistflipflow}.
\begin{figure}[htbp]
\includegraphics[height=2.53in,width=4.48in,angle=0]{C:/Research/Book/Figures/eps/TwistFlipFlow.eps}
\caption{\sml The Twist and Flip flow; Plate A is obtained by setting $h=0.001$ with clock frequency 1; in Plate B time is slowed to 0.3 and first return criteria is a function of $\tanh(0.5 \, z)$ }
\label{fg:twistflipflow}
\end{figure}
\vs6\noi There is no contradiction with the Poincar\'{e}-Bendixson Theorem \cite{bi:hs} because the IDE flow is a four-dimensional orbit projected onto two dimensions.
\vs6\noi The Twist and Flip flow alternates between the twist and a 180 degree rotation. The IDE has the benefit that it is differentiable (it is even a $C^\infty$ function of $h$) and therefore the dynamics transition from approximating a flow to approximating a discrete transformation.
\[\begin{array}{lcl}
&&\mbox{Code changes for Fig. \ref{fg:twistflipflow} Plate A}\\
h&=&0.001\\
\alpha&=&1.0\\
\\
&&\mbox{Code changes for Fig. \ref{fg:twistflipflow} Plate B}\\
h&=&0.001\\
\alpha&=&0.3\\
\beta&=&0.5
\end{array}\]
What is evident from Fig. \ref{fg:twistflipflow} is that the time $z$ and transition surface $\sg_\beta$ can greatly alter the dynamics of the IDE. The figures reveal that the degree of stretching as determined by $h\, r$, the clock frequency and the transition surface $\sg$ all must come together in the right proportions to produce complexity. This makes clear how short term dynamics can change due to system parameters changes and move a system from almost periodic to chaotic. Nowhere is this more evident than in the formation and dissipation of a tornado.
\vs6\noi In keeping with the program to address the Hirsch conjecture, all of the factors that determine the level of complexity of the twist and flip IDE are found in the form of the equations.
\begin{figure}[htbp]
\includegraphics[height=3.333in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/TwistTrans.eps}
\caption{{\sml The Twist and Translate IDE. Plate A is 10 orbits of the Twist and Translate IDE, $\beta=\infty$; Plate B is 10 Orbits of the Twist and Translate IDE with $\beta=5.0$: Plate C is the Twist and Translate Map}}
\label{fg:twisttrans}
\end{figure}
\begq
\label{cd:twt}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for one orbit for Fig. \ref{fg:twisttrans} Plate B is as follows:}\\
z &=& -0.125\\
w &=& \sqrt{1 - z ^ 2}\\
h&=& 0.001\\
\beta&=& 5.0\\
\alpha&=& 2\, \pi\\
&& \mbox{For i = 1 to 4000000}\\
z_1& = &z\cdot\cos(\alpha\, h) + w\cdot\sin(\alpha\, h)\\
w_1 &=& w\cdot\cos(\alpha\, h) - z\cdot\sin(\alpha\, h)\\
z& = &z_1\\
w &=& w_1\\
\\
r &=& \sqrt{x^2+y^2} \\
u_1 &=& x \cdot \cos(r\,h) - y \cdot \sin(r\,h) \\
v_1 &=& y \cdot \cos(r\,h) + x \cdot \sin(r\,h)\\
u_2 &=& x+\tau \, h \\
v_2 &=& y \\
\sg_\beta (z)&=&0.5\, (1+\tanh(\beta\, z))\\
x &=& \sg_\beta (z)\cdot \, u_1+(1-\sg_\beta (z))\,u_2\\
y &=& \sg_\beta (z)\cdot \, v_1+(1-\sg_\beta (z))\,v_2\\
k& =& (i)\, \mod(1000)\\
&&\mbox{\bf If k = 0 Then Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
%==========================================================================The Amplitude Equation=========================
\subsection{\sml The Amplitude Equation}
\label{sc:amp}
This section derives a nonlinear system in
which $t$ is not multiplied by a function of the initial
conditions but whose amplitude is a nonlinear function of the initial
conditions.
\subsection{\sml The Discrete Amplitude and Translate Map}
\begin{example}{\sml \bf Nonlinear Amplitude}
\label{ex:nla}
\vs6\noi
Consider the equation
\[\l(\vt x(t). y(t) \par\r)=\l(\vt r \cos(t+ \theta). r^2
\sin(t + \theta) \par \r) \]
where $r$ is a function of the initial conditions given by
$r^2=0.5(x^2+\sqrt{x^4+4y^2})$. By direct
computations the following system of nonlinear, autonomous ODEs can be derived:
\[\l(\vt \dot{x}. \dot{y} \par\r)=\l(\mtx 0. -1/r. r. 0 \par
\r)\l(\vt x. y \par \r) \]
where $r^2=0.5(x^2+\sqrt{x^4+4y^2})$ is a constant along integral
curves.
\end{example}
\vs6\noi This system has a feature
in common with the ODE $\ddot{x}+x^3=0$ in that the shape of the
integral curves varies with the initial conditions, see Fig. \ref{fg:amptwist}.
\begin{figure}[htbp]
\includegraphics[height=2.66in,width=3.527in,angle=0]{C:/Research/Book/Figures/eps/AmpTwist.eps}
\caption{{\bf The Amplitude Twist. Plate A is 10 orbits of the Amplitude Twist. Plate B is 10 Orbits of the Amplitude Twist and Translate, $\tau=0.5$}}
\label{fg:amptwist}
\end{figure}
\begq
\label{cd:amt}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:amptwist} Plate B for one orbit is as follows:}\\
&& \mbox{For i = 1 to 10000}\\
h&=&0.001\\
\tau&=&0.5\\
r_1 &=& \sqrt{x ^ 4 + 4 \cdot y ^ 2}\\
r &=& \sqrt{0.5 \cdot (x ^ 2 + r_1)} \\
u &=& x \cdot \cos(h) - y \cdot \sin(h)/r \\
v &=& y \cdot \cos(h) + x \cdot \sin(h)\cdot r\\
x &=& u+\tau\\
y &=& v\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi It is also possible to combine the amplitude twist, and all examples in this chapter, with a rotation in place of a translation.
\[ \T\l(\vt x. y \par\r)=-\left [ \l(\mtx \cos(\theta).
-\sin(\theta)/r.
\sin(\theta)\,r. \cos(\theta)\par \r) \l(\vt x-a. y\par
\r)+\l(\vt a. 0 \par \r) \right ]\]
\vs6\noi This map has no hyperbolic fixed points but does have high-order
hyperbolic periodic points.
For $\theta=2.0, a=0.5$ a period-six hyperbolic point is found at
approximately $(1.0433,1.1997)$.
By direct inspection this point is found to have a horseshoe.
This example illustrates the contribution of the geometry of the
orbits to producing chaos.
\vs6\noi The twist equations also have a feature in common with this second order ODE: The frequency varies with the initial conditions. However, in
the twist equations, the solutions are all circles, thus the amplitude is essentially what to expect from a linear system. In
the amplitude system, the frequencies are not a function of the initial conditions. Hence, the twist equations and the amplitude
equations have completely separated two of the three features of nonlinear systems illustrated by $\ddot{x}+x^3=0$. The third
feature, variable velocity along points of a single orbit, will be discussed in example \ref{ex:je}.
\footnote{As is known, when this latter second order ODE is driven by a periodic force, it produces chaos. In fact $\ddot{x}+x^3=a\,\cos(t)$ is Duffing's
equation without the damping term.}
%=====================================================================
\subsection{\sml The Discrete Amplitude Twist and Translate Map as a Time One Map of an IDE}
The Amplitude twist and translate time one map may be derived directly from the amplitude twist and translate IDE. The IDE is formed from the time one map by use of the fusion proposition \ref{pr:fus}. One of the benefits of the IDE is that it allows deeper insight into how complex dynamics are formed by the fusion of two dynamical processes in nature.
\begin{figure}[htbp]
\includegraphics[height=2.933in,width=4.503in,angle=0]{C:/Research/Book/Figures/eps/AmpTwistFlow.eps}
\caption{\sml The Amplitude Twist Flow. Plate A is KAM Island Chain. The Unstable Manifold is in Blue; Plate B is an overlay of the Flow and the Unstable Manifold; Plate C is a short segment of the flow with the time one points overlaid }
\label{fg:amptwistflow}
\end{figure}
\vs6\noi Figure \ref{fg:amptwistflow} demonstrates another important engineering point: The concept of {\em Sensitive Dependence on Initial Conditions} (SD) needs refinement. Figure \ref{fg:amptwistflow} provides an example of the SD problem which is illustrated by the Amplitude Twist IDE. The engineering significance is that when working with systems that may have high order hyperbolic periodic points, a very careful analysis may be needed to determine the degree of SD.
\vs6\noi Since the amplitude twist and translate was derived from the composition of time one maps of two ODEs, the two maps may be combined to form an IDE. After forming the IDE, a time one map may be obtained by using the HOC. The resulting IDE is four-dimensional. Figure \ref{fg:amptwistflow}, Plate A illustrates a KAM island chain from the time one map of the twist and translate IDE. Plate B of this figure shows an overlay of the unstable manifold (in blue) of Plate A and the IDE ``flow''. Plate C shows a small segment of the flow with the points of the time one map overlaid. In order to get Plate C, the clock must be initialized to have the correct phase.
\vs6\noi Note that the amplitude twist and translate IDE has periodic points of order 14. The significance of this is that SD is far greater in the amplitude twist and translate IDE than in the twist and flip IDE. Even the number of decimals used in the code of $\pi$ will affect the result. In particular, the degree of exponentially sensitive dependence on initial conditions is likely a function of the entropy of the bilateral shift that arises from the transverse homoclinic points of the hyperbolic periodic points.
\begq
\label{cd:ampt}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:amptwistflow} is as follows:}\\
z &=& -0.125\\
w &=& \sqrt{1 - z ^ 2}\\
h&=& 0.001\\
\pi&=& 3.14159265358979\\
\beta&=& 2 \, \pi\\
&& \mbox{For i = 1 to 200000}\\
z_1& = &z\cdot\cos(\beta\, h) + w\cdot\sin(\beta\, h)\\
w_1 &=& w\cdot\cos(\beta\, h) - z\cdot\sin(\beta\, h)\\
z& = &z_1\\
w &=& w_1\\
\\
r_1 &=& \sqrt{x ^ 4 + 4 \cdot y ^ 2}\\
r &=& \sqrt{0.5 \cdot (x ^ 2 + r_1)} \\
u_1 &=& x \cdot \cos(h) - y \cdot \sin(h)/r \\
v_1 &=& y \cdot \cos(h) + x \cdot \sin(h)\cdot r\\
u_2 &=& x+\tau \, h \\
v_2 &=& y \\
\sg_\beta (z)&=&0.5\, (1+\tanh(200\, z))\\
x &=& \sg_\beta (z)\cdot \, u_1+(1-\sg_\beta (z))\,u_2\\
y &=& \sg_\beta (z)\cdot \, v_1+(1-\sg_\beta (z))\,v_2\\
k& =& (i)\, \mod(1000)\\
&&\mbox{\bf If k = 0 Then Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\[\S_h=\exp( h\, \pi\, \B)\,(\X-\X_0)+\X_0 \;\;\; \S_h=\exp(- h\, \pi\, \B)\,(\X-\X_0)+\X_0\]
\[p_c = 0.5 \cdot (1 + \tanh(\beta\,(z)))\;\;p_c = 0.5 \cdot (1 + \tanh(z))\;\;\;p_c = 0.5 \cdot (1 + \sgn(z))\;\;\; \beta=1\;\;\;\beta=200\;\;\;\beta=0.5 \]
%=======================THREE=======================================================
\subsection{\sml The Jacobi Equation}
\label{sc:je}
\subsection{\sml The Discrete Chaotic Jacobi Map}
\vs6\noi When $\dot{r}=0$ one obtains the following example.
\begin{example}{\sml \bf The Modified Jacobi Equation}
\label{ex:je}
\vs6\noi
Consider
\[\l(\vt \dot{x}. \dot{y} \par\r)=\l(
\dot{\theta} {\bf B}\r)\l(\vt x. y \par \r) \]
The orbits must be circles, the same as the
simple harmonic oscillator and the twist equation. For
$\dot{\theta}=1$ one obtains the simple harmonic oscillator. For
$\dot{\theta}=r$, the twist is obtained.
However, if
\[\frac{\partial\dot{\theta}}{\partial r}=0\]
closed-form solutions are still possible.
\end{example}
\vs6\noi In particular a {\em modified} Jacobi equation can be solved in closed form in terms of elementary functions.
\vs6\noi If $\dot{\theta}=\sqrt{1-k^2
\sin^2(\theta)}$, the closed-form solution is
\[\l(\vt x(t). y(t) \par\r)=\l(\vt r\,\cn(t+C). r\,\sn(t+C)\par \r) \]
where $\sn, \cn$ are the Jacobi elliptic functions. These functions are the inverses of
elliptic integrals and are derived in the classical problem of rectifying
the ellipse, see Fig. \ref{fg:jactwist}.
\begin{figure}[htbp]
\includegraphics[height=3.220in,width=3.123in,angle=0]{C:/Research/Book/Figures/eps/JacobiTwist.eps}
\caption{{\sml The Modified Jacobi Amplitude Twist. 20 Orbits of the Jacobi Twist and Translate}}
\label{fg:jactwist}
\end{figure}
\vs6\noi Since $\dot{\theta}$ is not a function of $r$,
the angular velocity does not change from orbit to orbit
as was the case with the twist system. In fact, this system
preserves lines through the origin and through any complete
revolution a line or a region is mapped onto itself. The source of
the nonlinearity is that along an orbit, the arc length is
expanded and contracted in a periodic manner. In this
system, matter is neither created, as happens in systems having a source,
nor destroyed, as happens with systems having a sink, but rather
is alternately compressed and stretched.
\vs6\noi This system can be used to obtain chaos by the standard two-phase
gate method: Translate the system
to $(a,0)$ and compose it with the flip to obtain a
time one map that produces chaos while having only periodic
hyperbolic points and no hyperbolic fixed points. The origin of
chaos in this system is solely from the nonlinear acceleration
taking place around circles.
\vs6\noi
A limitation of the Jacobi Equation is that the Jacobi elliptic
functions, $\sn(t), \cn(t)$ are \underline{not} elementary
functions. However, the time-one map determined by these
equations can be constructed from elementary functions. For
anyone wanting to proceed by constructing an example which avoids
the use of the Jacobi Equation use:
\vs6\noi
In place of $\dot{\theta}=\sqrt{1-k^2 \sin^2(\theta)}$
simply choose $\dot{\theta}=2-\sin^2(\theta)$, which is
integrable in terms of elementary functions. Specifically,
\[\sin(\theta)=\frac{\sqrt{2}\sin(\psi)}{\sqrt{1+\sin^2(\psi)}}\]
and
\[\cos(\theta)=\frac{\cos(\psi)}{\sqrt{1+\sin^2(\psi)}}\]
where $\psi=\sqrt{2}(t+C)$, $C$ being the arbitrary constant of
integration determined by the initial conditions. Since
$\dot{r}=0$, write the solution in rectangular
coordinates from the above information and the initial
conditions. Specifically,
\[\l(\vt \sin(\psi). \cos(\psi)
\par\r)=\l(\vt C_1 \cos(\sqrt{2}t)-C_2\sin(\sqrt{2}t).
C_1 \sin(\sqrt{2}t)-C_2\cos(\sqrt{2}t) \par\r)\]
with
\[C_1=\frac{\sqrt{2}x_0}{\sqrt{2x_0^2+y_0}},\hspace{12pt}
C_2=\frac{y_0}{\sqrt{2x_0^2+y_0}}\]
\vs6\noi
The significance of these systems as a building-block of complexity
is twofold.
First, the nonlinear acceleration around orbits of these two
equations, when composed with simple
linear factors, gives rise to chaos even though the nonlinearity
is of the simplest conceivable form, far simpler
than the twist map in that it has \underline{no shearing}.
Second, two
observers traveling on nearby orbits lying on the same radial
line will not experience relative motion. Further, observers
riding on separate orbits will never separate by more than a fixed
but small distance. It is nearly the opposite of sensitive
dependence on initial conditions.
Two observers riding on the same orbit will oscillate relative to
each other while still remaining in circular motion. This
stretching and compressing of arc length around the orbit
means that the vector field has a nonzero divergence while having no sources or sinks.
Matter is never created or destroyed as when there are attractors
or repellers involved; it is only compressed and stretched. The
result of this feature is that when it is composed with simple components
such as the flip, chaos is created by a subtle process. In addition, a very unexpected result appears:
Local attracting periodic points are mixed in with periodic, quasi-periodic
and chaotic orbits, see Fig.~\ref{fg:jactwist}. This is due to the nonzero
divergence of this system. If the divergence were a result of
sinks or sources, one might expect to obtain global attractors or
repellers. But this is not what is found. The existence of
local attracting fixed points also depends on the magnitude of
the flip component that these maps are composed with. Only certain
flips combined with the right initial conditions can give rise to
this unusual phenomena. Figure \ref{fg:jactwist} illustrates beautifully the
kinds of orbits possible with the Jacobi map as a factor. The
presence of measure-preserving chaotic and elliptic orbits
combined with period three attractors suggests the possibility of
such systems existing in nature. The work of Freeman \cite{bi:wf} on the
attractors of the brain combined with the non attracting nature
of common brain waves suggests that the brain is a system with
these properties. This system may one day also explain how complex
structures such as the spinal column can form from dynamical
systems composed of simple components.
%=============================================================================
\subsection{\sml The Discrete Chaotic Jacobi Map as a Time One map of an IDE}
In this section, the Jacobi IDE will be derived from first principles.
\begin{example}{\bf \sml The Derivation of Jacobi IDE}
\vs6\noi The following relationships are needed, see \cite{bi:fb}, chapter 1.
\[\begin{array}{lcl}
x(t)&=& r\,\cn(t)\\
y(t)&=& r \,\sn(t)\\
\dot{x}&=& -r\, \sn\, \dn\\
\dot{y}&=& r\, \cn\, \dn\\
\dot{x}&=& -y\, \dn\\
\dot{y}&=& x\, \dn\\
\dn&=& \sqrt{1-k^2\, y^2}\\
\end{array}\]
In matrix form
\[\dot{\X}(t)= \sqrt{1-k^2\, y^2}\, \B\, \X\]
\end{example}
\vs6\noi An IDE is immediately derived using Proposition \ref{pr:fp}:
\[\T_h(\X)=\exp(h \, \sqrt{1-k^2\, y^2}\, \B)\, \X\]
which may be evaluated using the results of Sec. \ref{sc:ae}. This is the analytic solution for the IDE that produces Fig. \ref{fg:jactrans}.
\begin{figure}[htbp]
\includegraphics[height=2.383in,width=2.45in,angle=0]{C:/Research/Book/Figures/eps/JacTrans.eps}
\caption{\sml Three Orbits of the Jacobi Translate IDE}
\label{fg:jactrans}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.393in,width=4.583in,angle=0]{C:/Research/Book/Figures/eps/JacTrans01.eps}
\caption{\sml Plate A uses Translation by 1; Plate B uses the semi circle of diameter 1.}
\label{fg:jactrans01}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.443in,width=4.567in,angle=0]{C:/Research/Book/Figures/eps/JacFT.eps}
\caption{\sml Plate A is the time series for the Jacobi Twist and Flip IDE; Plate B Is the Time-one Map for Plate A}
\label{fg:jacft}
\end{figure}
\begq
\label{cd:jac}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for Fig. \ref{fg:jacft} is as follows:}\\
k&=&0.9\\
z_1&=&0\\
w_1&=&1.0\\
\alpha&=&2\, \pi\\
&& \mbox{For i = 1 to N}\\
\\
z &=&z_1\, \cos(\alpha \, h) + w_1\,\sin(\alpha \, h )\\
w &=& w_1\,\cos(\alpha \, h) - z_1\,\sin(\alpha \, h ) \\
z_1 &=& z\\
w_1 &=& w\\
p_c &= & 0.5 \cdot (1 + \sgn(z))\\
\\
r &=& \sqrt{x ^ 2 + (1 - k ^ 2)\cdot y ^ 2} \\
u_1 &=& x \cdot \cos(r \cdot h) + y \cdot \sin(r \cdot h) \\
v_1 &=& y \cdot \cos(r \cdot h) - x \cdot \sin(r \cdot h)\\
\\
u_2 &=& (x-1)\cdot\cos(\pi\cdot h) + y\cdot\sin(\pi\cdot h)+1\\
v_2 &=& y\cdot\cos(\pi\cdot h) - (x-1)\cdot\sin(\pi\cdot h)\\
\\
x &=&p_c \cdot u_1 + (1-p_c) \cdot u_2\\
y &=& p_c \cdot v_1 + (1-p_c) \cdot v_2\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi
Three sources of autonomous, integrable nonlinearity in two dimensions
are thus illustrated by
three equations: the twist equation, the amplitude equation and the
Jacobi Equation. The twist equation is the most readily available
source of chaos and it has zero divergence. The
second of these equations provides a source of asymmetry and subtlety
not found in the twist equations and also has zero divergence. Two of these equations
are induced by linear ODEs; all three
have only linear orbits.
\vs6\noi {\bf Note:} An important point that will recur throughout this book is that to produce complexity, the gradient of the divergence in some direction must be nonzero. Since the divergence of the twist is zero, and the divergence of the semi circle is zero, the transition/fusion function must supply the nonzero gradient in the twist and flip or twist and translate equations.
%==================================================FOUR=============================================FOUR ============================
\subsection{\sml Nonlinear Orbits with Zero Divergence}
\label{sc:nlo}
In the preceding three examples, the imposed constraint was that the
individual orbits of the ODE be linear (i.e., induced by a linear ODE) and it was observed that, in that case, nonlinearity
could arise from three different sources: nonlinear frequencies,
nonlinear relationships between neighboring orbits, and nonlinear divergence.
Another source of nonlinearity must be when the orbits are not induced by a linear ODE.
\vs6\noi
The following example illustrates how to obtain a nonlinear system that: (1) can be
solved in closed form; (2) is not
induced by a linear system; (3) whose orbits are not linear; (4) which
preserves lines through the origin; and
(5) which has zero divergence.
\vs6\noi Using the form of a vector field
\begq
\label{eq:nlo}
\l(\vt \dot{x}. \dot{y} \par\r)=\l(\frac{\dot{r}}{r}{\bf I}
+\dot{\theta} {\bf B}\r)\l(\vt x. y \par \r)
\endq
make two assumptions. The first is that its underlying group
is measure preserving or, what is the same thing, the vector field is
divergence-free. The second is that the system preserves lines
through the origin. Using these two assumptions, derive the
following partial differential equation for $\dot{r}$:
\[\frac{1}{r}<{\rm X},\nabla \dot{r}>+\frac{\dot{r}}{r}+<{\bf
B}{\rm X}, \nabla \dot{\theta}>=0\]
With the following notational convention, obtain the PDE in
standard form. Let
\[\l( \vt \dot{r}. \dot{\theta}\par \r)=\l(\vt g(x,y). f(\theta) \par
\r)\]
Now the PDE becomes
\begin{eqnarray}
x\, p+ y\, q= -(z+r\,f'(\theta))
\end{eqnarray}
\vs6\noi where $p=z_x,\; q=z_y,\; z=\dot{r}=g(x,y)$. The general solution is given by
\[z=h(x,y)F(x/y)\]
where $h$ is dependent on $f'(\theta)$. Assuming that
$c\,z=r\,f'(\theta)$ is the form of the solution, then
the following consistency equation is needed:
\begin{eqnarray}
x\, p+ y\, q= -(c+1)z
\end{eqnarray}
For simplicity, require that $-(c+1)>0$ or that $c<-1$. All of
these assumptions would be fine if the solution of the resulting
equation is consistent with these assumptions.
An application of standard methods for solving first-order partial
differential equations gives
\[z=\frac{y}{-(c+1)}F(x/y)\]
\vs6\noi The consistency check is to see whether
\[r\,f'(\theta)=\frac{y}{-(c+1)}F(x/y)\]
is possible. Since $y=r\,\sin(\theta)$ and $x=r\,\cos(\theta)$, by choosing $f(\theta)=a+b\sin(\theta)$, everything is
consistent. In particular,
\[r=r_0\l(\frac{f(\theta_0)}{f(\theta)}\r)^{1/c}\]
and the first part of the solution is done. Now, choose
$a>b$, then the equation $\dot{\theta}=a+b\sin(\theta)$ is
solvable in closed form for $\sin(\theta)$. Using a standard
table of integrals gives:
\[\frac{b+a\sin(\theta)}{a+b\sin(\theta)}=\sin(k\,t+C_0)\]
where $k=\sqrt{a^2-b^2}$, and $C_0$ is a constant of integration
to be determined from the initial conditions. From this relation
$\sin(\theta), \cos(\theta)$ are obtained.
\vs6\noi Using the foregoing constraints, the general solution of Eq. \ref{eq:nlo} can be obtained and provides the following example.
\begin{example}{\sml \bf Nonlinear Orbits}
\label{ex:nlo}
\[\l(\vt x(t). y(t)
\par\r)=r_0\,\l(\frac{f(\theta_0)}{f(\theta)}\r)^{1/c}\l(\vt
\cos(\theta).\sin(\theta)\par \r) \]
where $c>1$.
\end{example}
\begin{figure}[htbp]
\includegraphics[height=3.333in,width=4.42in,angle=0]{C:/Research/Book/Figures/eps/NonlinearOrbit01.eps}
\caption{{\bf Nonlinear Orbits. Plate A Nonlinear System; Plate B Nonlinear System with Translate.}}
\label{fg:nlorbit01}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.433in,width=4.477in,angle=0]{C:/Research/Book/Figures/eps/NonlinearOrbit05.eps}
\caption{\sml Nonlinear Orbits. Plate A, B IDE for Nonlinear System of Fig. \ref{fg:nlorbit01} where Semi circle rotates with the NL system; Plate C,D IDE for Nonlinear System where the semi circle rotates counter to the NL orbit}
\label{fg:nlorbit05}
\end{figure}
\vs6\noi The effect of counter rotation in Plates C,D of Fig. \ref{fg:nlorbit05} is to operate like $x\, \mod(1)$.
\vs6\noi Note that the root factor is not a constant since
$f(\theta)$ is a function of time. The orbits cannot be
linear, see Fig. \ref{fg:nlorbit01} Plate A, and, by construction, the system is
divergence-free.
\vs6\noi Using the fusion proposition \ref{pr:fus}, this map can be made a component of a
time one map which produces chaos, Fig. \ref{fg:nlorbit01} Plate B.
\begq
\label{cd:nlo}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for one orbit for Fig. \ref{fg:nlorbit05} }\\
z &=& 0\\
w &=& 1\\
h&=& 0.001\\
k&=& 0.9\\
\tau &=& -1.0\\
\beta&=& 100.0\\
\\
&& \mbox{For i = 1 to N}\\
z_1& = &z\cdot\cos(2\, \pi\, h) + w\cdot\sin(2\, \pi\, h)\\
w_1 &=& w\cdot\cos(2\, \pi\, h) - z\cdot\sin(2\, \pi\, h)\\
z& = &z_1\\
w &=& w_1\\
p_c&=&0.5\, (1+\tanh(\beta\, z))\\
\\
&&\mbox{==========IDE Code for U1======}\\
&& \mbox{IDE Code for U1, see Eq. \ref{eq:u1code} below}\\
\\
&&\mbox{==========IDE Code for U2======}\\
u_2 &=& x\cdot\cos(\pi\cdot h) + y\cdot\sin(\pi\cdot h)\\
v_2 &=& y\cdot\cos(\pi\cdot h) - x\cdot\sin(\pi\cdot h)\\
&&\mbox{==========IDE Fusion============}\\
x &=& p_c(z)\cdot \, u_1+(1-p_c(z))\,u_2\\
y &=& p_c(z)\cdot \, v_1+(1-p_c(z))\,v_2\\
&&\mbox{ Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{equation}
\label{eq:u1code}
\left. \begin{array}{lcl}
&&\mbox{Code for IDE U1}\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
al1 &=& a \cdot r + b \cdot y\\
C_2 &=& (b \cdot r + a \cdot y) / al1\\
C_1 &=& x \cdot xk / al1\\
fslt_0 &=& f(y / r)\\
gslt_0 &=& g(y / r)\\
sn_0 &=& C_1 \cdot \sin(xk \cdot h1) + C_2 \cdot \cos(xk \cdot h1)\\
cn_0 &=& C_1 \cdot \cos(xk \cdot h1) - C_2 \cdot \sin(xk \cdot h1)\\
sn_1 &=& (a \cdot sn_0 - b) / (a - b \cdot sn_0)\\
cn_1 &=& cn_0 \cdot xk / (a - b \cdot sn_0)\\
fslt &=& f(sn_1)\\
gslt &=& g(sn_1)\\
gr &=& gslt / gslt_0\\
u_1 &=& r \cdot cn_1 \cdot \sqrt{fslt_0 / fslt} \cdot gr\\
v_1 &=& r \cdot sn_1 \cdot \sqrt{fslt_0 / fslt} \cdot gr
\end{array}\right \}
\end{equation}
%==============================================================================
\subsection{\sml The Discrete Chaotic Divergence Free Map as a Time One Map of an IDE}
\begin{figure}[htbp]
\includegraphics[height=2.5in,width=3.523in,angle=0]{C:/Research/Book/Figures/eps/StretchingFolding01.eps}
\caption{{\bf Nonlinear Orbits. Plate A: Discrete Chaotic Mapping; Plate B Time One Map from IDE}}
\label{fg:stretchfolding01}
\end{figure}
%=======================================================FIVE=======================================================
\section{\sml Nonzero Divergence with Nonlinear Orbits}
\label{sc:nzd}
\subsection{ \sml The Discrete Chaotic Map with Positive Divergence and Nonlinear Orbits}
It is possible to obtain nonzero divergence equations
that are just as useful. One option is to solve the PDE $x\,p+y\,q=z$ and the
choice
\[ \dot{r} = -r f'(\theta)\]
with $\dot{\theta}=f(\theta) \neq {\rm constant}$
gives the closed-form solutions in
rectangular coordinates:
\begin{example}{\sml \bf Nonzero Divergence}
\label{ex:nzd}
\[\l(\vt x(t). y(t) \par\r)=r_0\,\frac{f(\theta_0)}{f(\theta)}\l(\vt
\cos(\theta). \sin(\theta)\par \r) \]
\end{example}
Note that
$\dot{\theta}=-1$ and $\dot{r}\neq 0$ gives nonzero divergence.
\vs6\noi
This process can be greatly generalized. If
$\dot{\theta}=f(\theta)$ and $r=C_0 G(\theta)$ gives an
autonomous ODE:
\[\dot{r}=C_0 G'(\theta)f(\theta)\]
where $C_0$ is eliminated from this equation by noting that
$C_0=r/G(\theta)$. This works so long as $\dot{\theta}=f(\theta)$ is
solvable in closed form. For example, choose
$f(\theta)=2-\sin^2(\theta)$. By use of a table of integrals, solve this equation for $\sin(\theta)$, which is
all that is necessary to express the solution in rectangular
coordinates. Choosing
$G(\theta)=\sqrt{f(\theta)}(1-0.95\,\sin(\sin(\theta)))$ the
orbits of Fig. \ref{fg:nzd02}, Plate A are obtained.
\vs6\noi Composing this autonomous time-one map with
a shift gives the chaotic orbits of Fig.~\ref{fg:nzd02}, Plate B.
\begin{figure}[htbp]
\includegraphics[height=2.5in,width=3in,angle=0]{C:/Research/Book/Figures/eps/NonlinearOrbit02.eps}
\caption{{\sml Nonlinear Orbits. Plate A Nonlinear System; Plate B Nonlinear System with Translate, $h=1.0\; \tau=0.5$}}
\label{fg:nzd02}
\end{figure}
\vs6\noi In the following code
\[
f(u)= 2 - u ^ 2 \;\;\;g(u)= 1 - 0.95 \cdot \sin(u)\]
\tiny
\begq
\label{eq:nzd}
\left.
\begin{array}{lcl}
&& \mbox{\bf Code for Fig. \ref{fg:nzd02} and \ref{fg:nzd03}}\\
&&\\
For\;\; j &=& 1\;\; To \;\;M\\
&&\\
x_0 &=& 4.0 \cdot j / M\\
y_0 &=& 2.0 \cdot x_0\\
&&\\
For \;\; i &=& 1\;\; To \;\;N\\
&&\\
r_0 &=& \sqrt{x_0 ^ 2 + y_0 ^ 2}\\
al1 &=& \sqrt{2.0 \cdot x_0^2 + y_0^2}\\
cl1 &=& \sqrt{2.0} \cdot x_0 / al1\\
sl1 &=& y_0 / al1\\
sl2 &=& \sqrt{1 + sl1 ^ 2}\\
slt_0 &=& \sqrt{2.0} \cdot sl1 / sl2\\
fslt_0 &=& \sqrt{f(y_0 / r_0)}\\
gslt_0 &=& g(y_0 / r_0)\\
sn1 &=& cl1 \cdot \sin(\sqrt{2} \cdot h) + sl1 \cdot \cos(\sqrt{2} \cdot h)\\
cn1 &=& cl1 \cdot \cos(\sqrt{2} \cdot h) - sl1 \cdot \sin(\sqrt{2} \cdot h)\\
sn2 &=& \sqrt{1 + sn1 ^ 2}\\
u &=& r_0 \cdot cn1 / sn2\\
v &=& \sqrt{2} \cdot r_0 \cdot sn1 / sn2\\
slt &=& \sqrt{2} \cdot sn1 / sn2\\
gslt &=& g(slt)\\
fslt &=& \sqrt{f(slt)}\\
u &=& u \cdot (fslt_0 / fslt) \cdot (gslt_0 / gslt)\\
v &=& v \cdot (fslt_0 / fslt) \cdot (gslt_0 / gslt)\\
&&\\
x_0 &=& u+\tau\\
y_0 &=& v\\
&&\\
&&\mbox{\bf Plot Point}\\
&&\\
&& \mbox{Next i}\\
&&\\
&& \mbox{Next j}
\end{array}\right \}
\endq
\footnotesize
\begin{figure}[htbp]
\includegraphics[height=3.2in,width=4.0in,angle=0]{C:/Research/Book/Figures/eps/NonlinearOrbit03.eps}
\caption{{\bf Nonlinear Orbits. Plate A $h=0.1\; \tau=2.5$ ; Plate B $h=1.0\; \tau=0.2$ }}
\label{fg:nzd03}
\end{figure}
%============================================================================================================================
\subsection{\sml The Discrete Chaotic Map with Nonzero Divergence as a Time One Map of an IDE}
Combining proposition \ref{pr:vf} with Proposition \ref{pr:fus} gives an IDE of the form
\[\T_h =\sg_\beta \, \exp(h\, \frac{\dot{r}}{r}{\I})\exp( h\, \dot{\theta} \B)+(1-\sg_\beta) \exp(h\, \A)\]
writing out the first term in detail gives
\[\sg_\beta \, \exp(h\, \frac{\dot{r}}{r}\, \I) \, \l(\mtx \cos(h\, \dot{\theta}). \sin(h\, \dot{\theta}). -\sin(h\, \dot{\theta}). \cos(h\, \dot{\theta}) \par \r)\]
So long as $\dot{r}$ and $\dot{\theta}$ are expressible in closed form in terms of elementary functions, $\T_h$ will also.
\vs6\noi Taking $\dot{r}=r\, \cos(\theta)$ and $\dot{\theta}=\sin(\theta)$ gives the IDE
\[\T_h =\sg_\beta \, \exp(h\, \frac{x}{r}) \, \l(\mtx \cos(h\,y/r ). \sin(h\, y/r). -\sin(h\,y/r). \cos(h\, y/r) \par \r)+(1-\sg_\beta) \exp(h\, \A)\]
\begin{figure}[htbp]
\includegraphics[height=2.637in,width=4.097in,angle=0]{C:/Research/Book/Figures/eps/Nonlinear0.eps}
\caption{\sml Nonlinear Orbits with Non Zero Divergence. Plate A: Time One Map with Instantaneous Transition ; Plate B: Time One Map with Smooth Hard Transition; Plate C: Time One map with Soft Transition }
\label{fg:nonlinear0}
\end{figure}
\begq
\label{cd:nl0}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:nonlinear0} is as follows:}\\
f(u) &=& 2 - u ^ 2\\
g(u) &=& 1 - 0.95 \cdot \sin(u)\\
\\
&& \mbox{For i = 1 To N}\\
z &=& \cos(2\, \pi) \cdot z_1 + \sin(2\, \pi) \cdot w_1\\
w &=& \cos(2\, \pi) \cdot w_1 - \sin(2\, \pi) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
\\
r &=& \sqrt{x ^ 2 + y ^ 2}\\
al1 &=& \sqrt{2 \cdot x ^ 2 + y ^ 2}\\
cl1 &=& \sqrt{2} \cdot x / al1\\
sl1 &=& y / al1\\
sl2 &=& \sqrt{1 + sl1 ^ 2}\\
slt0 &=& \sqrt{2} \cdot sl1 / sl2\\
fslt0 &=& f(y / r) ^ {ex}\\
gslt0 &=& g(y / r)\\
sn1 &=& cl1 \cdot \sin(\sqrt{2} \cdot h) + sl1 \cdot \cos(\sqrt{2} \cdot h)\\
cn1 &=& cl1 \cdot \cos(\sqrt{2} \cdot h) - sl1 \cdot \sin(\sqrt{2} \cdot h)\\
sn2 &=& \sqrt{1 + sn1 ^ 2}\\
u &=& r \cdot cn1 / sn2\\
v &=& \sqrt{2} \cdot r \cdot sn1 / sn2\\
slt &=& \sqrt{2} \cdot sn1 / sn2\\
gslt &=& g(slt)\\
fslt &=& f(slt) ^ {ex}\\
\\
u_1 &=& u \cdot (fslt0 / fslt) \cdot (gslt0 / gslt)\\
v_1 &=& v \cdot (fslt0 / fslt) \cdot (gslt0 / gslt)\\
\\
u_2 &=& x + \tau \cdot h\\
v_2 &=& y\\
\\
sg_1 &=& 0.5 \cdot (1 + \tanh(z))\\
sg_2 &=& 1 - sg_1\\
x &=& sg_1 \cdot u_1 + sg_2 \cdot u_2\\
y &=& sg_1 \cdot v_1 + sg_2 \cdot v_2\\
k &=& i - M \cdot Int(i / M)\\
&&\mbox{If k = 0 Plot Point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
%===========================================
\section{\sml Boundary Conditions}
Based on the results of the foregoing chapters, the general form of a four-dimensional IDE formed by fusion which can produce chaos is
\[s(\X)\cdot \exp(h\, (\dot{r}/r)\, \I)\, \exp(h\,\dot{\theta} \,\B) +(1-s(\X))\cdot (\exp(h\,\cdot \pi \,\B)\, (\X-\F(\X))+\F(\X)) \]
where $s(\X)$ is the boundary condition. The boundary condition may have both a time and space component. The time component is provided by the HOC thus making the total system four-dimensional. The resulting images of systems requiring a time boundary condition shown in the figures in this book are the projection of a four-dimensional IDE onto a two-dimensional subspace.
\vs6\noi This section will summarize the set of boundary conditions that occur in fusing the IDEs presented so far. The general form is as follows
\[s(\X) \, \T_h + (1-s(\X)) \, \R_h\]
\vs6\noi In the following, the subscript $c$ refers to the HOC.
\[\begin{array}{lcl}
&&\mbox{Boundary condition without space or time constraint}\\
s(\X)&=& \lambda \in \Rl^n\\
&&\mbox{Boundary condition without space constraint}\\
s(\X)&=& p_c=0.5\,(1+\tanh(\beta(1-w_c)))\\
\end{array}\]
\[\begin{array}{lcl}
&&\mbox{Boundary condition without time constraint}\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta(y -f(\X))))\\
p_2 &=& 0.5 \cdot (1 +\tanh(\beta(1 -g(\X))))\\
s(\X) &=& p_2 \cdot (s(\X) + p_1 - s(\X) \cdot p_2)
\end{array}\]
\[\begin{array}{lcl}
&&\mbox{Boundary condition with both space and time constraints}\\
p_c&=&0.5\,(1+\tanh(\beta(1-w_c)))\\
q&=&1-p_c\\
p_1 &=& 0.5 \cdot (1 + \tanh(\beta(1 - f(\X))))\\
p &=& p_c + p_1 - p_c \cdot p_1\\
s(\X) &=& p \cdot (s(\X) + q - s(\X) \cdot q)
\end{array}\]
\vs6\noi Time-one maps require time boundary condition and first return maps require a space boundary condition. There are conditions when these two maps coincide. The shifts require both space and time boundary conditions. This is a result that becomes evident when algebraically translating the abstract mathematical function $x\,\mod(1)$ into a component of an IDE. However, classical two-dimensional ODEs such as that of Ueda have only a time boundary condition whereas three-dimensional systems such as Chua have only a space boundary condition. Since shifts have both time and space boundary conditions, algebraically identifying a shift in these classical systems is problematic.
%===========================C Nonlinear, non periodic Systems=========================
\section{\sml Nonlinear Systems which are not Periodic}
\label{sc:nl01}
The nonlinear periodic systems of Sec. \ref{sc:nl} provide standard components for stable or unstable manifolds for hyperbolic IDEs by simply composing them with the appropriate exponential IDE. The form of the resulting IDE is as follows
\[\exp(h\, \D)\exp(h \A(\cdot))\]
where $\D$ is a diagonal matrix and the "$\cdot$" convention of Sec. \ref{sc:note} applies to $\A(\cdot)$.
\vs6\noi The Anosov map arises from an unbounded hyperbolic map. The measure preserving H\'{e}non map is another example. The unstable manifold in both cases is unbounded. Two common devices to deal with this are (1) add damping; or, (2) project the map onto the torus. A third option is to use the special IDE function $\xmod1$, see Sec.~\ref{sc:sf}. A fourth option in IDE theory is fusion, proposition \ref{pr:fus}. Therefore it is necessary to look at how nonlinearities occur in non periodic diffeomorphisms.
\vs6\noi The H\'{e}non map is treated in Sec.~\ref{sc:sfhen}, Fig.~\ref{fg:henon01}. The Baker's Transformation is a mod(1) example of an unbounded system derived from a linear hyperbolic system and is found in Sec.~\ref{sc:baker}.
\vs6\noi In general, the linear system
\[\l(\vt \dot{x}. \dot{y}\par\r)=\l(\mtx a_{11}. a_{12}. a_{21}. a_{22}\par \r)\, \l(\vt x. y\par\r) \]
induces a nonlinear system in the simplest case as follows:
\[\l(\vt \dot{x}. \dot{y}\par\r)= \l(\vt a_{11}\,x+a_{12}\,y. a_{21}\, x+a_{22}\, y\par\r)\]
or
\[M(x,y)\,dx+ N(x,y)\,dy =0\]
When this is an exact differential there is a function $\phi(x,y)=\phi(x_0,y_0)$. In particular
\[\T_h(\X)=\exp(h\, \phi(x,y) \A)\, \X\]
where $a_{11}=-a_{22}$ can be incorporated into an IDE using the fusion method.
\vs6\noi Using the $\xmod1$ special function produces variations of the Anosov map as seen in Fig. \ref{fg:catmapa} and Fig. \ref{fg:catmapb}.
\vs6\noi In general, the use of the IDE fusion method, proposition \ref{pr:fus} or special IDE functions, provide a method of incorporating any unbounded solution of an ODE in an IDE.
%===========================================Elementary Connections to Lie theory===============================
%==========================================================================================
%08
\chapter{Elementary Connections between IDEs and Lie Groups}
\label{ch:lg}
\begin{center}
\parbox{3.5in}{\em There is an analogy between the linear groups of Lie theory and the nonlinear groups of IDE theory. This relationship suggests how some of the results from Lie theory may contribute to IDE theory.}
\end{center}
\vs6\noi There are two objectives of this section: (1) Develop a nonlinear version of SO(2) that illustrates how IDEs provide a bridge between linear and nonlinear systems. (2) Extend key concepts from Lie Groups to IDEs.
\section{\sml The IDE Analog of SO(2)}
The solution of the ODE
\[\dot{\X}= \B \, \X\]
where
\[\B=\l(\mtx 0.1.-1.0\par \r)\]
generates the entire one-dimensional Lie Group SO(2) since for any angle of rotation $\omega$ the group element is given by
\[\exp(\omega \, \B)\]
This group element acts on $\Rl^2$ by the rule
\[\X\ra \exp(\omega \, \B)\, \X\]
SO(2) preserves the real valued function $r(\X)=\|\X\|$.
\vs6\noi The IDE associated to SO(2) is
\[\T_h(\X)=\exp(h \, \B)\, \X\]
which does not generate the entire group SO(2), but rather only generates ``orbits'' composed of discrete points obtained by iterating $\T_h$ with $\omega =1$. To replicate any member of SO(2) an addition must be made to $\T$. To obtain a specific rotation of $\Rl^2$, say $\omega$, the following IDE is needed
\[\T_h(\X)=\exp(h \, \omega\, \B)\, \X\]
This demonstrates that associated to SO(2) there is a family of IDEs determined by all possible angles of rotation of $\Rl^2$. The set of all possible IDEs needed to generate SO(2) is
\[G_\omega=\{\exp(h\, \omega \B)| \omega \in \Rl\}\]
This set of IDEs forms a commutative group.
\vs6\noi To make the jump to nonlinear processes recall that SO(2) preserves the form $r^2=\|\X\|^2=\langle \X, \X \rangle$. Using this notation a larger group is defined
\[G_r=\{\exp(h\, r(\X)\, \B)| \X \in \Rl^2\}\]
From this an even larger group is formed by considering all continuous functions on $\Rl$. Let $f\in C(\Rl)$. Then
\[G_f=\{\exp(h\,f(r)\, \B)| \, f \in C(\Rl)\}\]
This can be extended to all measurable functions in $C(\Rl)$. Any additive subgroup of $C(\Rl)$ generates a corresponding group of IDEs.
\vs6\noi Whereas the parameter $t$ defined the Lie Group, the space of continuous functions defines the IDE group corresponding to the invariant of SO(2). For any fixed $f\in C(\Rl)$ and any real number $h>0$, the relationship is summarized by the following commutative diagram.
\[\begin{CD}
\mbox{so(2)} @>f(r)\B>> \mbox{SO(2)} \\
@Af(r) AA @VV\exp(f(r)\, \B) V\\
C(\Rl) @. \T_h\\
@A r AA @VV\exp(h f(r)\, \B)V\\
\Rl @< r \la \X << \Rl^2
\end{CD}\]
\vs6\noi Clearly, this process may be generalized.
\[\begin{CD}
\fkg@>f(r)\A>> \mbox{G} \\
@Af(r) AA @VV\exp(f(r)\, \A) V\\
C(\Rl) @. \T_h\\
@A r AA @VV\exp(h f(r)\, \A)V\\
\Rl @< r \la \X << \Rl^n
\end{CD}\]
\vs6\noi The distinguishing dynamic in the nonlinear analog of SO(2) is the relationship between two group elements, say $\exp(\omega_1 \, t\, \B)$ and $\exp(\omega_2 \, t\, \B)$ where $\omega_1 \neq \omega_2$, that act on separate orbits/integral curves. That relationship is defined by their difference in angular velocity on their respective integral curves. The difference in angular velocity creates a shearing effect that stretches a box of size $\delta x \, \delta y$ while preserving the area of the box whereas an element of SO(2) simply translates the box intact. As mentioned in previous papers, stretching is one of the two dynamics that are essential to create complexity. Linear groups cannot provide stretching even though they can provide folding.
\begin{example} {\bf Nonlinear IDEs}
\label{ex:nl1}
Let $r=\|\X\|$ and consider the equation
\[\T(\X)=\exp(h\, f(r) \, \B)\, \X\]
This equation defines a family of IDEs containing an arbitrary real valued function on $\Rl$ which may even be a random variable. In terms of a matrix $\T$ is given by
\[\T(\X)=\l(\mtx \cos(h\, f(r)).\sin(h\, f(r)).-\sin(h\, f(r)).\cos(h\, f(r))\par \r) \, \l( \vt x.y \par \r)\]
\vs6\noi Placing the two forms side by side
\[\begin{array}{lll}
\mbox{linear IDE} & \exp(h \B)\X& \mbox {an element in SO(2)}\\
\mbox{nonlinear IDE} & \exp(h \, f(r) \B)\X& \mbox{a nonlinear analog of SO(2)}
\end{array}\]
\end{example}
\vs6\noi A second example further generalizes the nonlinear IDE.
\begin{example} {\bf Second Nonlinear IDE Example:}
\label{ex:nl2}
Consider
\[\T(\X)=\exp(h\, \, \B (r))\, \X\]
where
\[\B(r)=\l(\mtx 0. r. -1/r.0\par \r) \;\;\;\;r^2=0.5\,(x^2+\sqrt{x^4+4\, y^2})\]
\[\T(\X)=\l(\mtx \cos(h).r\, \sin(h).-\sin(h)/ r.\cos(h)\par \r) \, \l( \vt x.y \par \r)\]
or more generally
\[\T(\X)=\l(\mtx \cos(h).f(r)\,\sin(h).-\sin(h)/ f(r).\cos(h)\par \r) \, \l( \vt x.y \par \r)\]
where $f$ is an arbitrary function of a real variable.
\vs6\noi The side-by-side comparison shows that the nonlinear IDE is nonlinear because the curvature of the orbits is not constant while the angular velocity remains constant.
\[\begin{array}{ll}
\mbox{linear IDE} & \exp(h \B)\X\\
\mbox{nonlinear IDE} & \exp(h \, \B(f(r)))\X
\end{array}\]
\end{example}
The nonlinear group IDE reduces to the linear group IDE by setting $f(r)=1$ in both examples. Therefore the linear IDE is a subgroup of the nonlinear IDEs.
\vs6\noi Generalizing further leads to the IDE
\[\T(\X)=\exp(h\, \, \A(\X))\, \X\]
in which there is no assumed symmetry. This will be the case for most biological and social systems and even fluid flow as found in severe weather.
\begin{definition} {\bf Semi-linear IDEs}
A semi-linear IDE is an IDE induced by a linear IDE and an associated invariant.
\end{definition}
\vs6\noi Examples above are semi-linear IDEs.
\vs6\noi Linear IDEs have the property that they are both a local and global solution of a linear ODE. The simplest class of nonlinear IDEs such as the examples presented here show that this class shares properties of both linear and nonlinear ODEs. The nonlinear feature is that the IDE is not a linear function of the initial conditions. The linear feature is that the IDE produces both a local and global orbit of the associated ODE. In this respect the semi-linear IDEs are a bridge between linear and nonlinear ODEs in that they share features of both.
\vs6\noi The following clarifies this relationship.
Let
\[(\lambda, \X)\in \Rl^3 \;\;\; \X \in \Rl^2\]
Define a map $\Phi$ from $\Rl^3$ to $\Rl^2$
\[(\lambda, \X) \stackrel{\Phi}\ra \exp(\lambda \,\B)\, \X\]
Clearly
\[\Phi(\lambda, \X) \in \Rl^2\]
Now let $\lambda:\Rl^2 \ra \Rl$
and consider
\[\Phi(\lambda(\Y), \X)=\exp(\lambda(\Y) \,\B)\, \X\]
For fixed $\Y$, \hspace{2pt} $\exp(\lambda(\Y) \,\B)$ is in SO(2). The operation of $\exp(\lambda(\Y) \,\B)$ on $\Rl^2$ is independent of $\X$. However, for the nonlinear case, $\Phi$ has the form
\[\Phi(\lambda(\X), \X)\]
and the operation of $\Phi$ on $\Rl^3$ depends on the starting point. As such, $\Phi$ has been contracted to a mapping $\Rl^2 \ra \Rl^2$. This represents the form of the simplest nonlinear IDEs, the semi-linear IDEs.
%========================================LIE CONCEPTS EXTENDED=============================
\section{\sml Extending Lie Concepts to IDEs}
This section presents how the maps ${\bf exp},\;\;\Ad,\;\; \ad$ extend to IDEs and also note that the Campbell-Baker-Hausdorff Theorem carries over to IDEs.
\vs6\noi Let $\A(\X)$ be a matrix function of the vector $\X$
\[\Ad_{ \A(\X)}\, \Phi(\X)=\A(\X)\, \Phi(\X)\, \A(\X)^{-1}\]
and
\[\ad_{\A(\X)}\, \Phi(\X)=\A(\X)\, \Phi(\X)-\Phi(\X) \A(\X)\]
Both definitions carry over to matrix functions of a vector variable by pointwise application.
\vs6\noi The following result also carries over to matrix valued functions of a vector.
\[\Ad_{\exp(\A(\X))}=\exp(\ad_{\A(\X)})\]
Then the following well-known result extends to matrix-valued functions of a vector variable:
\[\frac{d}{dt} \exp(t \A(\X))=\exp(t \A(\X))\l(\frac{\I-\exp(-\ad_{\A(\X)})}{\ad_{\A(\X)}}\r)\A(\X)\]
where, for any matrix $\Phi(\X)$
\[\l(\frac{\I-\exp(-\ad_{\Phi})}{\ad_{\Phi}}\r)=\sum_{k=0}^\infty \frac{(-1)^k}{(k+1)!} (\ad_{\Phi})^k\]
\vs6\noi The Campbell-Baker-Hausdorff formula also carries over to matrix valued functions of a vector variable.
\vs6\noi The inner product of two matrix functions of a vector variable is given, pointwise, by
\[\langle \T, \S \rangle= f(\X) =\langle \T(\X), \S(\X) \rangle=\tr(\S^*(\X)\, \T(\X))\]
If
\[\langle \T, \T \rangle =f(\X) \]
then \[\|\T\|^2= \sup_{\X} \|f(\X)\|^2\]
The inner product is a bilinear form pointwise,
\[\langle \T+\R, \S \rangle =\langle \T(\X)+\R(\X), \S(\X) \rangle =\langle \T(\X), \S(\X) \rangle+\langle \R(\X), \S(\X) \rangle=\]
\[\langle \T, \S \rangle+\langle \R, \S \rangle\]
More generally, if $\T, \; \S$ are IDEs then
\[\langle \T, \S \rangle= f(\X) =\langle \T(\X), \S(\X) \rangle\]
If
\[\langle \T, \T \rangle =f(\X) \]
then \[\|\T\|^2= \sup_{\X} \|f(\X)\|^2\]
\[\langle \T+\R, \S \rangle =\langle \T(\X)+\R(\X), \S(\X) \rangle =\langle \T(\X), \S(\X) \rangle+\langle \R(\X), \S(\X) \rangle=\]
\[\langle \T, \S \rangle+\langle \R, \S \rangle\]
If all IDEs are Lebesgue measurable functions in $\R^n$ then
\[\langle \T, \S \rangle= f(\X)\]
\begin{lemma}
\label{lm:fs1}
Let $\T$ be an IDE and define
\[a_n=\int_D \; \langle \T(\X), \exp(h\, \lambda_n \B)(\X) \rangle d\mu(\X) \]
and assume that
\[S=\sum_{n=1}^\infty a_n\]
is an absolutely convergent series. Then
\[\frac{1}{S} \, \sum_{i=1}^\infty a_i \exp(h \, \lambda_i \, \B)\]
is an IDE.
\end{lemma}
\pf See section \ref{sc:lm}
\rl
%=======================PDEs and Continuous Transformation Groups===================
\section{\sml PDEs and Continuous Transformation Groups}
The Lie theory of continuous transformation groups extends to nonlinear ODEs through the concept of an infinitesimal generator which is a PDE operator \cite{bi:ei}. ODEs and first order PDEs are more generally related \cite{bi:is}. This section touches on both topics. This subsection shows how to approximate integral surfaces using IDEs.
\vs6\noi
Assume that $z=f(x,y)$ is a surface in $\Rl^3$ to be found. And assume that there is a first order PDE for $z$:
\[P(x,y)\frac{\pr z}{\pr x}+Q(x,y)\frac{\pr z}{\pr y}=0\]
This equation is solved by first solving the system
\[\frac{dx}{P(x,y)}=\frac{dy}{Q(x,y)}\]
for a function $g(x, y)=c$ where $c$ is a constant and then setting
\[z=F(g(x,y))\]
where $F$ is an arbitrary function of a single variable.
\vs6\noi The system
\[\frac{dx}{P(x,y)}=\frac{dy}{Q(x,y)}=ds\]
may also be viewed as two first order ODEs.
\[\frac{dx}{ds}=P(x,y)\;\;\;\; \frac{dy}{ds}=Q(x,y)\]
where the derivatives are with respect to arc length on the surface $f$.
This system may be put into the form
\[\dot{\X}=\A(\X)\, \X\]
from which the IDE
\[\T_h(\X)=\exp(h \A(\X))\, \X\]
may be obtained. For each point on the surface, $f$, $\T_h$ traces out a curve and the total family of curves so obtained defines the approximate surface.
\vs6\noi Another view is to find an invariant of the IDE, call it $k(\T_h(\X))=c=k(\T_h(\X_0))$, and then the general solution of the PDE is given by
\[z=F(k(\T(\X)))=f(x,y)\]
where $F$ is an arbitrary function of a single real variable. As always in solving ODEs and PDEs, there is some art involved.
%09
%========================Stretching Defined============================================================
\chapter{The Geometric Dynamics of Complexity: Stretching and Folding}
\label{ch:sf}
\begin{center}
\parbox{3.5in}{\em As demonstrated by the horseshoe paradigm of Smale {\rm \cite{bi:ss}}, stretching and folding are the two fundamental building blocks of complexity}
\end{center}
\vs6\noi As noted in Sec. \ref{sc:key}, the proof of the Smale-Birkhoff theorem demonstrates that the elementary components of stretching (to be defined formally below) and folding are the source of complexity in dynamical systems. What the Smale-Birkhoff theorem does not do is provide insight into the relative proportions of stretching and folding necessary to produce chaos.
\vs6\noi In this chapter, the algorithmic mechanisms of stretching, folding and the transition between them will be examined through two systems: (1) a modified version of the H\'{e}non map (removing the 1 because it has no bearing on the stretching dynamic \cite{bi:rb7}); and, (2) the twist and flip map.
\vs6\noi From these two examples, the dynamics of stretching, folding and the transition between them will be seen as capturing the essential dynamics of any system and, therefore, understanding their appearance in the form of the mathematical description of the system is a step toward resolving the Hirsch Conjecture \cite{bi:mh} and improving system's analysis, design and modeling. In the process, this will improve the possibility of identifying the level of complexity in a system from its form.
\vs6\noi It will become apparent from the following examples that IDEs are uniquely formulated to capture all three dynamical components that generate chaos and complexity.
%========================EXAMPLES and illustrations==============
\section{\sml A Geometric Illustration of Stretching and Folding Using the H\'{e}non Map}
\label{sc:sfhen}
The study of stretching and folding in the H\'{e}non map begins by examining the H\'{e}non stretching and folding dynamics separately for clarity. The illustrations are organized around the fixed point $(1,1)$. See Fig.~\ref{fg:hen}.
\begq
\T\l(\vt x.y \par \r)=\l(\vt x-2y^2. y \par \r), \hspace{12pt} \mbox{Stretching}
\label{eq:str}
\endq
\begq \T\l(\vt x.y \par \r)=\l(\mtx 0.1.-1.0\par\r)\l(\vt x.y \par \r), \hspace{12pt} \mbox{Folding}
\label{eq:fold}
\endq
\begin{figure}[h]
\centering
\includegraphics[height=1.077in,width=3.24in,angle=0]{C:/Research/Book/Figures/EPS/StretchingFolding.eps}
\caption{\scriptsize Stretching and folding in the modified H\'{e}non map}
\label{fg:hen}
\end{figure}
\vs6\noi In Fig.(\ref{fg:hen}) the nonlinear dynamic, Eq.(\ref{eq:str}), translates (stretches) a point to the left by an amount related to the magnitude of the $y$ axis coordinate. As $y$ increases, the stretching becomes larger creating a shearing effect between two nearby $y$-levels, see Fig.(\ref{fg:Shear}). This intuitively corresponds to a baker stretching dough. The folding dynamic Eq. (\ref{eq:fold}) simply rotates a point back in the opposite direction. This intuitively corresponds to a baker folding the stretched dough in half. In the mathematical equations, starting at a fixed point $(1,1)$, these two dynamics exactly cancel each other. The complexity in a neighborhood of the fixed point is suggested by the lateral direction of the stretching dynamic combined with the somewhat vertical direction of the folding dynamic. Stretching away from the fixed point resembles the action of the unstable manifold of a hyperbolic system at a fixed point. The folding action resembles the dynamics of the stable manifold at a fixed point. If these two dynamics are properly synchronized or proportioned, it is apparent that the fixed point will be hyperbolic. If not, the fixed point could be elliptic or another fixed point type.
\begin{figure}[htbp]
\centering
\includegraphics[height=0.923in,width=4.567in,angle=0]{C:/Research/Book/Figures/EPS/Shearing.eps}
\caption{\scriptsize Shearing is Implicit in the Modified H\'{e}non Map. Two Adjacent Points will Separate}
\label{fg:Shear}
\end{figure}
\vs6\noi It is possible to think that divergence encompasses the concept of stretching. However, this is misleading. A linear hyperbolic system can have nonzero divergence without stretching in the sense to be made precise in a later section. Combining this with a linear folding system cannot produce a transverse homoclinic point since the unstable manifold must be folded. Folding the unstable manifold means that the unstable manifold has nonzero curvature \cite{bi:rb2} and this is required if the unstable manifold is to bend back to cross the stable manifold. Stretching relies on the concept of curvature rather than divergence.
\vs6\noi To transition from the H\'{e}non map, from which the level of complexity cannot be determined from its form, to an IDE that captures stretching, folding and transition in its form, a lemma is needed. In particular, since the H\'{e}non map may be factored into two time-one maps for autonomous ODEs, it is possible to construct the H\'{e}non IDE that makes transparent the stretching, folding and transition between them. This follows from lemma \ref{lm:hide}.
\begin{lemma}
\label{lm:hide}
Let $\H=\S_1\circ \T_1$ where $\S_1, \; \T_1$ are time one maps for autonomous, bounded ODEs. Then
\[\H_h(\X)=f(\Z)\S_h(\X)+(1-f(\Z))\, \T_h(\X)\]
is an IDE.
\end{lemma}
\pf
Since $\T_1,\; \S_1$ are time one maps of autonomous ODEs there exist IDEs $\T_h,\; \S_h$.
\rl
\vs6\noi Using the HOC, see Sec. \ref{sc:hoc}, the following transition function is obtained: $f(\X)=0.5\cdot (1+\tanh(\beta\, \theta))$ where $\theta=\arctan(z/w)$. The variables $z,\; w$ must be obtained by a two-dimensional dynamical system. Combining the harmonic oscillator with lemma \ref{lm:hide} gives the following four-dimensional IDE:
\[\l(\vt \Z.\X\par\r)\ra \l(\vt\exp(h\,\B)\Z .f(\Z)\S_h(\X)+(1-f(\Z))\,\F_h(\X) \par\r)\]
where
\[\Z=\l(\vt z.w \par \r)\]
and
\[\X=\l(\vt x.y \par \r)\]
and $f(\Z)=0.5 \cdot (1 + \tanh(\beta\cdot(\pi\cdot \alpha - \theta)))$ and $\theta=\arctan(z / w)$. See Fig. \ref{fg:henon02a}.
\begin{figure}[htbp]
\includegraphics[height=1.98in,width=2.02in,angle=0]{C:/Research/Book/Figures/eps/Henon02.eps}
\caption{{\sml H\'{e}non IDE}}
\label{fg:henon02a}
\end{figure}
\vs6\noi For stretching and folding to be ``equal'', $\alpha =1$. Note that the same stretching and folding dynamics are present in Fig. \ref{fg:henon02a} as in Fig. \ref{fg:hen}.
\begq
\label{cd:hen}
\left.
\begin{array}{lcl}
&&\mbox{ The code for Fig. \ref{fg:henon02a} is as follows:}\\
h&=&0.001\\
z_1=0\\
w_1=1\\
&&\mbox{For i = 1 To N}\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
z_1&=&z\\
w_1&=&w\\
\\
\theta &=& 2 \cdot \arctan(z / w) + \pi\\
u_1 &=& x - 2 \cdot y ^ 2 \cdot h\\
v_1 &=& y\\
u_2 &=& \cos(h) \cdot x + \sin(h) \cdot y\\
v_2 &=& \cos(h) \cdot y - \sin(h) \cdot x\\
sg_1 &=& 0.5 \cdot (1 + \tanh(8\cdot(\pi - \theta)))\\
sg_2 &=& 1 - sg_1\\
x &=& sg_1 \cdot u_1 + sg_2 \cdot u_2\\
y &=& sg_1 \cdot v_1 + sg_2 \cdot v_2\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right \}
\endq
%==============================================================
\section{\sml The Relationship between Stretching and Folding and the Shift}
\label{sc:sfshift}
The H\'{e}non map is particularly well suited to illustrating the relationship between stretching and folding and the shift.
\begin{figure}[htbp]
\includegraphics[height=2.417in,width=4.533in,angle=0]{C:/Research/Book/Figures/eps/SFShift.eps}
\caption{\footnotesize Plates A and B Make Clear that the Direction of Stretching is Irrelevant; In Plate C, Stretching and Folding Combine to Form a Fixed Point. Plate D Illustrates the Dynamics Away from a Fixed Point}
\label{fg:sfshift}
\end{figure}
\vs6\noi In Fig. \ref{fg:sfshift}, Plate C, the combined stretching and folding dynamics at the fixed point are functioning like a shift with the folding providing the $x\, \mod(1)$ dynamic exactly as occurs in the unilateral shift IDE.
\begq
\label{cd:sft}
\left.
\begin{array}{lcl}
&&\mbox{ The code for Fig. \ref{fg:sfshift}, Plate D is as follows:}\\
h&=&0.001\\
z_1=1\\
w_1=0\\
&&\mbox{For i = 1 To N}\\
z &=& \cos(h \cdot \pi ) \cdot z_1 - \sin(h \cdot \pi ) \cdot w_1\\
w &=& \cos(h \cdot \pi ) \cdot w_1 + \sin(h \cdot \pi ) \cdot z_1\\
z_1&=&z\\
w_1&=&w\\
\\
s &=& 0.5 \cdot (1 + \sgn(w))\\
u_1 &=& x - 2 \cdot y ^ 2 \cdot h\\
v_1 &=& y\\
u_2 &=& \cos(h\cdot 0.5\cdot \pi) \cdot x + \sin(h\cdot 0.5\cdot \pi) \cdot y\\
v_2 &=& \cos(h\cdot 0.5\cdot \pi) \cdot y - \sin(h\cdot 0.5\cdot \pi) \cdot x\\
x &=& s \cdot u_1 + (1-s) \cdot u_2\\
y &=& s \cdot v_1 + (1-s) \cdot v_2\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi The {\em Form} of the H\'{e}non IDE makes the relationship between stretching and folding explicit as well as the presence of a form of a shift.
\begin{eqnarray}
\label{eq:sfshift01}
\X &\ra & \s(\X)\, \exp(h \ln(2)\I_1)\,\X+(1-s(\X)) \exp(h \, \pi\, \B)\,\X\\
\label{eq:sfshift02}
\X &\ra & \s(\X)\, \exp(h\, y\, \N_1)\,\X+(1-s(\X)) \exp(h \, 0.5\, \pi\, \B)\,\X\\
\I_1&=& \l(\mtx 1.0.0.-1\par\r) \nonumber\\
\N_1&=& \l(\mtx 0.1.0.0\par\r)\nonumber
\end{eqnarray}
Equation \ref{eq:sfshift01} is a bilateral shift IDE and Eq. \ref{eq:sfshift02} is the H\'{e}non IDE which is a variation of a shift. This suggests the following definition
\begin{definition}
\[\X \ra \s(\X)\, \exp(h\, p(\X)\, \N_1)\,\X+(1-s(\X)) \exp(h \, \omega \, \B)\,\X\]
where $p(\X)$ is a polynomial function will be called a polynomial shift.
\end{definition}
\vs6\noi The H\'{e}non IDE is a polynomial shift. This gives the following result:
\begin{proposition}
\label{pr:sfshift}
There exists a polynomial shift for which there is a subset on which it is a function of a bilateral shift.
\end{proposition}
\pf \cite{bi:rb2} \rl.
\vs6\noi It should be expected that there exists a large class of polynomial shifts that satisfy proposition \ref{pr:sfshift}.
%================================================================================
\section{\sml Further Examination of the H\'{e}non Map}
\vs6\noi In Fig. \ref{fg:henon01}, seven orbits of the H\'{e}non IDE are compared to seven orbits of the H\'{e}non map. Both figures show the same morphology including the appearance of KAM island chains. This should be the expected result due to lemma \ref{lm:hide}.
\begin{figure}[htbp]
\includegraphics[height=2.403in,width=4.547in,angle=0]{C:/Research/Book/Figures/eps/Henon01.eps}
\caption{\sml Plate A Nine Orbits of the Measure Preserving H\'{e}non map; Plate B Nine Orbits of the H\'{e}non IDE} % NOTE(review): the body text above says "seven orbits" while this caption says "Nine" --- confirm the intended count
\label{fg:henon01}
\end{figure}
\vs6\noi By adding a damping factor, the orbits converge to an almost periodic function, see Fig. \ref{fg:henon03a}. The significance of this illustration is that dynamical systems may have a very long transient. Even though their long term dynamics can be predicted, it is the short term dynamics that are most relevant for such systems as epidemiology, weather, social systems and warfare. IDE theory provides a direct method of identifying the short term dynamics.
\begin{figure}[htbp]
\includegraphics[height=2.29in,width=2.5in,angle=0]{C:/Research/Book/Figures/eps/Henon03.eps}
\caption{\sml A single Orbit of the H\'{e}non IDE Converges to an Almost Periodic System after a Sufficiently Long Transient}
\label{fg:henon03a}
\end{figure}
\vs6\noi The code for Fig. \ref{fg:henon02a} provides a direct means of affecting the amount of stretching and folding through the transition function $0.5 \cdot (1 + \tanh(8\cdot(\pi - \theta)))$. As noted above, the proportion of stretching and folding may be changed using the following transition function
\[f(\theta)=0.5 \cdot (1 + \tanh(\beta \cdot(\pi\, \alpha - \theta)))\]
The factor $\beta$ affects the steepness of the transfer function. In Fig. \ref{fg:henon04a}, the transition function is given by
\[0.5 \cdot (1 + \tanh(0.5\cdot(\pi \cdot 0.73 - \theta)))\]
In this example, $\beta =0.5$. Instead of cleanly separating stretching and folding as $\beta=8.0$ or higher will do, this choice of $\beta$ ``smears'' the two dynamics together.
\begin{figure}[htbp]
\includegraphics[height=2.23in,width=2.803in,angle=0]{C:/Research/Book/Figures/eps/Henon04.eps}
\caption{\sml Orbit of the H\'{e}non IDE in which the transition function has been modified to change the stretching proportion and the steepness of the hyperbolic tangent.}
\label{fg:henon04a}
\end{figure}
\vs6\noi Characteristic circular sub orbits appear in Fig. \ref{fg:henon04a} as is often seen in chaotic systems such as the Ueda attractor.
%==================================Twist=======================
\section{\sml Geometric Illustration of Stretching and Folding Using the Twist and Flip IDE}
In this section the Twist and Flip is used for illustrations that parallel the H\'{e}non IDE of the previous section. The twist IDE is
\[\X\ra \exp(h\,r \, \B)\, \X\]
where $r=\sqrt{x^2+y^2}$ in Fig. \ref{fg:twist}.
\begin{figure}[htbp]
\includegraphics[height=1.433in,width=2.667in,angle=0]{C:/Research/Book/Figures/eps/Twist.eps}
\caption{\sml Stretching Formed by a Twist is Morphologically the same as the Stretching in the H\'{e}non map when the H\'{e}non Stretching is Bent into a Circle. }
\label{fg:twist}
\end{figure}
\vs6\noi If the stretching in Eq. \ref{eq:str} is formed into a circle, a twist is obtained as seen in Fig. \ref{fg:twist}. This suggests that similar chaotic dynamics can be expected when the twist is combined with folding. This fact is demonstrated in Fig. \ref{fg:tangles}.
\vs6\noi The IDE used in Plate B of Fig. \ref{fg:idetwist} is formed according to lemma \ref{lm:hide}. This is possible because both the Twist and Flip are time one maps of a bounded ODE and thus satisfy the hypothesis of Lemma \ref{lm:hide}.
\begin{figure}[htbp]
\includegraphics[height=2.32in,width=4.0in,angle=0]{C:/Research/Book/Figures/eps/IDETwist.eps}
\caption{\sml {\bf Plate A} is the unstable manifold of the Twist and Flip Map; {\bf Plate B} is the Twist and Flip IDE in which a damping factor is included to form a ``strange attractor''. The gyration conductance function is the same for both plates.}
\label{fg:idetwist}
\end{figure}
\begq
\label{cd:idtw}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for Fig. \ref{fg:idetwist} Plate B}\\
h&=& 0.005\\
&& \mbox{\bf For i = 1 To N}\\
\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
\\
\theta &=& 2 \cdot \arctan(z / w) + \pi\\
r &=& 1 / \sqrt{(x - a) ^ 2 + y ^ 2}\\
r &=& (r - r \cdot \ln(r))\\
u_1 &=& (x - a) \cdot \cos(r \cdot h) + y \cdot \sin(r \cdot h) + a\\
v_1 &=& y \cdot \cos(r \cdot h) - (x - a) \cdot \sin(r \cdot h)\\
\\
u_2 &=& \exp(-0.0025) \cdot (\cos( h) \cdot x + \sin( h) \cdot y)\\
v_2 &=& \exp(-0.0025) \cdot (\cos( h) \cdot y - \sin( h) \cdot x)\\
\\
sg_1 &=& 0.5 \cdot (1 + \tanh(\beta(\alpha\,\pi - \theta)))\\
sg_2 &=& 1 - sg_1\\
\\
x &=& sg_1 \cdot u_1 + sg_2 \cdot u_2\\
y &=& sg_1 \cdot v_1 + sg_2 \cdot v_2\\
\\
&&\mbox{\bf Plot Point when z crosses vertical axis}\\
\\
z_1 &=& z\\
w_1 &=& w\\
&& \mbox{\bf Next i}
\end{array}\right \}
\endq
\vs6\noi
Figure \ref{fg:idetwist} makes clear that the attractor, Plate B, is determined by the geometry of the unstable manifold, Plate A. Damping, when added to a measure preserving chaotic system, Plate A, makes the unstable manifold an attractor and it also greatly attenuates the geometry of the unstable manifold. $\beta$ determines the ``steepness'' of the hyperbolic tangent term (the transition function) and $\alpha$ determines the proportion of ``time'' that each term contributes to the total dynamic. The unstable manifold of Plate A is computed analytically.
\vs6\noi {\bf Summary Remark:} In both the H\'{e}non map and the twist and flip map it is demonstrated that under the hypothesis of lemma \ref{lm:hide} the discrete maps may be reformulated as IDEs and that the stretching, folding and transition functions are all clearly visible from the form of the IDEs.
%================================================horseshoe formation====================================================
\section{\sml Horseshoe Formation}
\label{sc:hsf}
Figure \ref{fg:horseshoe} continues developing the geometry of chaos. Using a twist-and-flip map, a small blue square with a hyperbolic fixed point at its center is iterated seven times. The box is stretched into a narrow but elongated (seen in red) form (the twist-and-flip preserves measure) until it returns to the blue box and crosses it. This illustrates the stretching and folding process at work in chaos. The fold is represented by the map $\X \ra -\X$. The stretching is an IDE $\X \ra \exp(-h\, r\, \B)\, \X$. It is a nonlinear rotation about the point $(1,0)$.
\vs6\noi The two components of this map may be separately examined. The twist IDE, $\T_h$, advances until a condition is met and then the flip IDE, $\F_h$, advances for 180 degrees. The time one map is obtained by setting $h=1$. For this combination to be realized as a continuous process, the flip must take place in a third dimension.
\begin{figure}[htbp]
\centering
\includegraphics[height=2.857in,width=3.6in,angle=0]{C:/Research/Book/Figures/EPS/horseshoe.eps}
\caption{\scriptsize Horseshoe Formation \footnotesize}
\label{fg:horseshoe}
\end{figure}
\begq
\label{cd:hs}
\left.
\begin{array}{lcl}
&&\mbox{ \bf The code for Fig. \ref{fg:horseshoe} is as follows}\\
\\
&&\mbox{For i = 1 To 500}\\
x_0 &=& (-2 + 4 \, i / 500) \cdot h \\
&& \mbox{For k = 1 To 500}\\
y_0 &= &(4 \, k / 500) \cdot \, h\\
x &=& x_0\\
y& =& y_0\\
&&\mbox{\bf Plot Point of Blue Square}\\
&&\mbox{For j = 1 To 7}\\
r &= &\sqrt{(x - 1) ^ 2 + y ^ 2}\\
u &= &(x - 1) \cdot \cos(r) - y \cdot \sin(r) + 1\\
v &= &y \cdot \cos(r) + (x - 1) \cdot \sin(r)\\
x &=& -u\\
y &=&-v\\
&&\mbox{ Next j}\\
&&\mbox{\bf Plot Point of Stretched Blue Square}\\
&&\mbox{ Next k}\\
&&\mbox{ Next i}
\end{array}\right \}
\endq
%=====================================================================Analytical View==================
\section{\sml The Analytical View of Stretching and Folding}
\vs6\noi Some analytical examples will provide motivation for formulating the "form" of an equation in terms of stretching and folding. As seen in \cite{bi:bc6}, and \cite{bi:bj}, an intuitive notion of stretching can appear in many ways. In particular, nonlinear vector fields may provide stretching under certain circumstances. Additionally, in an intuitive sense, almost periodic forcing can provide folding. The motivation for considering almost periodic forcing as a folding dynamic comes from the twist-and-flip equation. Twisting anything does provide an intuitive idea of stretching; and, it is reasonable to consider a linear rotation or translation as the simplest possible folding dynamic.
\vs6\noi As seen in \cite{bi:bj}, a nonlinear autonomous equation may provide stretching through the "potential" term of the equation. When do equations such as Eq.(\ref{eq:ptnl1}) provide stretching?
\begq
\ddot{x}+f(x)\dot{x}+ \Phi(x)=0
\label{eq:ptnl1}
\endq
Since damping terms attenuate stretching and complicate the discussion, when do such equations as Eq.(\ref{eq:ptnl2}) provide stretching?
\begq
\ddot{x}+\Phi(x)=0
\label{eq:ptnl2}
\endq
\vs6\noi For example, note that both Eq.(\ref{eq:df1}) and Eq.(\ref{eq:df2}) have ``intuitive'' stretching and ``folding'' dynamics along the lines being discussed; and, they both have the same stretching dynamic, but they do not have the same magnitude of the folding dynamic. The coefficient 7.5 on the folding component in Eq.(\ref{eq:df2}) is ``enough'' folding, whereas Eq.(\ref{eq:df1}) does not ``appear'' to have enough folding. Eq.(\ref{eq:df1}) is an example of when the relationship between the stretching and folding dynamics is attenuated by the size of the damping term in relation to the size of the folding ``amplitude''. Per a previous footnote, damping can be disconnected from chaos; the role of damping in chaos will be addressed in a later section.
\vs6\noi As noted previously, ideally it would be possible to write down the solution of a chaotic ODE in closed form in terms of elementary functions. However, that is not possible. What is possible (as will be demonstrated in later sections) is to write down the local (and sometimes global) solution of an ODE as a IDE in closed form in terms of elementary functions.
To simplify the discussion and unify the ideas, follow convention and only examine diffeomorphisms on a manifold. This means that it is only necessary to formulate definitions in terms of transformations such as time-one maps or Poincar\'{e} maps.
\vs6\noi Now turn to the H\'{e}non Map, the most well known example of chaotic dynamics in a transformation of the plane, to provide motivation for the definitions. In \cite{bi:rb2} it is shown that the H\'{e}non map can be factored into a composition of two maps:
\begq
\l(\vt x.y\par\r)\rightarrow \l(\vt y. -x \par \r)
\label{eq:h1}
\endq
and
\begq
\l(\vt x.y\par\r)\rightarrow \l(\vt 1+x-ay^2. y \par \r)
\label{eq:h2}
\endq
Eq.(\ref{eq:h1}) is a 90 degree rotation about the origin and provides the folding dynamic for the H\'{e}non map. Conveniently, Equation (\ref{eq:h2}) suggests a definition for stretching. Note that divergence of the normal to Eq.(\ref{eq:h2}) is $-2\cdot a\cdot y$. This in itself does not imply stretching since the divergence to the normal of linear vector fields may be nonzero. How linear and nonlinear transformations and vector fields differ is that the divergence has a nonzero gradient. I.e., the divergence is changing. This leads us to the definition of stretching as a nonzero gradient of the divergence of the vector field in some direction.
\vs6\noi The Lorenz Equation provides an example to show that the gradient of the divergence may be zero in the presence of chaotic dynamics in some directions. However, the normal to the Lorenz vector field has a nonzero gradient for the divergence. Hence, the gradient of the divergence of a vector field in some direction characterizes the presence of stretching. Note that stretching does not have to be exponential. Let's look at some examples.
%66
\vs6\noi As noted earlier, twisting is an intuitive form of stretching. The Twist-and-Flip and Twist-and-Translate mappings provide a very clear separation between ``intuitive stretching'' and ``folding''. By the same means it can be shown that without enough folding, stretching does not produce chaotic dynamics; further, by adding a dissipative component, folding and stretching can be attenuated to the point that chaos vanishes. In particular, in engineering terms, friction can reduce the effect of folding.
\vs6\noi The examples in Chapter \ref{ch:nl} demonstrate that stretching can be provided by (1) shearing as seen in the twist-and-flip map; (2) nonlinear amplitudes as seen in Sec. \ref{sc:amp} and Example \ref{ex:nla}, nonlinear stretching and compression of closed orbits, of Sec. \ref{sc:je}. Each case shows that chaos may be formed by composing these types of stretching with either a rotation or a linear translation. The ODEs associated to these maps are square-wave forced ODEs \cite{bi:bc3}. The case of continuous forcing will be addressed in a later section.
%========================Formal Definition of Stretching=======================
\section{\sml Formal Definition of Stretching}
\label{sc:strt}
\vs6\noi Combining the foregoing remarks offers a formal definition of stretching. Given
\begq
\dot{{\rm X}}={\rm G}( {\rm X} )
\endq
If
\begq
\nabla(\nabla\sbt {\rm AG})\neq 0
\endq
for some matrix A, then G is a stretching vector field, in particular, in the direction defined by A. In essence, if the divergence of a vector field in some direction has a nonzero gradient, the vector field is stretching in that direction. To define folding requires that
\begq
\nabla(\nabla\sbt {\rm AG})= 0
\endq
for all A. That is, there is no change in the stretching dynamic in any direction. The added requirement is that the members of this set be invertible.
\vs6
\noi These definitions have partitioned the space of diffeomorphisms on a manifold into two sets: stretching and folding. In short, if a diffeomorphism is not stretching, it is folding. Note that the set of folding diffeomorphisms form an additive group. By a direct computation it can be shown that the set of folding transformations on an n-dimensional space is the general linear group, GL(n,C) combined with the translation group, i.e., the group of affine transformations. In Chapter \ref{ch:lg} it is shown that folding dynamics can be used to produce stretching dynamics by replacing a constant matrix $\A$ with a matrix function of $\A(\X)$.
\vs6\noi
While the twist-and-flip map and its variants separates stretching from folding, this is not what is seen in typical physical systems such as the Duffing equation. The stretching is infinitesimally combined with folding, and this proceeds continually. Here is the contrast:
\begq
\ddot{x}+ x^3=a\cdot \cos(\omega t)
\label{eq:df3}
\endq
\begq
\ddot{x}+ x^3=a\cdot \sgn(\cos(\omega t))
\label{eq:df4}
\endq
Note for reference in Eq. \ref{eq:df4} that the sgn function may be replaced by $\tanh (\beta u)$ and the unbounded time variable, $t$, may be replaced by using the HOC in Sec. \ref{sc:hoc} as a clock thus making Eq. \ref{eq:df4} a bounded autonomous equation in four dimensions.
\vs6\noi In Eq. \ref{eq:df3} the damping factor has been dropped since it is not necessary for creating a transverse homoclinic point. In Eq. (\ref{eq:df3}) the combination of stretching and folding is {\em infinitesimal} and this fact is important in defining IDEs which are Infinitesimal Diffeomorphism Equations; whereas. in Eq. (\ref{eq:df4}) stretching and folding are separated by replacing the cosine with the sign of the cosine. Stretching is in the nonlinear potential terms and folding in the sgn term which amounts to a flip or a rotation of $\pi$ radians.
\vs6\noi In the foregoing equations, a frequency factor was introduced. The purpose of this is to bring attention to the fact that there must be "enough" folding and stretching that is synchronized to the stretching dynamic to produce a transverse homoclinic point. What should be clear from introducing the frequency factor is that if folding occurs too rapidly the stretching dynamic may not have a chance to contribute to the formation of a transverse homoclinic point.
\vs6\noi Taking all three factors together, there must be enough folding, folding slowly enough and stretch enough to cause a transverse homoclinic point to form and that these three dynamics must have the "right" relationship or proportions to one another.
%================Conjecture=====================
\vs6\noi {\sml Conjecture on the Formation of Complexity}
\begin{guess} Let $\T$ be any stretching diffeomorphism on a smooth manifold, M, then there exists a folding diffeomorphism $\F$ on M, such that $\F\circ \T$ has a horseshoe.
\end{guess}
\vs6\noi The significance of this conjecture for science is that systems which are not chaotic may become chaotic when the right combination of stretching and folding appear. For example, in weather systems some combination of stretching and folding may be present but not in the right proportions. If the right conditions of stretching and folding appear, then the weather system may evolve from benign to chaotic. Another possibility is that a stretching component may exist without the presence of a folding dynamic. Upon a change in conditions of the weather system the folding component may appear resulting in the system evolving to a more complex dynamic.
\vs6\noi
For ODEs, the conjecture implies that given an autonomous ODE having a stretching vector field, there exists a periodic forcing function such that for some initial conditions, the solutions are chaotic.
\vs6\noi Two examples are presented to illustrate this conjecture. By examination of the H\'{e}non map, the conjecture should assure that if the term "1" in Eq.(\ref{eq:h2}) is dropped, the resulting map will still have a horseshoe. Some direct computations will verify that this is true. For $a=2.0$ a hyperbolic fixed point can be found at $(-1,1)$ with slope approximately -1.1134. The unstable manifold may be "seen" using the methods in \cite{bi:bc3}. Equation (\ref{eq:h2}) may be referred to as the "modified" H\'{e}non map.
\vs6\noi As a second example, it should be possible to find chaos in Eq.(\ref{eq:df1}). However, to understand this situation, the role of damping in Eq.(\ref{eq:df1}) must be addressed.
\vs6\noi Damping terms, when linear, are also folding terms by the above definition. In Eq.(\ref{eq:df1}) the damping is "negative" and is thus "unfolding" while the forcing term is folding. If there is unfolding in an amount to offset folding, there is "stalemate" and the solution must converge to some variation of the homogeneous, undamped equation. This is precisely what is seen in Eq.(\ref{eq:df1}). If the unfolding effect is eliminated, i.e. drop the damping, chaos appears.
\vs6\noi Nonlinear damping terms, such as occurs in the van der Pol Eq.(\ref{eq:vp1}), pose additional conceptual challenges.
\begq
\ddot{x}+ (x^2-1)\cdot \dot{x} +x=0
\label{eq:vp1}
\endq
The factor $(x^2-1)$ is stretching while the factor $\dot{x}$ is "unfolding". The potential term is linear and, therefore, not stretching. So the entire stretching effect in Eq.(\ref{eq:vp1}) comes from the second term of the equation. Adding folding in the form of a periodic force, things get complicated quickly. The example that explains this complication is Fig. \ref{fg:jactwist}. The damping term introduces stretching through the divergence that occurs on the homogeneous attractor. As Fig. \ref{fg:jactwist} shows, divergence of this nature spawns transverse homoclinic points and attracting periodic points simultaneously. The "randomness" in knowing the basins of attraction of the three periodic points is a result of the homoclinic tangles that are "mixed" in and around their basins of attraction. There is no easy way to guess which attracting periodic point will capture a given initial condition.
\vs6\noi What has been achieved from these examples, definitions, and this approach is that the form of an equation can be examined to draw useful engineering and theoretical conclusions about its potential dynamics. While this is not a complete solution to the Hirsch Conjecture, this approach provides a new direction for studying the dynamics of specific dynamical systems based on their form. This is important in biological and social dynamical systems where parameters cannot usually be determined with great precision, and are thus open to the ``foibles'' of chaos. Also, formulating dynamics in terms of their stretching and folding properties does provide a new avenue to study dynamics generally, the new theory of IDEs.
\vs6\noi An additional level of stratification of complex dynamics is provided by almost periodic functions which imitate the shape of an unstable manifold of a chaotic dynamical system as demonstrated in Sec. \ref{sc:apchaos} , see Fig. \ref{fg:unstableap}.
%10
%================================================PBS Chaos===========================
\chapter{Chaos}
\label{ch:chaos}
\begin{center}
\parbox{3.5in}{\em The concept of chaos originates with Professor James Yorke at the University of Maryland. Chaos, as with nonlinearity, comes in many forms that vary in their degree of complexity. The variations in chaos are significant in providing an understanding of the complexity of a system.
\vs6\noi The purest form of chaos is the shift. Variations on the shift formed from functions of a shift pose many of the same problems of prediction as the shift itself.}
\end{center}
%=====================================
\section{\sml Defining Chaos}
\label{sc:defc01}
The term {\em Chaos} was coined by Professor Jim Yorke at the University of Maryland. It was motivated by the discovery that there are systems that can be described by an exact formula that also have the same properties ascribed to {\em randomness}. Randomness is a metaphor that has no formal mathematical definition. However, the example of a coin toss provides an avenue to gain a common intuitive understanding of what the metaphor means. A full analysis of the shift was provided in Chapter \ref{ch:shift}. The relationship between randomness and chaos will be presented in Chapter \ref{ch:ran}.
\vs6\noi A coin toss can be recorded by writing down a 1 for heads and a 0 for tails once the toss sequence begins. If such an infinite sequence could be written down it could look like 101101001010001... where the dots at the end means that the sequence continues on forever. Thus, the term {\em chaos} refers to {\em Systems that can be described precisely by a formula whose time series, or iterates, have the same properties as a coin toss}.
%======================================Alternate Definitions============================
\section{\sml Alternative Definitions of Chaos}
\label{sc:defc02}
There are several alternative definitions of chaos that will be enumerated in this section for future reference.
\vs6\noi
{\bf Commonly used Definitions of chaos}
\begin{itemize}
\item it has a Smale horseshoe \cite{bi:aa}
\item it has positive Kolmogorov entropy, \cite{bi:hs}
\item it has positive topological entropy \cite{bi:ak}
\item it has a positive Lyapunov exponent,\cite{bi:dg}
\item its sequences have positive algorithmic complexity \cite{bi:jf}
\item it has a dense set of periodic orbits, is topologically
transitive, and has sensitive dependence on initial conditions
\cite{bi:rd2}
\item it has sensitive dependence on initial
conditions and is topologically transitive \cite{bi:sw}
\item the power spectral density of the time-series has a
component which is absolutely continuous with respect to Lebesgue
measure \cite{bi:ber}
\end{itemize}
and a statistical definition of \cite{bi:ls}. Add to this list the definition in this book, functions of a shift.
\vs6\noi
There are more definitions and there are definitions specialized for
non-invertible systems (i.e., negative Schwarzian derivatives) such as
for one-dimensional maps. All
definitions involving entropy are definitions about information.
Positive entropy may be thought of as negative information or a
loss of information. Sensitive dependence on initial conditions
is meant to suggest that small errors in initial conditions
(information) extrapolate to large errors very fast, thus
information is lost rapidly. Algorithmic complexity is also
inseparable from information.
The presence of positive Lyapunov exponents
is equivalent to some form of exponential loss of decimals during
round off errors.
\vs6\noi
It is possible to obtain chaotic maps from the
composition of periodic maps. If such a composition is written as
\[\Phi=\rm{P} \circ \rm{Q} \]
where $\rm{P}^2=\rm{Q}^2=\I$, where $\I$ is the identity map,
then $\rm{P}\circ \Phi=\rm{Q}$, which is not chaotic. The bilateral shift is the composition of two involutions. This is the
situation for many non-integrable Hamiltonian systems having
chaotic solutions.
\vs6\noi An ideal definition of any concept has at least the following features:
\begin{itemize}
\item It is simple.
\item It is easy to apply by researchers in the field, especially engineers
\item It contains the essential features of the phenomenon
defined
\item It is possible to derive all important and widely
recognized features of the phenomenon from the definition.
\item It includes all widely recognized examples of the
phenomenon
\item It excludes anything that is widely recognized as not
being an example of the phenomenon
\end{itemize}
To address the Hirsch Conjecture, {\em the presence of chaos or complexity must be apparent from the form of the equation} should be added to this list.
\vs6\noi
All definitions of chaos suffer from some defects but the most serious is that the various definitions cannot be derived from
each other. In addition, the various definitions put forth above are nearly impossible for a practicing engineer to apply. The Theory of IDEs seeks to remedy these deficiencies.
%==========Sensitive Dependence=========================
\section{\sml Sensitive Dependence on Initial Conditions}
The most commonly recognized definition of chaos is {\em sensitive dependence on initial conditions}. This section examines the role of sensitive dependence on initial conditions in defining chaos. This concept is so pervasive that it will be examined in some detail.
\vs6\noi The following definition is from Guckenheimer.
\vs6
\noi{\bf Definition}: {\em The mapping {\rm T}:${\rm S} \ra {\rm S}$ is said to
have sensitive dependence on initial conditions if there exists a
number $\tau>0$
such that for all $x \in {\rm S}$ and any neighborhood, $U$ of
$x$, there exists a $y \in U$ and $n>0$ such that $d({\rm T}^n(x),{\rm
T}^n(y)) > \tau$}, \cite{bi:rd2}.
\vs6\noi
In this definition $d(x,y)$
represents the distance between the points $x$ and $y$. This
definition says that there is some constant (call it a separation
constant) $\tau$ such that for any point $x \in S$ and any
neighborhood $U$ of $x$, there is some point $y \in U$ (usually
think of $y$ as being very close to $x$) that
will eventually move away from $x$ (not necessarily permanently) by
an amount $\tau$. This definition will be examined by the construction of examples.
\begin{example}{\sml \bf A linear System}
\label{ex:ls}
Consider the linear system
\[\dot{x}=x\]
whose solution is
\[x(t)=x_0 \,\exp(t)\]
For $t=\ln(2)$, it defines the linear one-dimensional map T:$\Rl
\ra \Rl$ given by:
\[{\rm T}(x)=2\,x\]
Let $x \in \Rl$ and choose $\tau=1$. Given $\epsilon >0$ choose any $y$ with
$\|x-y\|<\epsilon$. Then
the distance between the $n^{th}$ iterates of $x$ and $y$ is $2^n\,\epsilon$. By
choosing $2^n \,\epsilon =\tau$ it is possible to solve for the integer $n$
needed to verify that the definition is satisfied, and thus conclude that T has
sensitive dependence on initial conditions. Note that the forward
time autocorrelation of $x(t)$ is 1.
\end{example}
\begin{example}{\sml \bf A bounded system in the plane}
\label{ex:twst}
Consider the simple twist, $\T$, \cite{bi:bc1} with a fixed point at the origin
defined on the plane where a disk of radius $\epsilon$ about the
origin has been
removed. $\T$ is the time-one map determined by the differential
equations:
\[\l( \vt \dot{x}. \dot{y} \par \r)=
\l( \vt -r\,y. r\,x \par \r)\]
where $r=\sqrt{x^2+y^2}$. It is required that
$x^2+y^2>\epsilon^2$ for some fixed but arbitrary parameter,
$\epsilon$, to meet the criteria that a disk of radius $\epsilon$
has been removed from the plane.
\end{example}
\vs6\noi
$\T$ has sensitive dependence on initial conditions since
neighboring solutions of these equations rotate about the origin
at different angular velocities and so must eventually separate
by an amount $\tau=2\epsilon$, the diameter of the disk that has been
removed from the plane.
In this example, every orbit is confined to an invariant
circle with center at the
origin. On each invariant circle $\T$ is a rotation, which is
ergodic \cite{bi:pw} but $\T$ is not ergodic on its domain
of definition. On each invariant circle every orbit forms
a sequence which is almost periodic. The
time series for each solution is almost periodic
as well. There are no hyperbolic fixed points for $\T$, or positive
Lyapunov exponents. In short, $\T$ has no chaotic properties by any
definition.
\vs6
\noi{\bf EXAMPLE: A simple system on the torus}
\vs6
Consider the torus in three dimensions centered at the origin,
where $R$ is the radius from the origin to the center of the
torus, and the radius of a vertical cross section is 1:
\[\l(\vts x(\theta,\phi). y(\theta,\phi). z(\theta,\phi) \par \r)=
\l(\vts (R-\cos(\theta))\cos(\phi). (R-\cos(\theta))\sin(\phi).
\sin(\theta) \par
\r)\]
Measuring the angle $\theta$ from the
origin, define a rotation around each cross section circle located at the
angle $\theta$ as follows: If $x$ is a point on the cross section
circle located at angle $\theta$, rotate $x$ around this circle by an
angle $\theta$. Hence, the rotation in the
direction of $\phi$ depends on the angle $\theta$. For any point
on the torus, and any neighborhood about this point, there is a
point near by on a different cross section circle that rotates at a different
angular rate and hence the two points must eventually separate by
a distance of $\tau= 2$, twice the radius of the cross section of the torus.
Each orbit is almost periodic with no two orbits having the same almost period.
This system cannot have any chaos for the same reason as the
preceding system.
\vs6
\noi{\bf EXAMPLE: Linear without exponential loss of
information}
\vs6\noi
The map
\[\l( \vt x. y \par \r) \ra
\l( \vt x. x+y \par \r)\]
is linear and has sensitive dependence on initial conditions
since for any point $(x,y)$ there is a point within epsilon from which
it separates. This is because after $n$ iterations of this map on the
difference of the two points (because this map is linear their
difference is $(\epsilon, \delta)$),
\[\l( \vt \epsilon. \epsilon+n\, \delta \par \r)\]
The length of this vector determines the distance the two points
have separated after $n-$iterations which is at least the distance $n\delta$.
Since this map preserves the integer lattice it may be considered as a
map on the torus and as such is equivalent to example 13.
\vs6\noi The conclusion of these examples is that sensitive
dependence on initial conditions is not sufficient to define
chaos, even with exponential loss of information.
\vs6
\noi {\bf EXAMPLE: Revisited}
\vs6
If in EXAMPLE 9 of the previous section, the formal definition of sensitive dependence
on
initial conditions is applied using reference point
as the fixed point (0,0), then there does not exist any $\tau$
for which the definition can work. The problem is that the
definition is formulated in terms of a metric. What is true of
this example is that although the terms of the sequence
\[d(T^n(0,0),T^n(x,y))\]
are constant, the angular coordinate of $T^n(x,y)$ is uncorrelated.
Given any metric on a compact manifold a similar example can be constructed.
%=================Bernoulli shifts===============================
\section{\sml Bernoulli Shifts Revisited}
Chapter \ref{ch:shift} analyzes the shift in great detail. This section briefly reviews some of the content of Chapter \ref{ch:shift} with the aim of connecting the shift to the concept of chaos.
\vs6\noi The single definition that captures the idea of chaos in the simplest possible terms is the Bernoulli Automorphism (or shift) which is the mathematical formulation of a coin toss. \footnote{ For historical reference, these shifts are called Bernoulli shifts, in recognition of one of the pioneers of probability theory.} To illustrate the connection between Bernoulli shifts and coin tosses consider the coin toss sequence again 101101001010001... In order to have an orderly reference point for where a sequence begins place a decimal point at the front to get 0.101101001010001... If all digits are shifted one place to the left of the decimal and the number to the left of the decimal is discarded, another potential coin toss is obtained: 0.01101001010001... This process provides a rigorous mathematical formulation of a coin toss and is as close as one can get to providing a rigorous definition of a coin toss. If the sequence of 1s and 0s were produced by an actual coin toss experiment, they would embody the intuitive idea of randomness. Sec. \ref{sc:random} will explore this idea further. For now, it is necessary to accept that this ``mapping'' is the only rigorous way to intuitively describe the concept of {\em randomness}.
\vs6\noi The way to put all of this into a formula is to note that 10 is binary for the number 2 and 10 times 0.101101001010001...shifts all digits to the left by one space. I.e.,
\[10 \cdot 0.101101001010001...= 1.01101001010001...\]
To formulate the notion of discarding the digit to the left of the decimal the algebraic function mod(1) is used. It means to throw away the integer part of a number. For example, 168.123 mod(1) is 0.123.
\vs6\noi The function $f(x)= 2\, x\, \mod(1)$ captures this entire discussion in a very shorthand algebraic form that can be algebraically manipulated. Of course $3\, x\, \mod(1)$ is also a shift on three digits, 0, 1, 2. In fact, $n \, x\, \mod(1)$ is a shift on $n$ symbols. Since every number has a binary expression all of the additional shift concepts can be reduced to binary shifts. So $100\, x\, \mod(1)$ shifts everything to the left by two digits in binary. To see why this is complex refer to Fig. \ref{fg:shift}.
\begin{figure}[htbp]
\centering
\includegraphics[height=1.533in,width=4.267in,angle=0]{C:/Research/Book/Figures/EPS/shift.eps}
\caption{\scriptsize Three examples of one-sided shifts with multipliers (from left to right) $a=2,\; a=10,\; a=16807$. That the degree of disorder increases as $a$ increases is made clear by this figure \footnotesize}
\label{fg:shift}
\end{figure}
\vs6\noi In this figure there are three graphs derived by iterating three different shifts mod(1). As the multiplier is increased, it becomes clear why this process is a good intuitive illustration of {\em randomness} of the shift transformation. The process of multiplying and shifting is the essence of chaos.
\vs6\noi The shift described by $2\,x\, \mod(1) $ is not invertible because of the mod one function. By establishing a reference point, it is possible to have two-sided shifts that are invertible. Using the decimal point as a reference simply carry out the shift without discarding the digits to the left of the reference point. Doing this is like having an infinite coin toss that started at an infinite time in the past and proceeds for an infinite time into the future.
\vs6\noi The level of complexity of the sequence obtained by repeating the operation of $2\, x\, \mod(1)$ depends on which binary number is the starting point. The number $0.1010101...$ is periodic. The binary form of $\sqrt{2}$ is not periodic because it is irrational. And there are numbers in the interval [0,1] that cannot be written down in finite time and for which there is no algorithm that can be used to generate the number.
\vs6\noi The highest form of complexity is the Bernoulli shift and it embodies the essence of chaos: randomness that comes from a completely clear algebraic formula. In Sec. \ref{sc:random} the shift was connected to a mathematical short hand $2\, x\, \mod(1)$ which was not invertible. There is an invertible algebraic version of the shift. Its form is as follows:
\begin{equation}
\label{eq:ber}
\l( \vt x.y\par\r) \ra \l(\mtx 2.1.1.1\par\r)\l( \vt x.y\par\r)\mod(1)
\end{equation}
A parallel explanation covers Eq.\ref{eq:ber} in two dimensions.
\vs6\noi The proof that Eq. \ref{eq:ber} is a Bernoulli shift is not simple and can be found in \cite{bi:yk}.
\vs6\noi The mod(1) operation can be avoided by working with complex numbers on the unit circle in the complex plane i.e., the mapping
\begq
\label{eq:berc}
\l( \vt z.w\par \r)\ra \l(\vt z^2\, w. z\, w \par \r)
\endq
To see the relationship to Eq. \ref{eq:ber} let $z=\exp(i\, x)$ and let $w=\exp(i\, y)$. Then Eq. \ref{eq:berc} is
\[\l( \vt \exp(i\, x).\exp(i\, y)\par \r)\ra \l(\vt \exp(2\,i\, x)\, \exp(i\, y). \exp(i\, x)\, \exp(i\, y) \par \r)=\l(\vt \exp(i\,(2\, x+ y). \exp(i\, (x+ y)) \par \r)\]
\vs6\noi From a computational point-of-view, it is preferable to stick with algebraic expressions rather than binary sequences because algebraic expressions allow the use of computational rules to analyze a problem. This leads to the position of just considering functions of shifts rather than shifts themselves since problems are usually describable in terms of algebraic expressions, i.e., expressions that can be manipulated by a clearly stated set of rules.
\begin{example}{\bf Periodic Functions of a Shift}
\vs6\noi Consider the mapping $x\ra \cos(2\, x)$. After n iterations this is $\cos(2^n\, x)$. After n iterations of $2\, x\, \mod(1)$ one has $2^n\, x\, \mod(1)$. The $\mod(1)$ operation essentially makes $2\, x\, \mod(1)$ a periodic function when considered on the entire real line. $\cos( x)$ is a periodic function so that its periodicity has the same effect as $\mod(1)$. In particular, $\cos(2 \, x)=\cos(2\, x\, \mod(2 \pi))$ and therefore $\cos(2^n\, x)$ is a function of a shift.
\vs6\noi Because the cosine has a replication formula $\cos(2\, x)= 2\, \cos^2(x)-1$ by a change of variable the transformation $x\ra 2\, x^2-1$ may be obtained. All replication functions, examples to follow in later sections, give rise to functions of a shift and this means that they have, buried in their sequence of iterates, the same level of complexity as some shift.
\end{example}
\begin{figure}[htbp]
\includegraphics[height=2.393in,width=4.193in,angle=0]{C:/Research/Book/Figures/eps/Cos2n.eps}
\caption{\sml \bf Plate A $\cos(2^n)$ cross the harmonic oscillator; Plate B $x_{n+1} =2\, x^2_n-1$ cross the harmonic oscillator }
\label{fg:cos2n}
\end{figure}
\begq
\label{cd:cs2n}
\left.
\begin{array}{lcl}
&&\mbox{{\bf The code for Fig. \ref{fg:cos2n} is as follows:}}\\
h &=& 0.001\\
z_1 &=& 0\\
w_1 &=& 1\\
x &=& \sqrt{2} / 3\\
y &=& x\\
&& \mbox{For i = 1 To N}\\
z &=& z_1\cdot \cos(h) + w_1\cdot \sin(h)\\
w &=& w_1\cdot \cos(h) - z_1\cdot \sin(h)\\
z_1 &=& z\\
w_1 &=& w\\
u &=& \arccos(x)\\
x &=& \cos(2 \, u)\\
y&=& 2 \, y ^ 2 - 1\\
&&\mbox{\bf Plot $(y,z)\; (x,z)$}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi For practical purposes it is more useful to show that a transformation is a function of a shift rather than prove that the complexity of a shift is buried in the transformation.
%==========================================
\section{\sml Definition of Chaos from Shifts}
These considerations lead to the definition of chaos that will be used in this book. The motivation for this definition is the Smale-Birkhoff theorem which gave rise to the term PBS chaos. PBS chaos is a rigorous definition.\footnote{The Smale-Birkhoff Theorem originated from comments of Poincar\'{e}.} The Smale-Birkhoff theorem is deeply significant. However, its engineering limitations lie in its difficulty to be understood and applied by engineers. Further, it only asserts the existence of complex dynamics (a shift) on a Cantor set which may have measure zero. Using IDE theory to deconstruct equations such as those of Chua, Lorenz and R\"{o}ssler reveals that PBS chaos is not sufficient to provide the level of insight that will assist in the prediction of future states or in providing insight into how to leverage complex dynamics to support innovation.
\vs6\noi Figures \ref{fg:mfld01} and \ref{fg:brain} serve to illustrate the practical limitations of PBS chaos. Each figure is the unstable manifold of a Twist-and-Flip system. The stable manifolds are the reflection of the unstable manifolds about the vertical axis demonstrating that the intersections are complex. The role of the stable manifold is to {\em force} each orbit toward the unstable manifold just as in the linear case. In short, the unstable manifold drags the entire space along with it following a very complex path. Even when the unstable manifold is one-dimensional, it dominates the dynamics of the entire region, regardless of how high the dimension the space may be. In each figure, it is clear that the pathways of the unstable manifolds are totally unrelated, meaning that the significant dynamics are uncorrelated even though both systems are functions of a shift. Thus, the existence of PBS chaos is a start at understanding the system dynamics, clearly not the end of the story. It is the objective of IDE theory to further clarify the dynamics of separate chaotic systems by providing insight through the analysis of the algebraic form of the IDEs.
\vs6\noi In section \ref{sc:single}, IDEs are constructed that produce a means of analyzing complexity through the use of one-dimensional maps. These constructions reveal that PBS chaos is not the entire story on complexity and that further work needs to be done. In particular, PBS chaos and other complex dynamics are to be found in natural systems and must be taken into account if prediction and innovation are to be supported by a theory of {\em complexity}.
\begin{figure}[htbp]
\centering
\includegraphics[height=3.0in,width=2.66in,angle=0]{C:/Research/Book/Figures/EPS/mfld1_c.eps}
\caption{\scriptsize \footnotesize}
\label{fg:mfld01}
\end{figure}
\begin{figure}[htbp]
\centering
\includegraphics[height=2.86in,width=3.287in,angle=0]{C:/Research/Book/Figures/EPS/brain.eps}
\caption{\scriptsize \footnotesize}
\label{fg:brain}
\end{figure}
\begin{figure}[htbp]
\centering
\includegraphics[height=3.013in,width=3.55in,angle=0]{C:/Research/Book/Figures/EPS/mfld2.eps}
\caption{\scriptsize \footnotesize}
\label{fgmfld2}
\end{figure}
\begin{definition} {\bf Function of a Shift} A transformation is said to be a function of a shift when there exists an invariant subset of its domain on which it is exactly a shift.
\end{definition}
\vs6\noi There are many shift functions. $3\, x\, \mod(1)$, $4\, x\, \mod(1)$ and many more. Even $2\, x\, \mod(2\, \pi)$ is a shift. They have been classified by their entropy \cite{bi:pw} p. 105, the degree to which they mix things up. The entropy of a shift $a\, x\, \mod(1)$ is $\log(a)$ showing that their ability to mix things up increases as the multiplier $a$ increases. This fact is illustrated in Fig. \ref{fg:shift}.
\vs6\noi This definition allows for a modified version of the Hirsch Conjecture:
\begin{center}
\parbox{3.5in}{{\em It should be possible to tell if a transformation is a function of a shift from the algebraic form of its equation.} }
\end{center}
\vs6\noi Using this approach one can conclude that $x \ra \cos(2\, x)$ is a function of a shift because it can be written as $x \ra \cos(2 \, x\,\mod(2\, \pi))$. Similarly, one can conclude that $x \ra \cos(3\, x)$ is a function of a shift.
\begin{definition} {\bf Chaos } A transformation is chaotic if it is a function of a shift. An IDE, $\T_h$, is chaotic when $\T_1$ is a function of a shift.
\end{definition}
\vs6\noi Defining complexity is more difficult and will be deferred until additional examples are presented.
\vs6\noi These considerations raise the question of whether there is a theorem that allows one to conclude when very complicated transformations are functions of a shift. This is the Smale-Birkhoff theorem.
%\[ 2\, x\, \mod(1)\;\; 10\, x\, \mod(1)\;\; 16807 \, x\, \mod(1)\]
%===========================================Functions of a Shift==============
\section{\sml Functions of a Shift}
\label{sc:fs}
Defining chaos as a transformation that is a function of a shift
\[S_a(x)=2x\bmod(a)\]
where $a>1$, is reasonable so long as the function is not constant almost everywhere.
\vs6\noi This section will present several examples of functions of a shift. The following formulas will be needed:
\begin{lemma}
\[(Kx)\bmod(Ka)=K(x \bmod (a))\]
where $a, K>0$.
\vs6\noi If $f$ is a periodic function on the real line
with period $K$
\[f(Kx)=f(Kx \bmod (K))=f(K(x \bmod (1)))\]
\end{lemma}
\pf
Direct computation \rl
\vs6\noi The significance of these formulas is that
\[f(K 2^n x_0)=f(K (2^n\,x_0 \bmod(1))) \]
and so sequences of the form $y_n=f(K 2^n x_0)$ produce functions of a shift.
%====================================================Iteration Examples==================
\subsection{\sml Functions of a Shift from Well-known Functions }
\begin{example}
\label{ex:le}
{\bf Closed form solution of the logistic
equation for $\lambda=4$ in terms of elementary functions}
\noi The logistic equation is
\[y_{n+1}=\lambda \, y_n\,(1-y_n)\]
Only the case where $\lambda=4$ is considered.
\vs6\noi
Consider the sequence
\[y_n=\sin^2(2^n \,C\bmod(1)\pi)=\sin^2(2^n \,C\pi)\]
where $0\leq C \leq 1$ is an arbitrary constant determined by the initial
conditions.
Then
\[y_{n+1}=4\sin^2(2^n \,C\pi)\cos^2(2^n \,C\pi)\]
or
\[y_{n+1}=4\,y_n \,(1-y_n)\]
and hence $y_n$ is the closed form solution of
the logistic equation with parameter value equal to 4.
This example shows that the solution of the logistic equation is
a function of the one-sided shift, but is not topologically
conjugate to a shift.
\end{example}
\vs6\noi
Generalize this example as follows:
\begin{example} {\bf The elliptic logistic equation}
\label{ex:ele}
Now consider the sequence
\[y_n=\sn^2(2^n \,C\,K)\]
where $\sn$ is the Jacobi elliptic sine and $K$ is half the
period of $\sn(t,k)$ and the parameter $k$ is such that $0 < k < 1$. For $\alpha > 0$, $A$ is hyperbolic.
But, a complete proof of chaos requires proving that the Taylor series converges, that $F^{-1}$ exists over a
sufficiently large region, and that $F$ maps all of three space
into the domain of $F^{-1}$. Doing this will depend on the value of $\alpha$, and the
values of the $c_i$.
%======================================
\subsection{\sml The Continuous Case}
Consider the equation
\[x(x_0,t)=\cos(2^t\arccos(x_0))\]
Formally differentiating gives
\[\dot{x}=-\sin(2^t\arccos(x_0))2^t\,\ln(2)\arccos(x_0)\]
For all $t,x_0$ where the cosine is invertible, this reduces to
\[\dot{x}=-\sin(\arccos(x))\ln(2)\arccos(x)\]
which is an autonomous first-order equation. The `solution' is
chaotic, but there are not unique solutions. In general,
functions of the form
\begq
x(x_0,t)=f(\exp(At)f^{-1}(x_0))
\label{eq:cn}
\endq
where $x$ is n-dimensional, and $A$ is an $n \times n$ matrix
define one-parameter groups in that
\[x(x(x_0,s),t)=x(x_0,s+t)\]
so long as $f$ is invertible. Solutions of autonomous
differential equations always define such one-parameter groups.
Formally differentiating Eq.(\ref{eq:cn}) gives rise to the
autonomous ODE
\[\dot{x}=f'(f^{-1}(x))\,A\,f^{-1}(x).\]
If $f$ is globally invertible, the
solution is linear after a change of coordinates. The interesting
cases arise when $f$ is locally, but not globally invertible and
$f$ maps all of n-dimensional space onto a bounded subset of the
domain of $f^{-1}$. The various solutions of an ODE are then
determined by the separate functions that can be derived from a
duplication equation. In the case of the equation $x_{k+1}=2\,x_k^2-1$,
there are many `solutions', depending on the initial conditions,
and this fact is reflected in the different solutions to the
associated duplication equation.
%**************************************************************************************
%===============================Representations of bilateral shifts==================
\section{\sml Representations of the Bilateral and Unilateral Shifts}
\label{sc:reps}
Common examples of bilateral and unilateral shifts are produced using the Int function. Two common examples are
\[x\ra 2\,x\,\mod(1)\]
where the function $x\;\mod(1)$ is $x-\mbox{Int}(x)$ which is also written as $x-[x]$. In Sec. \ref{sc:sf} a $\Ci$ version of $x-[x]$ was given. What was not noted there is that the $\Ci$ version can produce damping when used in conjunction with other functions. This is illustrated by the $\Ci$ version of the Anosov map, due to Arnold \cite{bi:aav}, which is given by
\[\l(\vt x.y \par \r)\ra \l(\mtx 2.1.1.1\par\r)\l(\vt x.y \par \r)\mod(1)\]
Mathematically the mod(1) function allows the Anosov map to be seen as a diffeomorphism on a torus by first identifying the top and bottom of the square to form a cylinder and then bending the cylinder into a doughnut shape so that the lateral sides are connected. The resulting image from iterating the Anosov map in the plane is seen in Fig. \ref{fg:catmapa}, Plate A.
\begin{figure}[htbp]
\includegraphics[height=2.75in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/CatMapA.eps}
\caption{\sml {\bf Plate A: The Anosov Map Using mod(1); Plate B: The Anosov Map Using a $\Ci$ Analog of the mod(1) Function, Sec. \ref{sc:sf}}}
\label{fg:catmapa}
\end{figure}
\vs6\noi In Fig. \ref{fg:catmapa}, Plate B, the mod(1) function has been replaced by its $\Ci$ version. As seen in the Plate B, the orbit is no longer uniform. Blank spaces have appeared and the size of the figure (the domain) is diminished. In essence, the $\Ci$ version of the mod function has introduced damping, making the image in Plate B an attractor.
\vs6\noi In Fig. \ref{fg:catmapa}, Plate B, the hyperbolic tangent used to define the $\Ci$ version of $[x]$ was $\tanh(\beta\, u)$, with $\beta=170$. In Fig. \ref{fg:catmapb}, Plate A, $\beta=40$ and in Plate B $\beta=20$.
\begin{figure}[htbp]
\includegraphics[height=2.7in,width=4.193in,angle=0]{C:/Research/Book/Figures/eps/CatMapB.eps}
\caption{\sml {\bf Plate A: The Anosov Map Using $\Ci$ Version of mod(1) with $\beta=40$; Plate B: The Anosov Map Using a $\Ci$ Analog of the mod(1) with $\beta=20$}}
\label{fg:catmapb}
\end{figure}
\vs6\noi As $\beta$ decreases the derivative of $\tanh(\beta\, u)$ at zero also flattens out. This results in an increase of the damping factor in Fig. \ref{fg:catmapb}.
%===========================IDE representation===================
\ssc{\sml Representation of the Anosov map on the Torus as an IDE}
\label{sc:anosov}
Returning to the Anosov map,
\[\l(\vt x.y \par \r)\ra \l(\mtx 2.1.1.1\par\r)\l(\vt x.y \par \r)\mod(1)\]
decompose this diffeomorphism to get
\[\l(\vt x.y \par \r)\ra \l(\mtx 1.0.1.1\par\r)\l(\vt x.y \par \r)\mod(1)\]
\[\l(\vt x.y \par \r)\ra \l(\mtx 1.1.0.1\par\r)\l(\vt x.y \par \r)\mod(1)\]
Let
\[\C= \l(\mtx 1.0.1.1\par\r)\hspace{10pt}\C^{\rm T}= \l(\mtx 1.1.0.1\par\r)\]
then
\[\C= \exp\l(\mtx 0.0.1.0\par\r) \hspace{10pt}\C^{\rm T}=\exp\l(\mtx 0.1.0.0\par\r) \]
Let
\[\A=\l(\mtx 0.0.1.0\par \r)\]
\[ \exp(\A)\,\exp(\A^{\rm T})=\exp( \A+\A^{\rm T}+0.5\,[\A, \A^{\rm T}]+\cdots )\]
Dropping all terms higher than $[\A, \A^{\rm T}]$ gives the approximation
\[\l(\vt x.y \par \r)\ra \l(\mtx 2.1.1.1\par\r)\l(\vt x.y \par \r)\mod(1)\approx \exp(\C)\]
where
\[\C=\l(\mtx 1.1.1.-1\par \r)\]
\vs6\noi To obtain an IDE use the following proposition:
\begin{proposition}
\label{pr:cm}
Let $\C_1,\; \C_2$ be two $n \times n$ matrices.
\vs6 \noi Then
\begq
\label{eq:cm}
\T_h= \exp( h\, \C_1+h\, \C_2+0.5\,h\, h_0\,[\C_1, \C_2])
\endq
where $h_0$ is a fixed parameter is an IDE.
\end{proposition}
\pf
\begq
\label{eq:cm1}
\exp(\C_1)\,\exp(\C_2)= \exp( \C_1+\C_2+0.5\,[\C_1, \C_2]+\cdots)
\endq
Replacing $\C_i$ with $h \, \C_i$ gives the relationship
\begq
\label{eq:cm2}
\exp(h\,\C_1)\,\exp(h\,\C_2)= \exp( \C_1+\C_2+0.5\,h^2\,[\C_1, \C_2]+\cdots)
\endq
and so
\begq
\label{eq:cm3}
\exp(h\,\C_1)\,\exp(h\,\C_2)\approx \exp( \C_1+\C_2+0.5\,h^2\,[\C_1, \C_2])
\endq
Replacing $h^2$ with $h\, h_0$ where $h_0$ is a fixed parameter then
\[ \T_h=\exp(h\,( \C_1+\C_2+0.5\,h_0\,[\C_1, \C_2]))\]
is an IDE.
\rl
\vs6\noi Let $\C_1=\A,\;\; \C_2=\A^{\rm T}$ then
\begq
\label{eq:cm4}
\C\,\C^{\rm T}=\exp(h\,\A)\,\exp(h\,\A^{\rm T})\approx \exp( h\,(\A+\A^{\rm T})+0.5\,h\, h_0\,[\A, \A^{\rm T}])
\endq
is an IDE.
\vs6\noi Applying this proposition
\[\T_h=\exp( h\,(\A+\A^{\rm T})+0.5\,h\,h_0\,[\A, \A^{\rm T}])\]
where $h_0$ is just another parameter is an IDE.
\begin{proposition}
\label{pr:anosov}
Let
\[\T(\X)=\exp( h\, \C)\, \X \]
where
\[\C=\l(\mtx \lambda.1.1.-\lambda \par \r)\]
Then
\[\T_h=(\cosh(a\, h)\,\I+ \frac{1}{a}\sinh(a\, h)\C)\]
where $a^2=1+\lambda^2$
\end{proposition}
\pf
To evaluate $\exp(h\, \C)$ note that
\[\C^2=(\lambda^2+1)\I\]
The expansion of the exponential function is
\[\exp(h\, \C)= \I+h\, \C+ \frac{a^2h^2}{2!}\, \I+\frac{a^2h^3}{3!}\, \C +\frac{a^4h^4}{4!}\, \I +\frac{a^4h^5}{5!}\, \C +\cdots=\]
\[\cosh(a\, h)\,\I+ \frac{1}{a}\sinh(a\, h)\C\]
\rl
\vs6\noi The approximation of
\[\T(\X)=\exp(h\, \A)\exp(h\, \A^{\rm T})\, \X\]
on the unit torus is an IDE. $h=1$ gives the Anosov map. The results of this approximation is illustrated in Fig. \ref{fg:catmap}.
\begin{figure}[htbp]
\includegraphics[height=2.42in,width=3.82in,angle=0]{C:/Research/Book/Figures/eps/CatMap.eps}
\caption{\sml {\bf Approximate IDE for the Anosov Map. Plate A: The time series; Plate B: The time-one map for the Anosov Map}}
\label{fg:catmap}
\end{figure}
\begq
\label{cd:cm}
\left.
\begin{array}{lcl}
&& \mbox{\bf The Code for Fig. \ref{fg:catmap}}\\
h &=& 2 \cdot \pi / 3000\\
h_0 &=& 0.01\\
\lambda &=& h_0 / 2\\
a &=& \sqrt{\lambda ^ 2 + 1}\\
b &=& 0.5 \cdot h_0 / a\\
\\
&& \mbox{\bf For i = 1 To N}\\
u &=& x \cdot (\cosh(a \cdot h) + b \cdot \sinh(a \cdot h)) + \sinh(a \cdot h) \cdot y / a\\
v &=& y \cdot (\cosh(a \cdot h) - b \cdot \sinh(a \cdot h)) + \sinh(a \cdot h) \cdot x / a\\
x &=& u\, \mod(1)\\
y &=& v\, \mod(1)\\
&& \mbox{\bf Plot Point}\\
&& \mbox{\bf Next i}
\end{array}\right \}
\endq
\vs6\noi {\bf An Alternative Analysis of the Anosov Map} The following are IDEs on the unit torus,
\[\T_h(\X)= \exp(h\, \A)\, \X\, \mod(1)\hspace{10pt}\S_h(\X)= \exp(h\, \A^{\rm T})\, \X\,\mod(1)\]
and so
\[(f(\X)\,\T_h(\X)+(1-f(\X))\,\S_h(\X))\,\mod(1)\]
is an IDE by Lemma \ref{lm:hide}. Using the HOC to define $f$, this IDE cycles between $\T$ and $\S$. If $f(\X)\cdot (1-f(\X))=0$ this IDE is a composition. It is only an exact representation of the Anosov map as an IDE if $\A$ commutes with its transpose, which it does not. However, it is morphologically equivalent as indicated by Fig. \ref{fg:anosov}, Plate B, which shows that the time-one map is morphologically equivalent to the Anosov map.
\begin{figure}[htbp]
\includegraphics[height=2.207in,width=3.79in,angle=0]{C:/Research/Book/Figures/eps/AnosovIDE.eps}
\caption{\sml {\bf Anosov IDE. Plate A: The time series; Plate B: The time-one map}}
\label{fg:anosov}
\end{figure}
\vs6\noi Following Proposition \ref{pr:anosov} produces Fig. \ref{fg:anosov1}.
\begin{figure}[htbp]
\includegraphics[height=2.407in,width=4.56in,angle=0]{C:/Research/Book/Figures/eps/Anosov1.eps}
\caption{\sml {\bf Anosov IDE. Plate A: The time series; Plate B: The time-one map}}
\label{fg:anosov1}
\end{figure}
%========================================================Twist and Translate Representations==================
\subsection{ \sml Representations of the Bilateral and Unilateral Shifts}
In the previous section are three examples of sequences and
maps that were formed from the one-sided shift. This section
will show how to represent both the one and two-sided shifts
using a composition of maps that arise naturally from forced
oscillators and autonomous three-dimensional dynamical systems.
This section
will present some representations of the shift as maps that
frequently occur in the time-one maps of commonly occurring
dynamical systems. In particular it will be shown that the shift map
occurs as a twist-and-dilation map and that the shift can have a
component that is a sigmoid map.
%=============Twist and Dilation======================
\ssc{\sml Representation of the Bilateral Shift as a
Twist-and-dilation Map in the Plane}
\begin{example} {\bf Bilateral Shift as a Twist-and-dilate map}
\label{ex:shift1}
\vs6\noi
Define two maps $T_1,T_2$ as follows:
\[T_1\l(\vt r. \theta \par \r)=\l(\mtx 1. 1. 0. 1 \par \r)\l(\vt
r. \theta \par \r)\]
and
\[T_2\l(\vt r. \theta \par \r)=\l(\mtx 1. 0. 1. 1 \par \r)\l(\vt
r. \theta \par \r)\]
Require that $-\pi \leq \theta < \pi$, in place of the usual $0
\leq \theta < 2\,\pi$ in these equations. The composition $T_2
\circ T_1$ is
\[(T_2 \circ T_1)\l(\vt r. \theta \par \r)=\l(\mtx 1. 1. 1. 2 \par \r)\l(\vt
r. \theta \par \r)\]
The composition considered on the torus is the Anosov map
and is known to be a bilateral shift.
\vs6\noi
In rectangular coordinates the map
\[ T_1\l(\vt x. y \par \r)=(1+\arctan(y/x)/r)\l(\vt
x. y \par \r)\]
is a radial dilation or contraction of the vector $(x,y)$. The arctangent has values in the interval $[-\pi/2,
\pi/2]$.
\vs6\noi
The map $T_2$ is the simple twist:
\[ T_2\l(\vt x. y \par \r)=\l(\mtx \cos(r). -\sin(r). \sin(r).
\cos(r) \par \r)\l(\vt x. y \par \r)\]
where $r=\sqrt{x^2 + y^2}$. The composition $T_2 \circ T_1$ is
not invertible in the plane but is nearly so in a practical
sense. In particular, the points on the curve $\arctan(y/x)=-r$
are all mapped to the origin. But this is a set of measure zero
and is unlikely to be encountered in practice.
The non-invertibility occurs because the arctangent
can have negative values. The orbits of this map resemble the samples of a
vector random variable with a Gaussian distribution. This map
shows how a shift could occur as a time one map in a driven
oscillator.
\end{example}
\vs6\noi This example also shows that if a set of measure 0 is removed, then a forced
oscillator does not have to have {\em any} elliptic island
chains such as is seen in the familiar KAM theory.
\vs6\noi
In general, KAM island chains can arise from maps of the
form $T_2 \circ T_1$
where $T_2$ is a simple twist and $T_1$ is any number of other
possible maps, \cite{bi:bc3}. Example \ref{ex:shift1} shows that the
structure and appearance of the island chains, when they occur,
can be attributed solely to the nature of the map $T_1$. If $T_1$
is a rotation that preserves the integral curves of $T_2$ then
the entire plane is elliptic. In this case, all solutions are almost periodic,
and there are no island chains or chaos.
If $T_1$ is as in Example \ref{ex:shift1}, then there are no island chains
either, there are some almost periodic orbits as there are in the shift,
but in general, chaos prevails exactly as it does in the
bilateral shift. In between these two extremes
arise the island chains in which the map $T_2 \circ T_1$ is
sometimes elliptic, sometimes hyperbolic, and sometimes
parabolic, Arnold \& Avez \cite{bi:aav}.
\begin{example}{\bf Unilateral Shift as a Twist-and-dilate map}
\label{ex:shift2}
Define two maps $T_1,T_2$ as follows:
\[T_1\l(\vt r. \theta \par \r)=\l(\vt
\theta. \theta \par \r)\]
and
\[T_2\l(\vt r. \theta \par \r)=\l(\mtx 1. 0. 1. 1 \par \r)\l(\vt
r. \theta \par \r)\]
$T_2$ is invertible but $T_1$ is not. In rectangular
coordinates this is
\[T_1\l(\vt x. y \par \r)=(\arctan(y/x)/r)\l(\vt
x. y \par \r)\]
and
$T_2$ is, as in the previous example, the simple twist:
\[ T_2\l(\vt x. y \par \r)=\l(\mtx \cos(r). -\sin(r). \sin(r).
\cos(r) \par \r)\l(\vt x. y \par \r)\]
The composition is a noninvertible map on the plane which is a
one-sided shift.
\end{example}
\begin{example} { Unilateral Shift as a Sigmoid Map}
\label{ex:shift3}
Let
\[f(x)=x-0.5\,(1- \tanh(0.5\, \beta\,(1-x)))\]
and define on the unit interval the map
\[T(x)= f(2x)\]
As $\beta \ra \infty$ this map converges pointwise to the
unilateral shift, except at a finite number of points. This
example shows that the occurrence of a term of the form $f(2x)$ in
a forcing term of a differential equations can have the effect of
a shift, and thus create chaos. An extreme example of this is the
modified Chua equation, {\rm Brown \cite{bi:rb1}}.
\end{example}
%=====================Baker's Transformation=======
\begin{example}
The
map that is most easily proven to be a two-sided Bernoulli shift
is the baker's transformation, Arnold \& Avez~\cite{bi:aav}. The most
familiar formulation of this map is
\[\l( \vt x. y \par \r) \ra \l( \vt 2x. y/2 \par \r)\bmod(1)\]
for $0\leq x \leq 1/2$ and
\[\l( \vt x. y \par \r) \ra \l( \vt 2x. (y+1)/2 \par \r)\bmod(1)\]
for $1/2\leq x \leq 1$. This map formulation can be greatly
simplified by use of the notation $[x]$ which denotes the
integer part of $x$. In this notation:
\[\l( \vt x. y \par \r) \ra \l( \vt 2x.([2x]+ y)/2 \par
\r)\bmod(1)\]
If $\{x\}$ is used for the fractional part of $x$ this simplifies to
\[\l( \vt x. y \par \r) \ra \l( \vt \{2x\}.([2x]+ y)/2 \par
\r)\]
In this form, a closed-form solution for the $n^{th}$ term of
this sequence is (note that this solution is not in terms of elementary
functions):
\[\l( \vt \{2^n\,x\}.([2^n \,x]+ y)/2^n \par \r)\]
Note that everything said about this map carries over to
the case where 2 is replaced by any positive integer $k$. Thus
\[\l( \vt \{k^n\,x\}.([k^n \,x]+ y)/k^n \par \r)\]
is a formula for the $n^{th}$ iterate of a bi-lateral shift on $k$ symbols.
\end{example}
%=====================Bernoulli chaos========================
\section{\sml Bernoulli Chaos}
In order to better understand
the variety of ways in which chaos can arise, start with the
Bernoulli systems and carry
out a program of systematic ``dilution'' of this form of chaos until
chaos disappears altogether.
The most important motivating example
is that of Kalikow, see \cite{bi:pw}, page 112, of a Kolmogorov
automorphism which is constructed by using one Bernoulli system
to modify another in such a way that the resulting system is not
Bernoulli, but is Bernoulli on a set of measure zero.
\vs6\noi Among the ways to modify a Bernoulli system are:
\vs6
\noi (1) Form a cross-product between a Bernoulli and
non Bernoulli;
\vs6
\noi (2) Form a partial product of Bernoulli with any other
map, including Bernoulli (K-automorphisms);
\vs6
\noi (3) Compose Bernoulli and non Bernoulli;
\vs6
\noi (4) Form a function of a component of a Bernoulli (logistic
map);
\vs6
\noi (5) Form the weighted average of a Bernoulli and
non Bernoulli system (the fundamental map) Brown \& Chua \cite{bi:bc4}.
\vs6\noi
This list is incomplete. The different ways a Bernoulli system
may be modified to make a chaotic system are likely so numerous and
varied that no single characterization would be possible.
\ssc{\sml The Bernoulli Map in $\Rl^4$}
Let
\[ \l( \vtxx u. v. w. z\par \r)=\l(\vtxx \cos(x). \sin(x). \cos(y).
\sin(y) \par \r) \]
\vs6\noi
By direct substitution,
application of the double-angle formulas for the sine and cosine, and
simplification, the following four-dimensional system on a
two-dimensional space is obtained:
\[ {\rm T} \l( \vtxx u. v. w. z \par \r)= \left(
\begin{array}{rrcc}
0 & 0 & (u^2-v^2) & -2uv\\
0 & 0 & 2uv & (u^2-v^2)\\
w & -z & 0 & 0\\
z & w & 0 & 0
\end{array}\right) \l( \vtxx
u. v. w. z \par \r) \]
\vs6\noi In complex coordinates this map is
given by:
\[{\rm T}\l(\vt w. z\par \r)=\l(\vt w^2\,z. w\,z \par \r)\]
where $\mid w\mid=\mid z\mid=1$. A simple computation shows that this
mapping is 1-1, in particular:
\[{\rm T}^{-1}\l(\vt w. z\par \r)=\l(\vt w\,\bar{z}. z^2\,\bar{w} \par \r)\]
\vs6\noi
The sequence of iterates of the
Anosov map in closed form in terms of elementary functions is as follows. The key
to doing this is the derivation of an expression for the n-th
power of the matrix used in the definition of the Anosov map.
Let
\[ {\rm A}= \l( \mtx 2. 1. 1. 1 \par \r)\]
then
\[ {\rm A}^n= \frac{1}{(1-\lambda^2)\lambda^{n-1}}
\l( \mtx \lambda^{2n}(1-\lambda)+(2-\lambda).
1-\lambda^{2n}. 1-\lambda^{2n}.
(\lambda^{2n}(1-\lambda)+(1-2\lambda))/\lambda \par \r)\]
where $\lambda=0.5(3+\sqrt{5})$, which is the largest eigenvalue
of the matrix A. Use this to obtain the n-th term in
the sequence of iterates of this map. For notational convenience
let
\[ \l(\mtx a_n. b_n. c_n. d_n \par \r)= \frac{1}{(1-\lambda^2)\lambda^{n-1}}
\l( \mtx \lambda^{2n}(1-\lambda)+(2-\lambda).
1-\lambda^{2n}. 1-\lambda^{2n}.
(\lambda^{2n}(1-\lambda)+(1-2\lambda))/\lambda \par \r)\]
\noi then
\[\l(\vtxx u_n. v_n. w_n. z_n \par \r)=\l(\vtxx \cos(a_n\phi_0+b_n
\theta_0).\sin(a_n\phi_0+b_n
\theta_0). \cos(c_n\phi_0+d_n
\theta_0). \sin(c_n\phi_0+d_n
\theta_0)\par\r)\]
Note that $b_n=c_n$.
\vs6\noi
This is the closed-form solution for the chaotic mapping on the
torus. By taking arctangents the
Bernoulli iterates in terms of elementary functions are obtained.
%================Bernoulli as a time one======================
\ssc{The Bernoulli map as a Time-One map}
\label{sc:t1}
By employing a two-phase gate, the construction of the equations of
a nonautonomous ODE whose time-one map is the Bernoulli map is obtained.
The following equation has the Bernoulli map as the
time-one map:
\begq
\l(\vt \dot{w}. \dot{z} \par \r)=\l(\vt (1-s(t)) w\, \log(z).
s(t) z\log(w) \par \r)
\endq
where $s(t)=0.5(1+{\rm sgn}\sin(\omega t))$. Initial conditions
must be taken to have absolute value 1.
The Bernoulli map can be written, as this equation suggests, as a
composition of two maps:
\[{\rm T_1}\l(\vt w. z \par \r)=\l(\vt w. z\,w \par \r)\]
\[{\rm T_2}\l(\vt w. z \par \r)=\l(\vt w \, z. z \par \r)\]
which are time-one maps for autonomous ODEs.
The Bernoulli map is ${\rm T}_2\circ {\rm T}_1$. The
component maps arise as time-one maps of the solutions of two
systems of ODEs. The solutions are as
follows:
\[\l(\vt w_1(t). z_1(t) \par \r)=\l(\vt w_0 . z_0\,w_0^t \par \r)\]
\[\l(\vt w_2(t). z_2(t) \par \r)=\l(\vt w_0 z_0^t . z_0 \par \r)\]
which are the solutions of the separate component ODEs
corresponding to the two phases of the function $s(t)$.
\vs6\noi These equations are presented in complex form for
convenience. The complex representation is not essential and no
complex variable theory has been used in my analysis.
\vs6\noi Having a Bernoulli mapping in an algebraic formula,
utilize this map to construct examples of chaos which
are less than Bernoulli.
\ssc{\sml Cross products with Bernoulli systems}
The simplest way to obtain a map which is Bernoulli on a set of
measure zero is to have at least one component of a cross product to be
Bernoulli, and one that is not Bernoulli. Let,
\begq
{\rm T}\l(\vts u. v. w \par \r)=\l(\vts u^2\,v. u\,v.
a\,w \par \r)
\endq
where the first two components are restricted to have modulus 1.
If $w_0=1, \hspace{6pt} 00$. However, the complexity of the orbit
that the Anosov map contributes is being constantly
reversed by its inverse. The possibility of global chaos that
comes from the Anosov map must always be compromised by the inverse,
thus leaving only local, finite excursions of chaos that come
from long runs by the logistic map having a value above .5.
Thus, the chaos of this map is actually being imparted by the logistic map.
In Chapter \ref{ch:cmplx} it is seen that it is possible to construct an example of
a three-dimensional map from Eq. \ref{eq:kol} by replacing the first two components
with a map which has LZ, ZA, or E, and obtain a map that looks {\em random}.
\vs6\noi The following example is neither Bernoulli, K, nor almost
periodic, but has a set of measure zero on which it is Bernoulli:
\begq
{\rm K}\l(\vtxx u. v. w. z \par \r)=\l(\vtxx u^2\,v. u\,v. {\rm
sg}(u)(w^2\,z)+(1-{\rm sg}(u))(a\,w). {\rm
sg(u)}(w\,z)+(1-{\rm sg(u)})(b\,z) \par \r)
\endq
This is because the third and fourth components of the map alternate
{\em randomly} between Bernoulli and almost periodic. Note that by replacing the function $\sgn(u)$, which occurs in
the definition of the function sg($u$), in the above
equations with a sigmoid function all examples are made
infinitely differentiable.
%============gated compositions==================
\ssc{\sml Gated compositions with Bernoulli systems}
First describe the gate:
\begin{eqnarray}
s_1(t) & = & 1 \mbox{ for \sp} 0 \leq t < 1\\
s_1(t) & = & 0 \mbox{ for \sp} 1 \leq t < 3\\
s_1(t+3) & = & s_1(t) \\
s_2(t) & = & 1-s_1(t)
\end{eqnarray}
Note that $s_2(t)$ is nonzero twice as long as $s_1(t)$.
Using this gate, define a general nonautonomous ODE:
\begq
\dot{x}=s_1(t)\,F_1(x)+s_2(t)\,F_2(x)
\endq
The time-one map determined by sampling the map at times
$t=1,2,3,\ldots,$ results in one point of the orbit being
determined by $\dot{x}=F_1(x)$ and the next two points being determined by
$\dot{x}=F_2(x)$, then back to the $F_1$ equation. The T1 map
is not simply the composition of the maps determined by $F_1$ and
$F_2$ because it is necessary to get one point of the orbit from
the $F_1$ equation and then get {\em two} points from the
$F_2$ equation. A composition would omit the intermediate points,
only recording the result of applying the $F_1$ equation and then
the $F_2$ equation to the initial point. The presence of these
intermediate points being included in the orbit
is significant in that they alter the geometry and complexity
of the orbit.
\vs6\noi The time difference between the two phases may be as long as we
desire. In the example this ratio is 1:2. The greater the ratio
between the phases
the greater the difference in the contribution to the orbit by
the two phases. In this way, combining a Bernoulli phase
with an almost-periodic phase in such ratios (say 1:1000000) that the Bernoulli
contribution is as thin as you please, the resulting orbit
must still be chaotic. This technique shows how to include a
Bernoulli system at any desired level.
\vs6\noi The
mechanism illustrated by this example could easily be reflected
in a real-world system in which complex forces alternated with
periodic forces to shape some geological feature or biological
feature of a life form.
\vs6\noi Another technique that may be used is to construct the time one-half
map, i.e., sample the orbit at intervals of $t=1/2$. In this way
one gets two Bernoulli points followed by four almost-periodic
points. Doing this amounts to refining the gates into four
phases, each gate being decomposed into two phases over its
nonzero range.
\vs6\noi The technique of gate refinement corresponds to
the mathematical technique of refining a partition of the real
line, so often used in measure theory and ergodic theory. Using
the refinement method, an ODE is constructed which is a gated
composition of Bernoulli and almost periodic. After all the
simplifications a three-phased gate is obtained as follows:
\begin{eqnarray}
s_1(t) & = & 1 \mbox{\sp for \sp} 0 \leq t < 1\\
s_1(t) & = & 0 \mbox{ for \sp} 1 \leq t < 6\\
s_2(t) & = & 0 \mbox{ for \sp} 0 \leq t < 1\\
s_2(t) & = & 1 \mbox{ for \sp} 1 \leq t < 2\\
s_2(t) & = & 0 \mbox{ for \sp} 2 \leq t < 6\\
s_3(t) & = & 0 \mbox{ for \sp} 0 \leq t < 2\\
s_3(t) & = & 1 \mbox{ for \sp} 2 \leq t < 6
\end{eqnarray}
Extend the functions to be periodic of period 6.
Now define the gated-circuit equation in complex variable
notation:
\begq
\l(\vt \dot{w}. \dot{z} \par \r)=\l(\vt s_1(t) w\, \log(z)
+s_3(t)\,z.
s_2(t) z\log(w)-\lambda^2 s_3(t)\,w \par \r)
\endq
\vs6\noi
By altering the ratios of the gate it is possible to obtain any level of chaos
desired.
%===========================================Generalized Shifts===========================
\section{\sml Generalized Shift}
If $x\in [0,1)$ and has positive algorithmic complexity then so does its right shift. The sum of these two numbers mod(1) also has positive algorithmic complexity. Expressions of the form $1.1 x\, \mod(1)$ in any radix do not change the algorithmic complexity of $x$. Hence, such operations yield sequences as complex as ordinary shift operations. In this sense they are a {\em generalization} of the shift and they are also functions of a shift. In terms of established concepts, a generalized shift has a positive Liapunov exponent. When operating on binary sequences, the orbits of a generalized shift are bounded.
\begin{figure}[htbp]
\includegraphics[height=2.5in,width=4.38in,angle=0]{C:/Research/Book/Figures/eps/GeneralizedS.eps}
\caption{\sml The Generalized Unilateral Shift: Plate A The Unilateral Shift vs $y=x\, \mod(1)$; Plate B The Generalized Unilateral Shift vs $y=x\, \mod(1)$}
\label{fg:generalizedS}
\end{figure}
\begq
\label{cd:gshft}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:generalizedS} Plate B is as follows:}\\
x &=& 0.01\\
h&=& 0.001\\
h_1&=& 10.0 \, h\\
\\
&& \mbox{For i = 1 to 300000}\\
\\
p &=& 0.5 \cdot (1 + \tanh(400\,(h_1 + x - 1)))\\
q &=& 0.5 \cdot (1 + \tanh(400 \, (h_1 - x)))\\
r &=& (1 - p) \cdot (r + q - r \cdot q)\\
u1 &=& \exp(\ln(2) \cdot h) \cdot x\\
u2 &=& x - h\\
x &=& r \cdot u1 + (1-r) \cdot u2\\
&&\mbox{\bf Plot Point}\\
\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{lemma}
Let $u1 = \exp(\ln(2) \cdot h) \cdot x$ and $u2 = x - h$ and
\[\T(\X)= r \cdot u1 + (1-r) \cdot u2\]
Then $\T$ is an IDE and, as $h_1\ra 0$, $\T$ converges to the $2 \, x \, \mod(1)$ IDE.
\end{lemma}
\pf Direct computation.\rl
\vs6\noi While Bernoulli shifts occur in Ergodic theory, it is the unilateral shift IDE that occurs in nature and provides a bridge between discrete chaos and chaos that arises in differentiable dynamics.
\vs6\noi This suggests generalizing the definition of chaos as follows:
\begin{definition} {\bf \sml Chaos}
An IDE is chaotic if it has a bounded invariant subset on which it is a function of a generalized shift.
\end{definition}
\vs6\noi
%11
%=================Weighted Average with Bernoulli Systems===================
\chapter{Chaos and Nature}
\label{ch:nat}
\begin{center} {\em Chaos is the engine of complexity in the natural world}
\end{center}
\vs6\noi This chapter will present illustrations of IDEs whose time series replicates natural phenomena. The point of this illustration is to show that it is the combined forces of chaos and almost periodic forces that accounts for what is seen in the natural world.
\vs6\noi The examples in this chapter are of the form
\[\lambda\,\F\circ\S+(1-\lambda)\, AP\]
where $0<\lambda\leq 1$.
Processes in nature, human organizations and climate are never simple combinations of clearly understood forces as found in physics. In order to gain some insight into natural systems this section derives a method of combining a Bernoulli and almost periodic systems. The Bernoulli factor represents the combined actions of several diverse forces which cannot be easily formulated mathematically and the almost periodic factor represents simple dynamics such as daily routines in organizations that are not under complex external pressures.
\vs6\noi Another view is that the almost periodic factor represents a stable prey-predator relationship such as may be found in the Volterra--Lotka model while the Bernoulli factor represents the collective external forces on the prey-predator system that are often accounted for by including a stochastic process in the model. The advantage of the weighted sum model is that any stochastic process must be averaged over time and highly idealized and simplified whereas the weighted sum provides insight into how the prey-predator model might vary over short time periods and how it might change with time and the external forces.
\vs6\noi What will become apparent from these sixteen examples is just how drastically a biological or social system can change over time.
%===============================================Euler Flow============================
\section{\sml The Simplified Euler Fluid Flow Equation}
\label{sc:euler}
A simplified fluid flow equation in two dimensions is
\[\l(\vt \dot{x}. \dot{y}\par\r)= f(r)\l(\vt y. -x\par\r)\]
where $r=\sqrt{x^2+y^2}$. The orbits (flow lines) are concentric circles and the angular velocity along each orbit is $f(r)$. If $f(r)=1/r$, the angular velocity is greatest near the center and dies out as $r$ increases.
\vs6\noi
The solution of this equation, since $f(r)$ is constant along orbits, is given by
\[\X(t)=\exp(t f(r_0) \B)\, \X_0\]
where $r_0=\sqrt{x_0^2+y_0^2}=\|\X\|$
\vs6\noi may also be viewed as an IDE:
\[\T_h(\X)=\exp(h \, \|\X\|\, \B)\, \X\]
The iterates of the Euler IDE agree exactly with the solution of the ODE at the time intervals $t=i\, h$.
\vs6\noi The fact that two adjacent orbits move at different angular velocities creates friction and stress, i.e., stretching. If folding is added to the rotating fluid, then the result is chaos or turbulence. If, in addition, there is an almost periodic force acting on the resulting motion, then natural phenomena can be simulated.
\begin{figure}[htbp]
\includegraphics[height=2.75in,width=4.667in,angle=0]{C:/Research/Book/Figures/eps/FluidFlow.eps}
\caption{{\sml Plate A: Photograph of Wake Turbulence from Aircraft Wing. Plate B:Simplified Euler Fluid Flow with Folding and Almost Periodic Forcing}}
\label{fg:fflow}
\end{figure}
\begq
\label{cd:flf}
\left.
\begin{array}{lcl}
&& \mbox{\bf The code for Plate B of Fig. \ref{fg:fflow} is as follows:}\\
&&\mbox{For i = 1 To N}\\
\\
r &=& 1 / \sqrt{((x - 0.19) ^ 2) + (y ^ 2)}\\
r &=& 5 / \sin(r - \ln(r ^ 2))\\
u &=& \exp(-.003) \cdot ((x - 0.19) \cdot \cos(r)) - (y \cdot \sin(r)) + 0.19 - 0.65 \cdot \cos(x)\\
v &=& \exp(-.003)\cdot (y \cdot \cos(r)) + ((x - 0.19) \cdot \sin(r))\\
x &=& -u\\
y &=& -v\\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right \}
\endq
\section{\sml Bernoulli and Almost Periodic Dynamics}
\vs6\noi To construct the model a two-sided
Bernoulli map will first be combined with an almost periodic map in complex coordinates and later converted to real coordinates by doubling the dimensionality of the model. An important feature of this construction is that it provides insight into how two-sided Bernoulli maps manifest themselves in closed form in terms of elementary algebraic functions. This is relevant to addressing the Hirsch Conjecture.
The benefit of this approach, and the point of the Hirsch Conjecture, is that the algebraic
representation of two-sided Bernoulli maps is essential to recognizing when a system is chaotic, or is being forced by a chaotic component in the equations of natural systems.
\vs6\noi Let
\begin{equation}
\l( \vtxx u. v. w. z\par \r)=\l(\vtxx \cos(\phi). \sin(\phi).
\cos(\theta).
\sin(\theta) \par \r)
\label{eq:polar}
\end{equation}
Note that while these coordinates are in 4 dimensions, they are
identified by only $\phi$ and $\theta$, and hence represent only
a two-dimensional space embedded in 4 dimensions.
\vs6\noi
An algebraic representation of a Bernoulli map
in terms of elementary functions is derived by using the Anosov map dynamical system on the torus defined by
\begin{equation}
{\rm A} \l( \vt \phi. \theta \par \r)=\l( \vt 2\phi+\theta.
\phi+\theta \par \r) \mod(1)
\label{eq:anosov}
\end{equation}
This system is also known as the Anosov map and is known to be a
bi-lateral Bernoulli map on the unit square. Note the
occurrence of the addition modulo 1 function in this definition of
the Anosov map. This must be removed in any final algebraic
representation because this operation is not an elementary
function and it does not occur explicitly in the laws of nature.
In fact, the operation of addition mod 1 is a mathematical abstraction
that is very useful for proving theorems while obscuring the relationship between the conclusion of a theorem and its connection to the
physical world.
\vs6\noi
By direct substitution of the components of Eq.\ref{eq:anosov} into Eq. \ref{eq:polar},
and by an application of the double angle formulas for the
sine and cosine, after simplification, the four-dimensional system Eq. \ref{eq:anosov02}, is obtained:
\begin{equation}
{\rm T} \l( \vtxx u. v. w. z \par \r)= \left(
\begin{array}{rrcc}
0 & 0 & (u^2-v^2) & -2uv\\
0 & 0 & 2uv & (u^2-v^2)\\
w & -z & 0 & 0\\
z & w & 0 & 0
\end{array}\right) \l( \vtxx
u. v. w. z \par \r)
\label{eq:anosov02}
\end{equation}
What is revealed by this map is that the bi-lateral Bernoulli map
is equivalent to a map defined by a two-component twist system: the
upper right block of four entries is a twist as is the lower left
block of four entries. The $u,v$ components of this map are acting as a twist
on the $w,z$ component and vice versa.
\vs6\noi
Implementation of this map
on a computer must be done carefully due to the rapid
accumulation of round-off errors. By taking the arctangents/$2\pi$ one obtains a map on the unit square which is a bilateral Bernoulli
map. An implementation of this map in
QuickBASIC is:
\begin{verbatim}
FOR i= 1 TO N
u1=(u^2-v^2)*w-2*u*v*z
v1=2*u*v*w+(u^2-v^2)*z
w1=u*w-v*z
z1=v*w+u*z
phi=arctangent(v1/u1)
theta=arctangent(z1/w1)
u=cos(phi)
v=sin(phi)
w=cos(theta)
z=sin(theta)
PSET (phi/(2*pi),theta/(2*pi)),10
NEXT i
\end{verbatim}
The computation of the arctangents is included to compensate for
round-off errors.
Also, plot the points in angular coordinates to retain the
uniformity of the Bernoulli distribution.
\vs6\noi
Following this example, countless additional
examples of functions of bilateral
shifts on any number of symbols in terms of elementary functions may be derived.
\ssc{Combining Bernoulli with Almost Periodic}
The preceding map, if transformed to complex coordinates
$w=\psi_1+\eta_1\,i,\,z=\psi_2+\eta_2\,i$, can be
represented by:
\begin{equation}
{\rm B}\l(\vt w. z\par \r)=\l(\vt w^2\,z. w\,z \par \r)
\label{eq:bern}
\end{equation}
where the modulus of the complex numbers $|w|=|z| =1$.
\vs6\noi
An almost-periodic map in complex coordinates can be written as
\begin{equation}
{\rm A}\l(\vt w. z\par \r)=\l(\vt a\,w. b\,z \par \r)
\label{eq:ap}
\end{equation}
where $a,b$ are complex numbers satisfying
$|w|=|z|=|a|=|b|=1$. This is a pair of rotations on
the cross product of two unit circles known as the complex torus
in two dimensions.
\vs6\noi Generalize the Bernoulli map to all two-dimensional complex space
by redefining B as follows:
\begin{equation}
{\rm B}\l(\vt w. z\par \r)=\l(\vt w^2\,z/|w^2\,z|. w\,z/|w\,z| \par \r)
\end{equation}
B is now defined on the complex plane and projects every vector
onto the complex torus.
The torus is, in effect, an attractor, and B restricted to the
torus is an invertible differentiable mapping. It is not
necessary to similarly redefine A since A is bounded even when
$|w|, |z| \neq 1$.
\vs6\noi
Form a typical linear combination of these two maps often used in vector computations to get the following map:
\begq
{\rm T}\l(\vt w.z\par \r)=(1-\lambda){\rm B}\l(\vt
w.z \par \r)+\lambda {\rm A}\l(\vt w.z \par
\r)
\endq
where $0 \leq \lambda \leq 1$.
To make this equation a mapping on the torus the components must have absolute value 1. To do this form
\begq
K_1=| (1-\lambda)w^2\,z+\lambda(a\,w)|
\endq
and
\begq
K_2=| (1-\lambda)w\,z+\lambda(b\,z)|
\endq
Now define a weighted average of Bernoulli and almost periodic map as
\begq
B_\lambda(1,a,b)\l(\vt w.z\par \r)=\left[(1-\lambda)\l(\vt
w^2\,z/K_1. w\,z/K_2 \par \r)+\lambda\l(\vt a\,w/K_1. b\,z/K_2 \par
\r)\right]
\endq
\vs6\noi This is a mapping on the complex torus.
When $\lambda=0$, it is Bernoulli, and when $\lambda=1$ it is almost
periodic. For all other values of $\lambda$ it is somewhere in between.
Stated differently, it
is a one-parameter family of maps, which, except at the origin, are
$\Ci$ (when considered as real
valued maps) that
continuously vary from an almost-periodic map
to a Bernoulli bilateral shift on two symbols.
\vs6\noi
Note that $B_\lambda$ is not an analytic map. This is of no
significance since the use of complex coordinates here is only a
notational convenience and the theory of complex variables is
not used in any of the results. As
real valued maps, $B_\lambda$ and the subsequent maps that are presented are
infinitely differentiable, except possibly at
the origin.
\vs6\noi
Generalize this map to obtain a set of maps that
continuously vary from an almost-periodic map to a
Bernoulli map on $n-$symbols. The generalization is
\begq
B_\lambda(n,a,b)\l(\vt w.z\par \r)=\left[(1-\lambda)\l(\vt
w^{n+1}\,z^n/K_1. w\,z/K_2 \par \r)+\lambda\l(\vt a\,w/K_1.
b\,z/K_2 \par
\r)\right]
\endq
and $K_i$ are the divisors needed to force the mapping to stay on
the complex torus.
%=======================Time one maps========================
\ssc{Lifting $\B_\lambda$ to $\F_\lambda$}
\label{sec:liftlam}
As mentioned earlier, lifting maps on $\Rl^n$ to $\Rl^{2n}$ is a general process.
\begin{theorem}
Let $f:{\bf R}^n \ra {\bf R}^n$ be any differentiable (not
necessarily invertible) function on
${\bf R}^n$. Then $f$ can be embedded in a time-one map for
an electronic circuit as follows:
\vs6\noi Let ${\bf x,y}\in {\bf R}^n$ be any two vectors in ${\bf R}^n$
and define ${\rm T}:{\bf R}^{2n}\ra {\bf R}^{2n}$ by the
equation:
\begq
{\rm T}\l(\vt {\bf x}. {\bf y} \par \r)=
\l(\vt f(c\,{\bf y}+{\bf x})-c{\bf y}.c{\bf y}+{\bf x}
\par \r)
\endq
where $c$ is any real number, then,
\vs6
\noi (1) {\rm T} is 1-1, has the same number of derivatives as
$f$, and ${\rm det}({\rm DT})=c^n$;
\noi (2) for all $c \neq 0$ {\rm T} is a Poincar\'{e}\sp map for an ODE in ${\bf
R}^{2n}$;
\noi (3) for $c=0$, the range of {\rm T} is the graph of $f$, and
the orbit of a point $({\bf x}, {\bf y})$ under {\rm T} is the
orbit of {\bf x} under $f$, i.e.
$(f^{n+1}({\bf x}),f^n({\bf x}))$;
\noi (4) for any ${\bf x} \in {\bf R}^n$, if $|c|<1$ then $(f^{n+1}({\bf x}),f^n({\bf x}))$,
is a subset of an attractor;
\noi (5) as $c \ra 0$, the attractor of {\rm T} converges to the
orbit of {\bf x} under $f$.
\end{theorem}
\pf All assertions are direct computations.\rl
\vs6
Use Theorem 1 to lift $\B_\lambda$ to $\F_\lambda$ as follows:
\begq
F_\lambda\l(\vt \Theta. \Psi \par \r)=\l(\vt
B_\lambda(n,c\,\Psi+\Theta)-c\,\Psi. c\,\Psi+\Theta \par
\r)
\endq
\vs6\noi
The above theorem shows that $\F_\lambda$ preserves the dynamics of $\B_\lambda$. $\F_\lambda$ is the composition of three invertible maps which are time-one maps of complex ODEs.
\subsection{Notes on $\F_\lambda$}
$\F_\lambda$ is a weighted average of Bernoulli and almost periodic (AP) dynamics. Bernoulli dynamics are the result of composing stretching and folding whereas AP is just folding. Adding the two dynamics rather than composing them is analogous to looking at the logarithm of the combined processes. In this case, the result of weighted averages of Bernoulli and AP is analogous to first composing stretching and folding dynamics and then composing the result with a different folding dynamic. Symbolically this may be represented as follows:
\begin{eqnarray}
\F_1\circ \S_1\circ\F_2\circ \S_2\circ\F_3\circ \F_4\circ\S_3\circ S_4\cdots
\end{eqnarray}
By replacing folding with 1 and stretching with 0 this is symbolically, at the most abstract level, a binary sequence
\[ 1\;0\;1\; 0\;1\; 1\; 0\;0\cdots\]
representing the composition of many diverse stretching and folding actions. This is what is seen in nature: a complex composition of stretching and folding dynamics. Each 1 and 0 is a further combination of elementary stretching and folding dynamics, many of which may be represented as a segment of a time series solution of an ODE. This should make clear why natural processes such as social organizations, war, climate and its impact on geology, wildfires and other highly complex phenomena that cannot be modeled by Newtonian dynamics are so difficult to model. Using statistical methods to model natural phenomena amounts to averaging out large segments of stretching and folding dynamics making short term prediction impossible.
\vs6\noi While $\F_\lambda$ is in eight dimensions, the essential
dynamics occurs in only two dimensions: In addition to the two
dimensions used to portray the dynamics, two more are needed
to arrange for the map to operate on a two-dimensional manifold which is
compact (the torus). The additional four dimensions are used to
make the map invertible. Another way of seeing this is to note
that, by inspection, of the three component
maps that make up $\F_\lambda$, only one carries
the dynamics of the $\B_\lambda$, Eq. 18, and that map only
uses four dimensions. Of the
four dimensions used only two are really needed since the
parameter $c$ may be chosen to be arbitrarily small. For example,
choose $c=10^{-10000}$. In short, it is only necessary to examine $\F_\lambda$ in two dimensions.
\subsection{\bf The Code for $\F_\lambda$ }
\scriptsize
\[\begin{array}{lcl}
&&\\
a1 = \cos(\phi_1):a2 = \sin(\phi_1):b1 = \cos(\phi_2):b2 = \sin(\phi_2)&&\\
u = \cos(\theta_1):v = \sin(\theta_1):w = \cos(\theta_2):z = \sin(\theta_2)&&\\
u1 = \cos(\theta_1):v1 = \sin(\theta_1):w1 = \cos(\theta_2):z1 = \sin(\theta_2)&&\\
u2 = \cos(\theta_1):v2 = \sin(\theta_1):w2 = \cos(\theta_2):z2 = \sin(\theta_2)&&\\
a1 = \cos(\phi_1):a2 = \sin(\phi_1):b1 = \cos(\phi_2):b2 = \sin(\phi_2)&&\\
&&\\
\mbox{For i = 1 To N} &&\\
&&\\
\mbox{Save Initial Conditions}&&\\
ua = u1: va = v1: wa = w1: za = z1: ub = u2: vb1 = v2: wb = w2: zb = z2&&\\
&&\\
\mbox{Compute T1}&&\\
u1 = ua: v1 = va: w1 = wa: z1 = za: u2 = b \cdot ub: v2 = b \cdot vb1: w2 = b \cdot wb: z2 = b \cdot zb&&\\
&&\\
\mbox{Save Initial Conditions}&&\\
ua = u1: va = v1: wa = w1: za = z1&&\\
ub = u2: vb1 = v2: wb = w2: zb = z2&&\\
&&\\
\mbox{Compute T2}&&\\
&&\\
u1 = ua: v1 = va: w1 = wa: z1 = za&&\\
u2 = ua + ub: v2 = va + vb1: w2 = wa + wb: z2 = za + zb&&\\
&&\\
\mbox{Save Output from T2}&&\\
ua = u1: va = v1: wa = w1: za = z1: ub = u2: vb1 = v2: wb = w2: zb = z2&&\\
\mbox{Compute T3}\\
&&\\
\mbox{ Compute Bernoulli Component of } \Psi &&\\
&&\\
u2a = ub: v2a = vb1&&\\
w2a = f3(ub, vb1, wb, zb):z2a = f4(ub, vb1, wb, zb)&&\\
&&\\
unorm = Sqr(u2a ^ 2 + v2a ^ 2):wnorm = Sqr(w2a ^ 2 + z2a ^ 2)&&\\
&&\\
u2a = u2a / unorm:v2a = v2a / unorm:w2a = w2a / wnorm:z2a = z2a / wnorm&&\\
&&\\
ub1 = u2a: vb11 = v2a: wb1 = w2a: zb1 = z2a&&\\
u2b = f3(ub1, vb11, wb1, zb1): v2b = f4(ub1, vb11, wb1, zb1)&&\\
w2b = wb1:z2b = zb1&&\\
&&\\
\mbox{Compute Almost Periodic Component of } \Psi&&\\
&&\\
apu = a1 \cdot ub - a2 \cdot vb1:apv = a1 \cdot vb1 + a2 \cdot ub&&\\
apw = b1 \cdot wb - b2 \cdot zb:apz = b1 \cdot zb + b2 \cdot wb&&\\
&&\\
\mbox{Compute Weighted Sum of Bernoulli and Almost Periodic}&&\\
&&\\
uu = (1 - \lambda) \cdot u2b + \lambda \cdot apu:vv = (1 - \lambda) \cdot v2b + \lambda \cdot apv&&\\
ww = (1 - \lambda) \cdot w2b + \lambda \cdot apw:zz = (1 - \lambda) \cdot z2b + \lambda \cdot apz&&\\
&&\\
\mbox{Contract Results to the Torus}\\
&&\\
unormu = \sqrt{uu ^ 2 + vv ^ 2}: wnormw = \sqrt{ww ^ 2 + zz ^ 2}&&\\
uu = uu / unormu:vv = vv / unormu:ww = ww / wnormw:zz = zz / wnormw&&\\
&&\\
\mbox{Save Output of } \B_\lambda &&\\
&&\\
u2b = uu:v2b = vv:w2b = ww:z2b = zz&&\\
&&\\
\mbox{Compute}\; \theta \mbox{ component}&&\\
&&\\
u1 = ua - ub + u2b:v1 = va - vb1 + v2b:w1 = wa - wb + w2b:z1 = za - zb + z2b&&\\
&&\\
au1 = \arctan(v2, u2): au2 = \arctan(z2, w2)&&\\
&&\\
\mbox{{\bf Plot point (au1, au2)}}&&\\
&&\\
\mbox{Iterate}&&\\
&&\\
x = 2 \cdot au1 - 2\cdot \pi: y = 2 \cdot au2 - 2\, \pi&&
\end{array}\]
\newpage
\footnotesize
%=============================================================Attractors==========================
\ssc{The Attractors of $\F_\lambda$ on the Two-dimensional Torus}
\label{sc:wa}
This section contains sixteen figures that provide insight into how diverse weighted average systems can be. Data is included to facilitate the reader in reproducing these figures and exploring additional figures. The figures illustrate the dynamic diversity of $\F_\lambda$, a very simple case of combining stretching and folding with additional almost periodic (AP) folding action. In particular, $\F_\lambda$ provides insight into how Bernoulli (or {\em random} forces) combine with almost periodic dynamics to generate complex outcomes that are neither random nor almost periodic. This combination is common in natural phenomena.
\vs6\noi $\F_\lambda$ has two parameters that may be varied. They are $ \lambda, c$. For all figures $c=0.00001$ with one exception, Fig. \ref{fg:fmap1516} {\bf B} where $c=0.999$. The initial conditions $\theta_1, \theta_2, \phi_1, \phi_2$ vary to generate all sixteen figures. Only two of the initial conditions, $ \phi_1, \phi_2$, affect the dynamics. They are designated in the figure captions.
\vs6\noi The parameters $\phi_1\;\; \phi_2$ provide a measure of twisting in the system while $\lambda$ provides a measure of the relative proportion of randomness and AP in the system.
\vs6\noi In Fig. \ref{fg:fmap0102} Plate A, $\lambda =0$. The dynamics therefore are Bernoulli only. Plate A provides a consistency check on the model in that Plate A should be identical to Fig. \ref{fg:ber}, which it is. Plate A represents an environment in which events or actions are as random as a coin toss. In Fig. \ref{fg:fmap0102} Plate B, $\lambda =1$ and the dynamics are only almost periodic (AP). Almost periodic dynamics are commonplace such as the orbit of the earth around the sun or the daily routine of an individual. The geometry of Plate B is an orbit that winds around the torus at an irrational angle and thus never repeats exactly while covering the entire torus.
\vs6\noi
In Fig. \ref{fg:fmap0304} Plate A illustrates how the dynamics of a purely random process change when the almost periodic dynamics are 40 percent of the combined system. Plate B is only 25 percent more AP than Plate A, but the change is dramatic. In Plate B, large empty spaces have opened up indicating that the image is a projection of the orbit from a higher dimensional space.
\vs6\noi Figure \ref{fg:fmap0506}, Plate A is 65 percent AP providing an image very different from previous plates and continuing to demonstrate complexity while Plate B is 76 percent AP which might signal that the combined system is now trending to something more regular. In Plate B $\lambda=0.5$ and this system is `half Bernoulli and
half almost periodic'. A bifurcation has taken place in that repelling regions have appeared as large
empty spaces and the orbit is now clearly higher dimensional even though the illustration is only showing a two-dimensional projection onto the torus. Though
the map is half and half in the parameter space, it is still complex, but not Bernoulli. The complexity can be deduced from the algebraic form of $\F_\lambda$ because Bernoulli dynamics are present by design.
\vs6\noi
In Fig. \ref{fg:fmap0506} Plate A the ripples represent the formation of order while the orbit is clearly disorderly to the eye. The Bernoulli component keeps this orbit complex. The ripples resemble wind swept sand dunes as seen from overhead. This suggests that the ripples in sand dunes are the result of a combined random and almost periodic
process. \vs6\noi In Fig. \ref{fg:fmap0708}, Plate A, the AP dynamics are reduced while the twisting effect is increased transforming the combined dynamics into a one-dimensional system. In contrast, Plate B increases AP and reduces the twisting effect bringing the dynamics more into agreement with intuitive expectations. Twisting corresponds to stretching as mentioned in previous sections and the AP dynamics correspond to folding.
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap0102.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.0,\;\; \phi_1=0.9,\:\; \phi_2=0.4$; Plate B $\lambda=1.0,\;\; \phi_1=0.9,\:\; \phi_2=0.4$ }}
\label{fg:fmap0102}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap0304.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.4,\;\; \phi_1=0.9,\:\; \phi_2=0.4$; Plate B $\lambda=0.5,\;\; \phi_1=0.9,\:\; \phi_2=0.4$ }}
\label{fg:fmap0304}
\end{figure}
\newpage
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap0506.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.65,\;\; \phi_1=0.9,\:\; \phi_2=0.4$; Plate B $\lambda=0.76,\;\; \phi_1=0.9,\:\; \phi_2=0.4$ }}
\label{fg:fmap0506}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap0708.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.6,\;\; \phi_1=2.5764,\:\; \phi_2=0.4$; Plate B $\lambda=0.8,\;\; \phi_1=0.5,\:\; \phi_2=0.5$ }}
\label{fg:fmap0708}
\end{figure}
\newpage
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap0910.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.7,\;\; \phi_1=0.5,\:\; \phi_2=0.5$; Plate B $\lambda=0.705,\;\; \phi_1=0.1,\:\; \phi_2=0.1$ }}
\label{fg:fmap0910}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap1112.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.705,\;\; \phi_1=0.1,\:\; \phi_2=2.1$; Plate B $\lambda=0.635,\;\; \phi_1=2.5,\:\; \phi_2=0.1$ }}
\label{fg:fmap1112}
\end{figure}
\newpage
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap1314.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.73,\;\; \phi_1=0.5,\:\; \phi_2=5.5$; Plate B $\lambda=0.76,\;\; \phi_1=0.7,\:\; \phi_2=5.5$ }}
\label{fg:fmap1314}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.4in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fmap1516.eps}
\caption{\sml{\bf Weighted Average of Bernoulli and Almost Periodic: Plate A $\lambda=0.72,\;\; \phi_1=0.0725,\:\; \phi_2=2.6$; Plate B $\lambda=0.0,\;\; \phi_1=0.1,\:\; \phi_2=2.1$ with lifting parameter $b=0.999$ }}
\label{fg:fmap1516}
\end{figure}
\newpage
\subsection{Examples of $\F_\lambda$ Compared to Natural Processes}
\begin{figure}[htbp]
\includegraphics[height=1.477in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Ocean.eps}
\caption{\sml{\bf Plate A $\F_\lambda\;\lambda=0.72,\;\; \phi_1=0.0725,\:\; \phi_2=2.6$; Plate B Stock Photo of Ocean Waves Washing up on the Shore}}
\label{fg:ocean}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.177in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Dunes.eps}
\caption{\sml{\bf Plate A $\F_\lambda\;\lambda=0.65,\;\; \phi_1=0.9,\:\; \phi_2=0.4$; Plate B Stock Photo of Sand Dunes}}
\label{fg:dunes}
\end{figure}
\newpage
\begin{figure}[htbp]
\includegraphics[height=2.047in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Shell.eps}
\caption{\sml{\bf Plate A $\F_\lambda\;\lambda=0.7,\;\; \phi_1=0.5,\:\; \phi_2=0.5$; Plate B Stock Photo of Sea Shell}}
\label{fg:shell}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.2in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Muscle.eps}
\caption{\sml{\bf Plate A $\F_\lambda\;\lambda=0.4,\;\; \phi_1=0.9,\:\; \phi_2=0.4$; Plate B Stock Photo of Muscle Anatomy Shell}}
\label{fg:muscle}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.2in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Fire.eps}
\caption{\sml{\bf Plate A $\F_\lambda\;\lambda=0.73,\;\; \phi_1=0.5,\:\; \phi_2=5.5$; Plate B Stock Photo of a Wildfire}}
\label{fg:fire}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.273in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/Moire.eps}
\caption{\sml{\bf Moire Patterns: Plate A $\F_\lambda\;\lambda=0.76,\;\; \phi_1=0.9,\:\; \phi_2=0.4$; Plate B $\F_\lambda\;\lambda=0.8,\;\; \phi_1=0.5,\:\; \phi_2=0.5$}}
\label{fg:moire}
\end{figure}
%=================================================================Properties of Complexity============================
%============================================================================
%12
\chapter{Chaos versus Complexity}
\label{ch:cmplx}
\begin{center}
\parbox{3.5in}{\em A central open problem: Is all complexity in nature reducible to some form or function of chaos?}
\end{center}
\vs6\noi A central objective of applied mathematics is the prediction of the future behavior of dynamical systems. This is done by formulating models of systems and then using that model to conjecture such diverse things as how a wing design of a jet might function or how a weather front will behave within a few hours.
\vs6\noi The accuracy of prediction hinges on the accuracy of a model in capturing the complexity of the phenomena to be modeled. As one objective, the model may reveal that the underlying system is no more predictable than a coin toss. That would be very important information because it compels the development of contingency plans to respond to that level of uncertainty. Alternatively, when a system design is in development, such information may compel design changes to improve the level of predictable performance.
\vs6\noi Various numerical schemes have been developed that attempt to provide insight into the degree to which a phenomena may be predictable. Some of those measures are presented in this chapter.
\vs6 \noi {\bf Abbreviations used in this chapter}
\vs6\noi
Sensitive dependence on initial conditions (SD), Zero
Autocorrelation (ZA),
Zero Lyapunov exponent (LZ),
Positive Lyapunov Exponent (LP), Ergodic (E), Weak Mixing (WX),
Strong Mixing (SX), Kolmogorov (K), Bernoulli (B), Zero Entropy
(ZE), Strange Attractor (SA).
\vs6\noi The formal
definitions of the last five abbreviations are from ergodic
theory and can be found in \cite{bi:pw}. In short, they are
measures of how well a transformation mixes up its domain when
iterated over an infinite time span. Ergodic is the lowest form of
mixing and Bernoulli the highest. Dynamical systems that have one
of these forms of mixing have some level of complexity.
\vs6\noi Every example in this chapter may occur as a term in an IDE, $\T_h$, and specifically, each diffeomorphism can be identified as $\T_1$, the time one map of the IDE, see Sec. \ref{sc:emb}, proposition \ref{pr:emb}. In particular, if $\H(\X)$ is a diffeomorphism in $\Rl^n$, then the embedding IDE is
\begin{eqnarray}
\T_h(\X)&=&\exp(h\, \A)(\X-(\I-\exp( \A))^{-1}(\H(\X)-\exp( \A)(\X)))+\\
&&(\I-\exp( \A))^{-1}(\H(\X)-\exp( \A)(\X)))
\end{eqnarray}
And, for $h=1$, $\T_1(\X)=\H(\X)$.
\vs6\noi The change in dynamics may be studied by starting with $\T_h$ for small $h$ and then allowing $h \ra 1$. This is the study of transitions and provides insight into how a system starts at a low level of stretching and evolves as stretching(stress) increases, see Chapter \ref{ch:trans}.
%======================================Three Properties=======================================
\section{\sml Relationship of Three Properties of Complex Dynamical Systems}
\label{sc:rel}
This section presents examples which show the extent to
which three important
properties of complex dynamical systems are independent.
These properties are: Zero
autocorrelation (ZA),
sensitive dependence on initial
conditions (SD), and exponential loss of information (EL).
One definition of chaos requires the power spectral
density (Fourier transform of the average autocorrelation
function)
to have a component that is absolutely continuous with respect
to Lebesgue measure, Berg\'{e}, et al.~\cite{bi:ber}. This definition arises from the analogy of
chaos to random processes (the relationship between
autocorrelation and continuous spectrum in random processes can be
noted in such well-known references as Doob \cite{bi:jd}, and Papoulis \cite{bi:apa}). In particular, if the autocorrelation,
$R(\tau)$
of a process decays fast enough so that
\[\int_{-\infty}^\infty |R(\tau)|d\tau<\infty\]
then the Fourier transform of $R(\tau)$ is uniformly continuous
by a well-known theorem, see Katznelson \cite{bi:yk}, page 121,
theorem 1.2. For stationary random processes, which are the type that
measure preserving dynamical systems on compact sets define, ergodic theory
applies and the coordinate functions (time series) of
a strong mixing transformation have an autocorrelation
which converges to 0 at infinity, see \cite{bi:pw},
page 45, theorem 1.23 (iii)(3). When the dynamical system is
a flow, the power spectrum can be defined and by previous
comments, it must have a continuous component. Weak mixing
transformations can have continuous power spectra with the
autocorrelation converging to 0 in an average sense, Cornfeld,
et al.~\cite{bi:cf}, page 29, theorem 2 (iii). Without a
flow, theorem 3 of this
reference suggests another definition for chaos in terms of the
Fourier coefficients. In short, the theory of stochastic processes
and, a related cousin, ergodic theory, suggest that the
autocorrelation of the Fourier transform can be used to define
chaos in analogy with general random processes.
\vs6\noi
The examples omit the computation of transforms
and consider only the average autocorrelation function in forward time.
For unbounded examples use the
limiting correlation coefficient. It should be noted that some
authors use the convolution in their definition of
autocorrelation in order to get a direct simple relation to the Fourier
transform through the Wiener--Khintchine theorem, see Hsu \cite{bi:hh},
page 121, Equ. (7.28).
\vs6\noi
The use of the linear correlation coefficient as a measure of chaos is
widespread in practice. For example, see the article of Houlton and
May, from Mullin, ed.\cite{bi:tm}, p. 155. In the examples, use Equ.
(7.3) from this reference to compute the autocorrelation.
If the autocorrelation coefficient is 0 for positive time and
non-zero for $t=0$ for signals having a mean of 0, the analogy with
white noise still holds. (For signals without zero mean value
subtract the mean and then apply the definition).
\vs6\noi
Some chaotic systems have an exponentially decaying
autocorrelation, see Houlton and May from Mullin, ed. \cite{bi:tm},
Fig. 7.2; however, only the extreme case of zero autocorrelation
is considered in these examples.
\vs6\noi
For completeness, seven cases must be considered. They are:
\vs6
\noi (1) ZA, without SD and EL; (2) SD, without ZA and EL; (3) EL
without SD and ZA; (4) ZA and SD without EL; (5) ZA and EL without
SD; (6) SD and EL without ZA; (7) ZA, SD, EL combined.
\vs6\noi
Case (7) is EXAMPLE \ref{ex:le}; case (2) is EXAMPLE \ref{ex:twst}; case (6) is
EXAMPLE \ref{ex:ls}. The remaining four
cases are treated in the following sections.
%=============================================================================Autocorrelation=================================
\section{\sml Autocorrelation and Chaos}
\label{sc:auto}
\begin{example}{\bf ZA without SD or EL (case (1))}
\label{ex:za01}
Let the system be given by the following time-varying linear
differential equations:
\[\l( \vt \dot{x}. \dot{y} \par \r)=
\l( \vt -2\,y\,t. 2 \,x\,t \par \r)\]
The general solution is given
by:
\begq
\label{eq:za01}
\l( \vt x(t). y(t) \par \r)= \l( \vt x_0\cos(t^2)-y_0\sin(t^2). y_0 \cos(t^2)+x_0\, \sin(t^2) \par \r)
\endq
\end{example}
\vs6\noi Observation one, there is no IDE for this ODE, lemma \ref{lm:noide}. Observation two, the average autocorrelation is 0 everywhere
except at 0. Observation three, there is no sensitive dependence on initial
conditions due to the fact that the equations are linear and
bounded. Observation four, the system has no EL, which follows directly from the form of the time
series equations.
\begin{figure}[htbp]
\includegraphics[height=2.367in,width=2.367in,angle=0]{C:/Research/Book/Figures/eps/ZA01.eps}
\caption{{\sml \bf Zero autocorrelation without SD or EL. For illustration, one coordinate of Eq.\ref{eq:za01} is plotted against the HOC having the same initial conditions}}
\label{fg:za01}
\end{figure}
\vs6\noi Plotting Eq. \ref{eq:za01} in standard phase space coordinates produces a circle which cannot be easily examined for complexity.
\begq
\label{cd:za01c}
\left.
\begin{array}{lcl}
&&\mbox{{\bf The code for Fig. \ref{fg:za01} is as follows:}}\\
&& \mbox{For i = 1 to N}\\
\\
z &=& \cos(h) \cdot z_1 +\sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 -\sin(h) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
t &=& \arctan(z / w)\\
\\
u_1 &=& x \cdot \cos(t ^ 2) + y \cdot \sin(t ^ 2)\\
v_1 &=& y \cdot \cos(t ^ 2) - x \cdot \sin(t ^ 2)\\
\\
u_2 &=& x_1 \cdot \cos(t) + y_1 \cdot \sin(t)\\
v_2 &=& y_1 \cdot \cos(t) - x_1 \cdot \sin(t)\\
\\
x &=& u_1\\
y &=& v_1\\
\\
x_1 &=& u_2\\
y_1 &=& v_2\\
\\
&&\mbox{\bf Plot Point $(x,x_1)$}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\begin{example} {\bf ZA and SD without EL (case (4))}
\label{ex:za02}
\vs6\noi Let the system be given by the equations:
\begq
\label{eq:za02}
\l( \vt \dot{x}. \dot{y} \par \r)=
\l( \vt -2r\,y\,t. 2 r\,x\,t \par \r)
\endq
where $r=\sqrt{x^2+y^2}$ and delete a disk about the
origin of radius $\epsilon$. The general solution is given
by the equations:
\[\l( \vt x(t). y(t) \par \r)=
\l( \vt x_0\cos(u)-y_0\sin(u). y_0 \cos(u)+x_0\, \sin(u)
\par \r)\]
where
\[u= r_0\,(t^2-1) \hs9 \hs9 r_0=\sqrt{x_0^2+y_0^2}\]
\end{example}
\vs6\noi This is an example of a nonlinear periodic system for which there is no IDE (the proof of lemma \ref{lm:noide} applies).
\begin{figure}[htbp]
\includegraphics[height=2.367in,width=2.393in,angle=0]{C:/Research/Book/Figures/eps/ZA02.eps}
\caption{{\sml \bf Zero autocorrelation with SD without EL. For illustration, one coordinate of Eq.\ref{eq:za02} is plotted against the HOC having the same initial conditions}}
\label{fg:za02}
\end{figure}
\vs6\noi As with Eq. \ref{eq:za01}, plotting Eq. \ref{eq:za02} in standard phase space coordinates produces a circle which cannot be easily examined for complexity.
\begq
\label{cd:za02c}
\left.
\begin{array}{lcl}
&&\mbox{{\bf The code for Fig. \ref{fg:za02} is as follows:}}\\
&& \mbox{For i = 1 to N}\\
\\
z &=& \cos(h) \cdot z_1 +\sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 -\sin(h) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
t &=& \arctan(z / w)\\
\\
r&=& \sqrt{x^2+y^2}\\
u_1 &=& x \cdot \cos(r \, (t ^ 2 - 1)) + y \cdot \sin(r \, (t ^ 2 - 1))\\
v_1 &=& y \cdot \cos(r \, (t ^ 2 - 1)) - x \cdot \sin(r \, (t ^ 2 - 1))\\
\\
u_2 &=& x_1 \cdot \cos(t) + y_1 \cdot \sin(t)\\
v_2 &=& y_1 \cdot \cos(t) - x_1 \cdot \sin(t)\\
\\
x &=& u_1\\
y &=& v_1\\
\\
x_1 &=& u_2\\
y_1 &=& v_2\\
\\
&&\mbox{\bf Plot Point $(x,x_1)$}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi In this example, the average autocorrelation is 0 everywhere
except at 0 as in the previous example, and there is
sensitive dependence on initial conditions due to the presence of
$r_0$ as a factor in the argument of the sine and cosine.
\vs6\noi
However, there is no exponential loss of information about the
angular frequency $r_0$ since the argument of the sine and cosine is equivalent to
\[r_0(t^2-1) \bmod(2\,\pi)\]
Dividing by $2 \, \pi$ and sampling this map at the equally spaced
time intervals $t=n$ gives, for large $n$, the approximate values
\[r_0(n^2) \bmod(1)\]
The analysis of Ford \cite{bi:jf} shows that the loss of binary bits of
information in this equation is of the order $2\,\ln(n)$ and
hence is not comparable with the loss of information in the
sequence defined by the one-sided shift,
\[(2^n)r_0 \bmod(1)\]
which is of the order of $n$.
%========================================================ZA, SD, EL=========================================
\section{\sml EXAMPLE \ref{ex:za}: ZA, SD, and EL without a Horseshoe}
This example is presented before giving the case (5) example
since that example is derived from EXAMPLE \ref{ex:za}.
\begin{example}
\label{ex:za}
The system of ODE's that define this example are:
\[\l( \vts \dot{x}. \dot{y}. \dot{z} \par \r)=
\l( \vts -r\,y\,z. r\,x\,z. z \par \r)\]
where $r=\sqrt{x^2+y^2}$.
\vs6
The general solution of these equations is given by
\[\l( \vts x(t). y(t). z(t) \par \r)=
\l( \vts x_0\cos(u)-y_0\sin(u). y_0 \cos(u)+x_0\, \sin(u). z_0
\exp(t) \par \r)\]
where
\[u= r_0\,z_0\,(\exp(t)-1) \hs9 \hs9 r_0=\sqrt{x_0^2+y_0^2}\]
The time $t$ map, $\Phi_t$, is given by:
\[ \Phi \l( \vts x. y. z \par \r)=
\l( \mtxs \cos(u). -\sin(u). 0. \sin(u). \cos(u). 0. 0. 0.
\exp(t) \par \r) \l(\vts x. y. z \par \r)\]
where \[u= r\,z\,(\exp(t)-1) \hs9 \hs9 r=\sqrt{x^2+y^2}\]
\end{example}
\ssb{\sml Connection to the one-sided shift}
By choosing $t=\ln(2)$ in $\Phi_t$ the following mapping is obtained:
\[ \Phi_{\ln(2)}\l( \vts x. y. z \par \r)=
\l( \mtxs \cos(r\, z). -\sin(r\,z). 0. \sin(r\,z). \cos(r \,z). 0. 0. 0.
2 \par \r) \l(\vts x. y. z \par \r) \]
Iterating this mapping $n$ times gives the map:
\[\Phi_{\ln(2)}^n\l( \vts x. y. z \par \r)=
\l( \mtxs \cos(r\, z\,(2^n-1)). -\sin(r\,z \,(2^n-1)). 0.
\sin(r\,z\, (2^n-1)). \cos(r \,z\, (2^n-1)). 0. 0. 0.
2^{n+1} \par \r) \l(\vts x. y. z \par \r)\]
Choosing the initial condition $(1,0,2\,\pi\,\theta)$ with
$\theta \in [0,\,1]$ gives
\[\Phi_{\ln(2)}^n\l( \vts 1. 0. 2\,\pi\,\theta \par \r)=
\l(\vts \cos((2^n-1) 2\,\pi\,\theta).
\sin((2^n-1)\,2\,\pi\,\theta).
2^{n+1}\,2\,\pi\,\theta\par \r)\]
The first two components of the right hand side vector are
comparable to the components, in vector form, of the $n^{th}$
iterate of the mapping of the complex circle given by
\[S(z)=z^2\]
where $z=a+bi$ with $\|z\|=1$. If as a
starting point on the unit circle the point
$\exp(2\,\pi\,\theta\,i)$ is chosen, the $n^{th}$ iterate of $S$ is
\[\exp(2^n(2\,\pi\,\theta)i)=\cos(2^n(2\,\pi\,\theta))+i\sin(2^n(2\,\pi\,\theta))\]
To see the connection to a one-sided shift note that the
arguments in the sine and cosine can be replaced by
\[2^n(2\,\pi\,\theta) \mod(2\,\pi)\]
which can also be expressed as
\[2^n \,\theta \mod(1)\]
so that the $n^{th}$ iterate of $S$ is just the sine and cosine
evaluated at the pseudo-random number given by $2^n \,\theta
\mod(1)$.
\begin{example} {\bf Autocorrelation in Forward Time}
Again consider return to the general solution of ODE's which is given by:
\[\l( \vts x(t). y(t). z(t) \par \r)=
\l( \vts x_0\cos(u)-y_0\sin(u). y_0 \cos(u)+x_0\, \sin(u). z_0
\exp(t) \par \r)\]
where
\[u= r_0\,z_0\,(\exp(t)-1) \hs9 {\rm and} \hs9 r_0=\sqrt{x_0^2+y_0^2}\]
\vs6\noi
Using the average autocorrelation for a bounded
signal to compute the forward time average autocorrelation for $x(t)$ and
$y(t)$ confirms that their autocorrelation functions are the same
as white noise.
\end{example}
%=======================SD Example==================
\begin{example} {\bf Sensitive dependence on initial conditions}
The time-one map is used for this comment. The third component
of this map is exponential and hence must have sensitive
dependence on initial conditions by EXAMPLE 11.
\end{example}
\begin{example} {\bf Horseshoes}
Use the time $t$ maps, $\Phi_t$, for these remarks.
By direct examination of $\Phi_t$, $t \neq 0$, the only fixed point
is the origin, which is not hyperbolic. There are no periodic
points since $z(t) \ra \infty$ as $t \ra \infty$.
Hence, there are no horseshoes.
\end{example}
%======================EL and ZA w/o SD===========================
\begin{example} {\sml EL and ZA without SD (case (5))}
Formulating the preceding example as a nonautonomous,
two-dimensional ODE, concede the time-one map, $\Phi_t$, but
obtain another interesting example. As a two-dimensional system
the ODEs are :
\[\l( \vt \dot{x}. \dot{y} \par \r)=
\l( \vt -r\,y\,\exp(t). r\,x\,\exp(t) \par \r)\]
where $r=\sqrt{x^2+y^2}$ as before.
\vs6\noi
The general solution of these equations is given by
\[\l( \vt x(t). y(t) \par \r)=
\l( \vt x_0\cos(u)-y_0\sin(u). y_0 \cos(u)+x_0\, \sin(u)
\par \r)\]
where
\[u= r_0\,(\exp(t)-1) \hs9 \hs9 r_0=\sqrt{x_0^2+y_0^2}\]
The same autocorrelation can be obtained for
$x(t), y(t)$ as before. Also the link to the one-sided shift is still
visible by evaluating the time series at the equally-spaced times
$t_n=n \, \ln(2)$. In this case, the sequence is obtained,
\[\l( \vt x(t_n). y(t_n) \par \r)=
\l( \vt x_0\cos(r_0(2^n-1))-y_0\sin(r_0(2^n-1)). y_0 \cos(r_0(2^n-1))+x_0\,
\sin(r_0(2^n-1)) \par \r)\]
which have an exponential loss of information about the angular
frequency, $r_0$.
\vs6\noi
Sensitive dependence on initial
conditions is lost since the fixed point (0,0) is a solution to
our equations so there is no constant $\tau$ such that some
solution starting near $(0,0)$ eventually diverges by the amount
$\tau$ at some
later time. In fact, all solutions stay a fixed distance from
(0,0) for all time.
\vs6\noi
In general, given
\[\cos(f(x_0,t))\]
where $\partial f(x_0,t)/\partial t$ is eventually increasing
and
\[-\infty<\int_0^\infty \cos(f(x_0,t))dt< \infty\]
then
\[\lim_{N \ra \infty}\frac{1}{N}\int_0^N
\cos(f(x_0,t+s))\cos(f(x_0,t))dt=0 \hs9\hs9 {\rm for}\hs9\hs9 s>0\]
The proof follows from the addition formula for the cosine.
$\cos(t^2)$ satisfies these conditions since it gives the
{\em Fresnel} integral
\[\int_0^\infty \cos(t^2)dt=0.5\sqrt{0.5\pi}\]
\end{example}
%=============================Example wo Autocorrelation===================
\begin{example}{\sml Example without Autocorrelation}
To construct an example to cover case (3), the interesting example due to Boyd \& Chua on which it is based is first presented.
In Boyd \& Chua \cite{bi:bc} an example of a dynamical system that has
no autocorrelation is given. A significant fact about this system
is that it has no mean value. Following their analysis,
construct a new example of a system having exponential loss of
information and having no autocorrelation and which does not have
sensitive dependence on initial conditions, case (3).
\vs6\noi
First note
that the basis of their example is the function
\[ x(t)=R\cos(\ln(t+1)+\theta)\]
where $R$ and $\theta$ depend on the initial conditions.
The mean value of this function, as they observe, does not exist
since
\[\int_0^N x(t)dt/N \]
is periodic as a function of $N$. Note that $x(t)$ is a solution to the classical Cauchy/Legendre differential
equation:
\[(at+b)^2 \, \ddot{x}+(at+b) \,\dot{x} +x=0\]
with $a=b=1$. The general Cauchy/Legendre equation is solved by the substitution
$at+b=\exp(z)$
which reduces it to a linear equation having constant
coefficients.
\end{example}
\begin{example} {\bf Boyd \& Chua}
\[\l(\vts x_1(t). x_2(t). x_3(t) \par
\r)=\l( \vts R\cos(\ln(1+t\,x_3(0))+\theta).
R\sin(\ln(1+t\,x_3(0))+\theta). x_3(0)(1+t \, x_3(0))^{-1} \par
\r)\]
where $R=\sqrt{x_1(0)^2+x_2(0)^2}$, $x_1(0)=R\cos(\theta)$,
$x_2(0)=R\sin(\theta)$ and defines a circuit having no average power.
\end{example}
\vs6\noi
By modifying these equations to include a twisting action
an example is constructed which has exponential loss of
information, EL, has no autocorrelation or average value and
hence does not have ZA, and does
not have sensitive dependence on initial conditions (SD):
\begin{example}{\bf Case (3):}
\vs6\noi
Consider the equations:
\[\l(\vt x_1(t). x_2(t) \par
\r)=\l( \vt R\cos(\cos(R(\exp(t)-1))\ln(1+t)+\theta).
R\sin(\cos(R(\exp(t)-1))\ln(1+t)+\theta) \par \r)\]
It is routine to derive an autonomous three dimensional system
from these two functions. The
autonomous system is given by:
\[\l(\vts x_1(t). x_2(t). x_3(t) \par
\r)=\l( \vts -x_2\,\dot{u}(x_3).
x_1\, \dot{u}(x_3). 1 \par
\r)\]
where $u=\cos(R(\exp(x_3)-1))\log(x_3+1)$. The factor
$v=R\exp(x_3)$ in $u$
assures exponential loss of information in the initial condition
$R$. By taking the cosine of $v$, a bounded factor is obtained that
is losing information at an exponential rate. Then multiply
this by $\log(t+1)$ to get the effect of the dilating time
scale in the argument of the sine and cosine. The dilating time
scale is the key feature in the Boyd \& Chua example
that assures that it cannot have an average value, and hence
cannot have ZA.
\end{example}
\vs6\noi
This example illustrates another possibility for chaos, a
dynamical system that wanders randomly in time having no average
value, and hence is not even predictable in a statistical sense.
%=======================LZ==========================
\section{\sml Complex dynamics from maps with zero Lyapunov exponents (LZ)}
In this section are examples that show that systems with zero Lyapunov exponents can produce a level of
complexity rivaling that of chaos in appearance.
\vs6\noi The rationale for these examples is a theorem of H. Weyl
\cite{bi:hw}, and observations in Brown \& Chua, \cite{bi:bc4} that the
sequence $\sin(n^2)$ is uncorrelated and uniform. The example to be given is
well known to ergodic theory but less known in the general scientific community.
\begin{example}
\begq
\T\l(\vt x. y \par\r)=\l(\vt x+y. y+\tau \par
\r)\mod(1)
\endq
\vs6 \noi
If $\tau$ is irrational, this map is ergodic (E). Further, it is not a
simple rotation, hence its orbits are not almost periodic. The
eigenvalues are 1,1, hence the Lyapunov exponent(LZ) is 0. This is a
two-dimensional example of what is called in ergodic theory
a {\em skew translation} \cite{bi:cf}, page 100. In complex coordinates it can be
expressed as
\begq
T\l(\vt w. z \par\r)=\l(\vt w\,z. a\,z \par
\r)
\endq
where $|a|=|w|=|z|=1$. Recall that a twist on the torus is
written as
\begq
T\l(\vt w. z \par\r)=\l(\vt w\,z. z \par
\r)
\endq
and so if $a=1$ the twist and the skew translation are the same.
\end{example}
\vs6\noi Also, recall that a
Bernoulli mapping on the torus is given by the composition of two
twists:
\begq
T_1\l(\vt w. z \par\r)=\l(\vt w\,z. z \par
\r)
\endq
\begq
T_2\l(\vt w. z \par\r)=\l(\vt w. z \,w \par
\r)
\endq
and so
\begq
B\l(\vt w. z \par\r)=\l(\vt w^2\,z. z \,w \par
\r)=T_2\circ T_1
\endq
The algebraic form of these equations reveals their
relationships and clearly the skew translation falls between the
twist (all orbits are almost periodic) and the Bernoulli map. If
the complex number $a$ has positive algorithmic complexity, the
orbits of the skew translation are, relative to the twist,
unpredictable and have sensitive dependence on initial
conditions(SD). In fact, the real-valued coordinates of this skew translation
have factors like $\sin(n^2), \cos(n^2)$ which are uncorrelated.
To see this, obtain the $n^{th}$ iterate of this map by a
direct computation:
\[{\rm T}^n\l(\vt x. y \par \r)=\l(\vt x+n\,y+n(n+1)\,a/2. y+n\,a
\par\r) \bmod(1) \]
\vs6\noi
By considering $k$-dimensional skew translations, obtain
terms which behave like $\sin(n^k)$, while retaining E,
ZA,
SD, and LZ. Following this idea to its natural conclusion
that a map can be constructed with LZ which has terms that behave like
$\sin(p(n))$ where $p(n) \approx \exp(n)$. In the limit it is clear
that infinite dimensional LZ can be exponential chaos in one
dimension.
\vs6\noi Figure \ref{fg:sec3fig4} is the analog of Fig. \ref{fg:sec2fig1} in Sec. \ref{sc:t1} and is presented in the same
way. The exact equation is:
\begq
T\l(\vts w. z. u \par\r)=\l(\vts w+z. a+z. b\,u \par
\r)\bmod(1)
\endq
where $0<|a|,|w|,|z|,|b|<1$. Note that using addition mod 1 is just a
convenient way of coding this equation. A
five-dimensional equation can obtain the same figure. The significance of this LZ map is
that the orbit is clearly more complex than that of Fig. 2, which was produced
by an LP map.
\begin{figure}[htbp]
\includegraphics[height=2.473in,width=2.5in,angle=0]{C:/Research/Book/Figures/eps/SEC3FIG4.eps}
\caption{{\sml This figure is a skew translation and is formatted the same as in Fig. \ref{fg:sec2fig1} to facilitate comparison}}
\label{fg:sec3fig4}
\end{figure}
\vs6\noi
\vs6\noi Figure \ref{fg:sec3fig4} demonstrates that other compositions (coordinate
transformations) can give the attractor any geometry. The
specific coordinate transformation is not important. What is
relevant is that the attractor can appear ``strange'' in one
coordinate system while ``familiar'' in another, and these facts
can muddy the waters of chaos.
%&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
\vs6\noi Relative to rotations, skew translations are quite complex. How low a level of complexity is needed
to get SA? Two examples are presented. First the WX map of
Kakutani \cite{bi:pw}. WX is mildly complex, but much less so
than skew translations may be. Further, WX does not have to
involve any stretching, contrary to what
some authors have suggested. In fact
the map of Kakutani is LZ, Fig.\ref{fg:weakmxn}.
\begin{figure}[htbp]
\includegraphics[height=2.893in,width=3.053in,angle=0]{C:/Research/Book/Figures/eps/WeakMxn.eps}
\caption{{\sml One-dimensional (interval exchange) Map that is Weak Mixing}}
\label{fg:weakmxn}
\end{figure}
\begq
\label{cd:wkmx}
\left.
\begin{array}{lcl}
\mbox{{\bf Code for Interval Exchange Map of Fig. \ref{fg:weakmxn}}} &&\\
\\
Function \; f(t)&&\\
tw &=& 2\\
ts &=& 1 / tw\\
fx &=& t + ts\\
&&\\
tw &=& tw ^ 2\\
For\;\; ii &=& 1\;\; To\;\;100\\
If\;\; t > ts \;\;Then\;\; fx &=& fx - 3 / tw\\
If\;\; t < ts \;\;Then\;\; GoTo\;\; outl:\\
ts &=& ts + 1 / tw\\
tw &=& 2\, tw\\
Next\;\; ii\\
outl:\\
f &=& fx
\end{array}\right \}
\endq
\vs6\noi Only the
geometric form of this map is presented due to its complicated definition
found in \cite{bi:pw}. Since this map is WX, its cross product
is also WX and this is illustrated in Fig. \ref{fg:weakmxn02}. As can be seen there, there
is a large measure of global structure to an orbit, but on the
detail level there is ample variation. If iterated
long enough, the orbit will be dense, so the ``empty'' places in the
figure do not indicate repelling regions.
\begin{figure}[htbp]
\includegraphics[height=2.967in,width=3.713in,angle=0]{C:/Research/Book/Figures/eps/WeakMxn02.eps}
\caption{{\sml Weak Mixing Three-dimensional Image}}
\label{fg:weakmxn02}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.39in,width=4.65in,angle=0]{C:/Research/Book/Figures/eps/IntervalX.eps}
\caption{{\sml One-dimensional (interval exchange) Map that is Weak Mixing, Plate A; and its $C^\infty$ analog, Plate B.}}
\label{fg:intervalX}
\end{figure}
\vs6\noi The $C^\infty$ version of the interval exchange map can be made to approximate the discrete version to any degree of accuracy except at the discontinuities which are countable. By lifting the $C^\infty$ map to two dimensions, it can be made an IDE.
%&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
\begin{figure}[htbp]
\includegraphics[height=2.173in,width=4.617in,angle=0]{C:/Research/Book/Figures/eps/IntervalX02.eps}
\caption{{\sml Three-dimensional Visualization of the One-dimensional (interval exchange) Map Plate B; Plate A is Three-dimensional Visualization of the $C^\infty$ Version of the Weak Mixing Interval Exchange map.}}
\label{fg:intervalX02}
\end{figure}
\vs6\noi The essential code for Fig. \ref{fg:intervalX02}, Plate A, is the code for the $C^\infty$ analog of the interval map.
\[0.5 \cdot \l(\sum_{k=0} ^\infty \bigl( \tanh(\beta\,(u - 0.5 \cdot b_k + 0.5)) - \tanh(\beta\,(u - b_k + 0.5)) \bigr) \r) \cdot (u + 0.5)\]
where
\[b_k=1.5\cdot \sum_{i=0}^k (\frac{1}{2})^i\]
\vs6\noi
Figure \ref{fg:weakmxn} illustrates a one-dimensional LZ map, $f(x)$.
This example is due to Kakutani, \cite{bi:pw}.
Making it a component of a
three-dimensional map reveals that it can produce an
attractor which appears to have a high
level of complexity,
Fig. \ref{fg:weakmxn02}. The exact equation for Fig. \ref{fg:weakmxn02}
is as follows:
\[
T\l(\vts w. z. u \par\r)=\l(\vts f(w). z+w. a\,u \par
\r)\mod(1)
\]
where $0<|w|,|z|,|a|<1$.
The ``holes'' in the attractor in Fig. \ref{fg:weakmxn02} are not repelling
regions, but rather reflections of the orbit correlations of the
function $f$. For enough iterates, the entire square will
be covered with points of the orbit.
\vs6\noi
Taking a different turn, is it possible to construct an example
which is globally LZ for which there are times it is locally not
LZ? This amounts to seeking an example which, when the exponents
are averaged over infinite time, the exponents are not positive,
but for which over finite periods of the orbit they are positive.
Clearly, if there are some runs of positive exponents there must
be some runs of negative exponents to force the average to be zero.
\vs6\noi It is possible to construct any number of such maps on the unit
interval so long as only a countable number of discontinuities are allowed.
The process requires that the interval be partitioned into
subintervals and on each subinterval define a function to be increasing and
differentiable. Further, on the set of subintervals the functions
must be chosen to be invertible. Figure \ref{fg:lz} is an example.
\vs6\noi
In Fig. \ref{fg:lz}, Plate A, a map, $g(x)$, with LZ and E is constructed.
As a result, the computation of the
Lyapunov exponent is reduced to the fundamental theorem of
calculus and the total
percentage of expansion must equal the total percentage of
contraction so that the net is 0. Figure \ref{fg:lz} reveals that this
map produces a distribution of orbit points that is quite
uniform. The map for Fig. \ref{fg:lz} plate B is the same as Eq. (26) where $f$
is replaced by $g$.
\begin{figure}[htbp]
\includegraphics[height=2.537in,width=4.653in,angle=0]{C:/Research/Book/Figures/eps/LZ.eps}
\caption{\sml {\bf Plate A is a mapping that is locally LZ but not Globally; Plate B is the Three-dimensional Attractor Constructed from Plate A }}
\label{fg:lz}
\end{figure}
\vs6\noi
Thus there are LZ maps which are WX, SD, and ZA, but are
not B or even K. It is not known whether the map of Eq. (19) is SX
for the right choice of $a$.
\vs6\noi An open question: {\bf Does E + SD + ZA imply SX, K, B or Chaos?}
\vs6\noi
There are obviously many variations on this question. Clearly from these examples it can be inferred that neither E nor WX implies ZA, SD, or LP.
%13
%=========non chaotic Attractors==================
\chapter{The Phenomena of Non-chaotic Strange Attractors}
\label{ch:nc}
\begin{center}
\parbox{3.5in}{\em The recognition that attractors arise in chaotic systems in very complex forms raises the question of whether complex attractors can arise from non chaotic systems}
\end{center}
\vs6\noi
Numerous researchers have reported on strange non-chaotic
attractors. An early paper is that of Grebogi, Ott, Pelikan, \&
Yorke \cite{bi:go}. The paper of Ding, Grebogi \& Ott \cite{bi:dg} is an
important development of the 1984 paper. The authors sought to
bring attention to the fact that an attractor may have complex
geometry without arising from LP maps. The 1989 work
sought to show how this fits into the scheme of nonlinear dynamics.
It is shown here that the matter of non chaotic strange attractors
can be traced to low-orbit correlations. But first the
1984 and 1989 examples are addressed.
\vs6\noi The skew translation
\begq
\label{eq:st}
\l(\vt x.y\par\r)\ra \l( \vt x+y. y+\tau \par\r)\mod(1)
\endq
may be modified to be nonlinear
as follows:
\[\l(\vt x. y \par \r)\ra \l(\vt f(x,y).
y+a \par \r)\bmod(1)\]
where $a$ is a constant. As always, the use of the
mod(1) function is only a convenience and may be replaced by
elementary functions by increasing the dimensions of the space.
From this equation it is clear that the equations of Grebogi,
Ott, Pelikan, and Yorke \cite{bi:go} are nonlinear skew
translations as is also the case with
the equations of Ding, Grebogi, and Ott \cite{bi:dg}:
\[\l(\vt x_{n+1}. \theta_{n+1} \par \r)=\l(\vt f(x_n,\theta_n).
(\theta_n+2\,\pi\,\omega)\bmod(2\,\pi) \par \r)\]
As with proving maps are chaotic by proving the
existence of horseshoes, for this line of analysis
to be complete it would be
necessary to show that the time-one maps of the ODE they are
analyzing are conjugate to a skew translation on some subset of
its domain.
\vs6\noi The nonlinearities of their maps are
irrelevant to the existence of non-chaotic strange attractors.
Modify Eq.\ref{eq:st} to have an eigenvalue less than 1:
\[\l(\vt x. y \par \r)\ra \l(\vt \alpha\, x+y.
y+a \par \r)\bmod(1)\]
and obtain the attractor in Fig. \ref{fg:skewtrns}
\begin{figure}[htbp]
\includegraphics[height=2.183in,width=2.23in,angle=0]{C:/Research/Book/Figures/eps/SkewTrns.eps}
\caption{{\bf Non chaotic Strange Attractor from Skew Translation}}
\label{fg:skewtrns}
\end{figure}
\vs6\noi By making the map nonlinear, it is possible to routinely introduce bending into the
attractor Fig. \ref{fg:skewtrnsnl}.
\begin{figure}[htbp]
\includegraphics[height=2.223in,width=2.25in,angle=0]{C:/Research/Book/Figures/eps/SkewTrnsNL.eps}
\caption{{\bf Non chaotic Strange Attractor from nonlinear Skew Translation}}
\label{fg:skewtrnsnl}
\end{figure}
\vs6\noi The
explanation of the formation of non-chaotic strange attractors is
that if the orbits of a map are uncorrelated in time, the
geometry of the orbit can become
uncorrelated in space. Skew translations can have ZA, and hence
their dampened orbits can be made to look peculiar, depending on
how the damping factor is included in the equation of the map.
\vs6\noi In general, in the presence
of damping, the correlation of the orbits of a map
can vary from 0 to $ \pm 1$, depending
on the size of the damping factor, and this level of correlation
may be reflected in the
spatial geometry of the orbits. But note that printed geometry, i.e. pictures,
are a subjective element of human cognition, and what is peculiar
is quite relative. It is possible to force the dampened
uncorrelated orbits to take on
familiar forms as well. Figure \ref{fg:sec3fig4} demonstrates this. The attractor is
a square.
The effect on visual presentation of orbits is a function of {\em
how} the damping is inserted in the equation. This distortion
can happen for any map whose orbits lack some degree of
correlation. This implies that orbit geometry is stable under
small perturbations.
\vs6\noi To further illustrate these ideas,
modify the cat
map to have damping giving a distorted attractor geometry as
seen in Fig. \ref{fg:sec4fig9a}.
\vs6\noi The map for Fig. \ref{fg:sec4fig9a} is
\[\l(\vt x. y \par \r)\ra \l(\vt 2\, x+y.
x+(1-0.5\alpha)\,y \par \r)\bmod(1)\]
\begin{figure}[htbp]
\includegraphics[height=3.05in,width=3.117in,angle=0]{C:/Research/Book/Figures/eps/SEC4FIG9a.eps}
\caption{{\bf Damped Bernoulli Mapping}}
\label{fg:sec4fig9a}
\end{figure}
\vs6\noi The parameter factor multiplying $y$ is chosen to make the
determinant of this map $1-\alpha$. In Fig. \ref{fg:sec4fig9a}, $\alpha=0.02$.
\vs6\noi Making the Anosov map nonlinear causes the orbits to bend, as
seen in Fig. \ref{fg:sec4fig12}.
\begin{figure}[htbp]
\includegraphics[height=3.05in,width=3.117in,angle=0]{C:/Research/Book/Figures/eps/SEC4FIG12.eps}
\caption{{\bf Bernoulli Map with Nonlinear Effect}}
\label{fg:sec4fig12}
\end{figure}
\vs6\noi The equation for Fig. \ref{fg:sec4fig12} is
\[\l(\vt x. y \par \r)\ra \l(\vt 2\, x+\sin(\beta\,y)^2.
x+(1-0.5\alpha)\,y \par \r)\bmod(1)\]
where $\beta=1.8$.
Figure \ref{fg:sec4fig12} bears resemblance to Plate A of Fig. \ref{fg:fmap0304}.
The fundamental map, Sec. \ref{sc:wa} provides an orderly evolution from periodic
to chaotic that encompasses skew translations and non-chaotic
strange attractors. For the inverse of this idea, Fig. \ref{fg:sec2fig1}
is an example of a non-strange chaotic
attractor in that the attractor is a square. It is possible to
make a non-strange chaotic attractor in the form a circle, straight line,
or any simple geometric shape, except, possibly, a countable set of points.
\vs6\noi Maps producing strange attractors have some form of complexity
ZA, LP, SD, for example,
because the geometry is a reflection of orbit correlations.
Thus, the existence of strange attractors
(SA) is a measure of complexity that can be added to a list of other
measures. What is revealed is that the lowest level of dynamical
complexity is periodic and almost-periodic dynamics. Next
appears to be E, SD, WX, ZA, followed by LP. But this is not a totally ordered
system; it is a partial order, since SD is found in almost-periodic
systems such as the twist on the two-dimensional torus.
Of all the measures of complex dynamics, ZA and LP are the
most general, but do not form a total ordering, even after adding entropy.
\vs6\noi By adding a small
amount of damping, the attractor in Fig. \ref{fg:sec4fig13} is obtained, which may be
termed strange.
\begin{figure}[htbp]
\centering
\includegraphics[height=3.413in,width=4.267in,angle=0]{C:/Research/Book/Figures/EPS/SEC4FIG13.eps}
\caption{\scriptsize Strange Non-chaotic Attractor \footnotesize}
\label{fg:sec4fig13}
\end{figure}
\vs6\noi
The last example of the phenomenon of non-chaotic SA
demonstrates the considerable level of order that may
be present and still obtain SA. Take the map of
Fig. \ref{fg:weakmxn} to construct a strange attractor. This map, also
constructed by Kakutani, is only E, and
further, it has only discrete spectrum \cite{bi:wp}. In simple
language this means that among all E maps this type of E map is
the simplest. For example, it is known that all E
maps with discrete spectrum are
group rotations. To obtain a two-dimensional illustration form the cross
product of this map with itself, and include a parameter,
$\alpha$. A variation:
\[\l(\vt x. y \par \r)\ra \l(\vt f(x).
\alpha\,f(y) \par \r)\]
\vs6\noi Figure \ref{fg:sec4fig16}, where $\alpha=1.0$, shows typical orbits of this map,
which is not E,
hence orbits are not dense. The different orbits are indicated by
different colors.
The orbits of $f$ have a level of correlation ranging from
about 0.55 to over 0.90, and the autocorrelation is nearly periodic.
\begin{figure}[htbp]
\centering
\includegraphics[height=2.103in,width=2.13in,angle=0]{C:/Research/Book/Figures/EPS/Sec4Fig16.eps}
\caption{\scriptsize Strange Non-chaotic Attractor \footnotesize}
\label{fg:sec4fig16}
\end{figure}
\vs6\noi In Fig.\ref{fg:sec4fig17} choose $\alpha=0.999$,
which is enough damping to form attractors. Figure \ref{fg:sec4fig17}
shows that the basins of attraction are in the shape of a block
letter ``S.'' There are multiple basins, as indicated by the
numerous colors, but some colors have been used twice. This is
not important, however, since the point is that the various
attractors are peculiar in shape, all have about the same shape,
and there are many basins of attraction.
\begin{figure}[htbp]
\centering
\includegraphics[height=1.983in,width=1.993in,angle=0]{C:/Research/Book/Figures/EPS/Sec4Fig17.eps}
\caption{\scriptsize Strange Non-chaotic Attractor \footnotesize}
\label{fg:sec4fig17}
\end{figure}
\vs6\noi Conclude that the
strangeness of the geometry of an attractor plotted on a computer
screen is a result of the amount of damping, how the damping
occurs in the definition of the map, and most important, the correlation of
orbits. The exact value of the initial conditions may be a factor
also. Clearly, the association of SA with chaos is a coincidence
of the orbit correlations found in chaos. The degree of
complexity found in chaos and even in skew translations is far
more than needed to obtain this interesting phenomenon.
\ssb{Correlation of Initial Conditions}
The examples of the preceding sections have demonstrated the need
to determine when two points are correlated. Define the
correlation of two points as follows: First discard their
integer part and consider only the fractional part of the number.
Now consider their fractional part
as a sequence of integers between 0 and 9. As sequences,
apply the usual formula for correlation of two sequences
to obtain the desired definition.
\vs6\noi Using this definition, the following is obtained whose proof
poses no mathematical difficulties.
\vs6\noi Let $x_0$ be any point in space, and let $U(x_0)$ be any
neighborhood of $x_0$, however small. Then within $U(x_0)$ there
are many points that are uncorrelated to $x_0$.
\vs6\noi This means in simple terms that near any point are countless
points that are uncorrelated with it
and that the location of the uncorrelated points is in essence a
random walk from $x_0$.
\vs6\noi
The significance of this fact is that any dynamical system that
acts on two uncorrelated points in such a way as to move the
insignificant, lower-level,
digits up into a higher position of significance
will be reflected in a complex
relationship between the orbits of these two points.
Hyperbolic systems are capable of doing this. The shift is
defined to do precisely this and nothing more. Different
algorithms have varying abilities to elevate the role of
lower-level digits into significance,
and this is reflected in the notions of E, WX, SX,
et cetera. The significance of this reaches a maximum when applied to
points having positive algorithmic complexity. These are points
which cannot be described by a finite algorithm and hence, cannot be
reached by iteration of a finite algorithm. Such points cannot be
correlated to points having zero complexity, for example. Any
dynamical system which moves lower-level digits into significance
having such a point as an initial condition
(which, of course, can never be known) has a complex orbit solely
as a result of the complexity of this initial condition.
\vs6\noi Any dissipative dynamical system that treats uncorrelated points
differently and correlated points
similarly can have many basins of attraction as well as very
complex-looking attractors.
\vs6\noi As a result, at least two numbers are necessary when comparing two
quantities: their distance apart, and their correlation. Their
distance is a measure of the present; their correlation is a
measure of their potential future relationships. Of these two
measurements, clearly correlation is the most elusive and
accounts for much of the uncertainty of the future. When two quantities are
uncorrelated, their future depends solely on the type of dynamics
they undergo. In weather systems, dynamics can fluctuate
drastically from almost periodic upward, and thus
uncorrelated quantities can fluctuate from having an
almost-periodic relationship to a near-random relationship.
\vs6\noi A simple question that can be answered is whether a simple
rational or integer-initial condition can converge to something
complex, but not having positive algorithmic complexity,
under the action of a dynamical system. The answer is yes. Any
algorithm for the computation of the digits of
$\pi$ is an example. As noted previously, the Chudnovsky brothers have shown that the
digits of $\pi$ are as complex as the outputs of typical
random-number generators. Next ask if it is possible to construct an algorithm
having multiple basins of attraction which converge to two
different ``complicated'' irrational numbers. The answer is yes, and
the number of attractors may be made as large as you like. A
typical example is
\[x \ra h_1(x)\,f(x)+h_2(x)\,g(x)\]
where $f \ra \sqrt{2}, g \ra \sqrt{63}$ and $h_1(x)$ is 1 near
$\sqrt{2}$ and 0 near $\sqrt{63}$, and $h_2(x)$ has the opposite
specifications. By starting near either square root with a simple
rational initial condition, iterates converge to that square root, an
irrational number. Hence it is a fact that a finite
algorithm can be used to start at a simple initial condition and then, using
this algorithm, be attracted
to a complicated irrational number that tells us that dynamical
systems can create some level of complexity, but not positive
algorithmic complexity.
%=====================================complexity=========================================
%=====================================complexity=========================================
%14
\chapter{Boolean Complexity}
\label{ch:bool}
\begin{center}
\parbox{3.5in}{\em Boolean systems are inherently discrete. However, large dimensional Boolean systems can demonstrate the same level of complexity as ODEs or IDEs.}
\end{center}
\vs6\noi The study of chaos has raised many interesting questions about highly complicated nonlinear systems generally. This is because, in an effort to answer questions about chaos, it has been necessary to undertake the study of non-chaotic processes such as skew translations and infinite dimensional rotations which can produce dynamics that can appear related to chaos. Non-chaotic strange attractors is one example. The study of dynamics at the {\em edge} of chaos that has resulted from investigations into complex non-chaotic systems has led to the study of complexity as a separate discipline. Boolean complexity is part of this study.
\section{\sml Background}
Philip Anderson, in Anderson \cite{bi:pa}, describes eight disciplines that either have been, or are presently, making a contribution to the study of complexity. They are, roughly: (1) the theory of complexity or computability \`a la Chaitin, Kolmogorov, Church and others; (2) information theory; (3) ergodic theory and dynamical systems; (4) cellular automata and artificial life; (5) large random physical systems; (6) self-organized criticality; (7) artificial intelligence; and (8) neuroscience. Gell-Mann, Crutchfield, and their associates are contributing to (1),(3),(4),(5),(6), and (7), and there is a long list of other contributors. In general, transitioning from these individual disciplines to rigorous mathematical theorems about complexity appears to be difficult. Specifically, this section presents a rigorous connection between complexity, chaos, and various forms of complicated dynamics that have been studied extensively. The connection proceeds through area (3) cited above.
\vs6\noi It is an objective of IDE theory to bring all of these ``complexity'' disciplines under a single area of study. Specifically, Boolean complexity is linked to single scrolls through one-dimensional maps which in turn can be lifted to two-dimensional dynamical systems through the lift technique of lifting a single scroll to a three-dimensional system, see Sec. \ref{sc:ss1}.
\subsection{\sml Notation}
\noi Let $\T$ be a transformation on ${\bf R}^n$ that preserves a special finite subset, call it ${\cal V}^n$. Define this subset as follows:
\[{\cal V}^n=\{{\bf x}\in {\bf R}^n|{\bf x}=(a_1,a_2,\ldots,a_n), a_i \in \{0,1\}\}\]
$\T$ is required to preserve this set, therefore ${\bf T}({\cal V}^n)\subseteq {\cal V}^n$. Allow that $\T$, under iteration, may map ${\cal V}^n$ to a proper subset of itself.
\vs6
\noi The objective is to show that a certain class of transformations which preserve ${\cal V}^n$ can produce very complex dynamics. As an aid to seeing how complex dynamics can occur in this setting, construct an invertible mapping of ${\cal V}^n$ into the unit interval I as follows: Let $X=(x_1,x_2,x_3,\ldots,x_n)\in {\cal V}^n \subset {\bf R}^n$. Define the invertible mapping $\pi: {\cal V}^n \ra {\rm I}$ as
\[\pi(X)=(0.x_1 x_2 x_3\ldots x_n)\]
For example, let $(1,1,0,0,1) \in {\cal V}^n$, then $\pi(1,1,0,0,1)=0.11001$.
\vs6\noi
For any high-dimensional dynamical system $\T$, having a finite invariant subset of the type ${\cal V}^n$, the mapping $\pi$ can be used to define a mapping on [0,\,1] that relates $\T$ to a one-dimensional map of $\I$. In particular, define
$\S_{{\bf T}}(X)\equiv \pi({\bf T}(\pi^{-1}(X)))$, then
\[\S_{\bf T}(0.x_1 x_2 x_3 \ldots x_n) =\pi({\bf T}(x_1,x_2,x_3,\ldots x_n))\]
Thus, when $\T$ is restricted to ${\cal V}^n$, $\T$ is conjugate to a one-dimensional map of a finite subset of [0,\,1] to itself. $\S_{\bf T}$ may be conveniently viewed as a mapping on a subset of fractions between 0 and $(2^n-1)/2^n$ in order to facilitate graphical representation of the one-dimensional map. This is expressed by transforming the point $0.x_1 x_2 x_3 \ldots x_n$ by the formula
\[ (0.x_1 x_2 x_3 \ldots x_n) \ra \sum_{i=1}^n \frac{x_i}{2^i}\]
$\S_{\bf T}$ will mean either the decimal or binary mapping so long as it is not ambiguous.
\vs6\noi
The mapping $S_{\bf T}$, as defined, is determined only on a finite subset of [0,\,1] by the mapping $\T$. It can be extended to be a continuous mapping in any number of ways, all of which are equivalent so long as only the dynamics of $\T$ on ${\cal V}^n$ are of interest.
\begin{example} Define $\T$ as
\[\l(\vt x. y \par \r) \ra \l(\vt x+y- x\cdot y. 1-x \par \r)\]
Then $\S_{\bf T}$ is defined for four points in $\I$. They are $0.0, 0.25, 0.5, 0.75$.
\[\S_{\bf T}(0.0)=0.25,\; \S_{\bf T}(0.25)=0.75,\; \S_{\bf T}(0.5)=0.5,\; \S_{\bf T}(0.75)=0.5\]
Since $\S_{\bf T}$ is not defined for the point $(1,0)$ by $\T$, extend $\S_{\bf T}$ to include the interval $(0.75,1.0)$ by assigning it to be the identity on this subinterval. As the dimension of $\T$ increases, this subinterval, which for an $n$-dimensional map is $((2^n-1)/2^n, 1.0)$, goes to zero and so this convention is both harmless and useful.
\end{example}
\vs6\noi It is clear that there are $n!$ representations of a given $\T$, since the coordinates of $\T$ have $n!$ permutations. Thus there are $n!$ different ways of graphing $\T$ on I. However, the dynamics of all representations are qualitatively the same. For example, if a point converges to a given attractor in one representation, then it converges to an attractor of the same size in all representations. Thus, the dynamics of $\T$ are invariant with regard to the representation. It may happen, however, that one graphical representation may be more appropriate than another for a given $\T$. An example of this is in the next section. For convenience, refer to $\pi$ as the standard representation based on a given labeling of the coordinates of $\T$.
\vs6\noi
The next section will show how to define $n$-dimensional maps which preserve ${\cal V}^n$ and have nearly chaotic dynamics. Before that, some examples of how to construct mappings that just preserve ${\cal V}^n$ will be given.
\begin{example} {\bf Example:} Consider the following transformation on ${\bf R}^4$:
\[ {\bf T}(x,y,z,w)=(x\cdot y \cdot z,x+y-x\cdot y,1-z\cdot x, x+y-2x \cdot y)\]
which preserves ${\cal V}^4$. Construct such transformations generally by the requirement that each coordinate of the transformation define a mapping of ${\cal V}^n$ to $\{0,1\}$. The following are some examples of such functions:
\[ (x,y,z) \ra x+y+z-2(x\cdot y+y\cdot z+ z\cdot x)+3x\cdot y\cdot z\]
\[(x,y,z)\ra x\cdot y+y\cdot z+ z\cdot x -2x\cdot y\cdot z\]
In the first example, if exactly one of $x,y,z$ is one, the result is one; otherwise it is zero. For example, if both of $x,y$ are one, the result is zero. In the second example, at least 2 coordinates must be 1 for the result to be 1; otherwise it is zero.
\vs6\noi
Note that the exponent of any factor may increase without losing the invariance of ${\cal V}^n$ since 1 or 0 to any power is still 1 or 0, respectively. Thus
\[(x,y,z)\ra x^3\cdot y+y\cdot z+ z\cdot x -2x\cdot y^5\cdot z^{0.5}\]
also defines a mapping of ${\cal V}$ to the set $\{0,1\}$.
\end{example}
\subsection{\sml Construction of complex dynamics in high-dimensional spaces}
Using the notation of the previous section, complex orbits can be generated in high-dimensional spaces with very long periods by writing down a transformation that is conjugate to $3 \,x \bmod(1)$ when restricted to ${\cal V}^n$. The map $3\,x \bmod(1)$ is chosen as it is able to generate complex orbits from points which have zeros in every coordinate position except 1. This would not be possible using $2\, x \bmod(1)$. To define T to be conjugate to $3\,x\bmod(1)$, T must carry out the function of a coordinate shift followed by a binary addition operation. The $i^{th}$ coordinate function for the shift-and-add transformation, T, is given by:
\begq
\label{eq:bool1}
\left.
\begin{array}{lcl}
x_{i+1}& = & y_i+v_{i-1}\cdot(1-2\cdot(x_i+ x_{i-1})+4\cdot z_i)\\
y_i & = & x_i+x_{i-1}-2 \cdot z_i \\
z_i& = &x_i \cdot x_{i-1} \\
v_i & = & z_i+y_i \cdot v_{i-1} \\
\end{array}\right\}
\endq
\vs6\noi The equation for $v_i$ does not have to be recursive in $v_{i-1}$ and can be written out completely in advance. The $k^{th}$ coordinate is:
\[v_k=z_k+y_k\cdot z_{k-1}+ y_k\cdot y_{k-1}\cdot z_{k-2}+ y_k\cdot y_{k-1}\cdot y_{k-2}\cdot z_{k-3}+ \cdots +y_k\cdot y_{k-1}\cdot y_{k-2}\cdots y_1 \cdot v_0 \]
\vs6\noi To start this equation, choose $v_0=0.0$ and so the last term drops out of the equation for $v_i$. As T is precisely a shift and add with carry on ${\cal V}^n$ it is conjugate to $3\,x \bmod(1)$ by direct inspection of the formulae. Figure \ref{fg:cplxfg1} is a graph of $S_{\rm T}$ on the unit interval which will be recognized as the graph of $3\,x\,\bmod(1)$. This graph is constructed by assuming $x_1$ is in the first decimal place, i.e., use the standard representation, $\pi$. Any other representation would make it difficult to observe that T is actually $3\cdot x \bmod(1)$ on ${\cal V}^n$. However, the fact that T is conjugate to a chaotic map on [0,\,1] is still clear from every representation of the coordinates of T as a number on the unit interval.
\begin{figure}[htbp]
\includegraphics[height=2.71in,width=2.607in,angle=0]{C:/Research/Book/Figures/eps/CPLXFG1.eps}
\caption{{\sml This figure shows the one-dimensional map, $S_T(X)$
corresponding to Eq.\ref{eq:bool1}. It is morphologically equivalent to the one-dimensional
map $x\ra 3x\mod(1)$ on the domain of definition.}}
\label{fg:cplxfg1}
\end{figure}
\begq
\label{cd:cplzfg1}
\left.
\begin{array}{lcl}
&& \mbox{The code for Fig. \ref{fg:cplxfg1} is as follows:}\\
For\; j &= &1\; To\; N\\
&&\\
Sum& = &0\\
&&\\
For\; i &=& 1\; To\; M\\
Sum& = &Sum + x(M - i + 1) / a ^ i\\
Next\; i&&\\
&&\\
x &=& Sum\\
&&\\
w(1)& =& 0\\
&&\\
For\; i& = &2\; To\; M\\
w(i)& =& x(i - 1)\\
Next\; i&&\\
&&\\
For\; i& =& 1\; To\; M\\
&&\\
u(i)& = &(x(i) + w(i) + c(i - 1)) - 2 \cdot (x(i) \cdot w(i) + x(i) \cdot c(i - 1) + w(i) \cdot c(i - 1)) + 4 \cdot x(i) \cdot w(i) \cdot c(i - 1)\\
c(i)& =& (x(i) \cdot w(i) + x(i) \cdot c(i - 1) + w(i) \cdot c(i - 1)) - 2 \cdot x(i) \cdot w(i) \cdot c(i - 1)\\
&&\\
Next \;i&&\\
&&\\
For \;i &= &1\; To\; M\\
x(i) &=& u(i)\\
Next\; i&&\\
&&\\
Sum &=& 0\\
&&\\
For\; i& = &1\; To\; M\\
Sum &=& Sum + x(M - i + 1) / a ^ i\\
Next\; i&&\\
&&\\
y &= &Sum\\
&&\\
&&{\bf Plot\; Point }\; (x,\,y)\\
&&\\
Next\; j&&
\end{array}\right\}
\endq
\sml
\vs6
\noi Some important observations about the shift-and-add transformation are: (1) It can be defined in any number of dimensions. When $\T$ is restricted to points whose coordinates are all less than 1, it converges to the origin as a local attractor. For values greater than 1, it is unbounded. An interesting feature of T is that, as a dynamical system, it would not be readily recognized that $\T$ is conjugate to a unilateral shift on a finite subset. This is a key point. Very high-dimensional dynamical systems having finite invariant subsets can have a hidden complexity in the sense that the complexity is not readily apparent from the form of the equations defining the system. This further emphasizes the need for a canonical representation of any system that would allow the Hirsch Conjecture to be resolved.
\vs6\noi Because dynamical systems such as $\T$ may be equated to finite automata, $\T$ demonstrates that finite automata can have a degree of complexity whose source is obscured by the presence of its numerous dimensions or cells. Also, it draws a connection of finite automata to dynamical systems theory and thus IDE theory, a question that was raised by Anderson \cite{bi:pa}.
\vs6\noi
The conjugacy between T and $S_{\rm T}$ may be viewed in two ways. First, the orbits of $\T$ in ${\cal V}^n$ define orbits of $S_{\rm T}$ on $\I$ and so the attractors of $\T$ correspond to attractors of $S_{\rm T}$. Second, since $S_{\rm T}$ is a function, it can be graphed. This is done by making a random selection of the points in ${\cal V}^n$ and plotting the value of $S_{\rm T}$. This {\em Monte Carlo} method of graphing $S_{\rm T}$ is adequate for this purpose.
\vs6\noi
By composing $\T$ with various other transformations the relationship between $\T$ and $3\,x \bmod(1)$ can be made obscure. One interesting transformation is the reflection through the center coordinate, $x_i\ra x_{n-i+1}$, where $n$ is the dimension of the domain of $\T$. The result of this composition is illustrated in Fig.~\ref{fg:cplxfg2}.
\begin{figure}[htbp]
\includegraphics[height=2.433in,width=2.557in,angle=0]{C:/Research/Book/Figures/eps/CPLXFG2.eps}
\caption{{\sml In this figure, the map of Fig. \ref{fg:cplxfg1} has been composed
with a map which reflects the decimal positions of a number about the middle decimal value. In this form, the chaotic nature
of the original map T is clear, but the reason for which it is chaotic is obscured by its high-dimensional domain of definition.}}
\label{fg:cplxfg2}
\end{figure}
\vs6\noi Figure \ref{fg:cplxfg2} is the graph of a function on the unit interval. The appearance of this graph could suggest the orbit of a point under the action of a 2-dimensional map such as the Anosov map, Arnold \& Avez \cite{bi:aav}. This illusion results from the high degree of oscillation of the graph.
\vs6\noi
A simple coordinate shift produces $2\, x \bmod(1)$, i.e., $x_i\ra x_{i-1}$. The graph determined by this conjugacy is shown in Fig.~\ref{fg:cplxfg3}. Plotting this figure requires that the dimension of the domain of T be large enough to get a good graph. A dimension greater than 1000 will suffice. By iterating $\T$ when defined to be conjugate to $2\,x \bmod(1)$, the orbits
cannot exceed the dimension of the domain space. This is not true of $3\, x \bmod(1)$ even though both $2\, x \bmod(1)$ and $3\, x \bmod(1)$ are shifts on the appropriate symbol set.
\begin{figure}[htbp]
\includegraphics[height=2.12in,width=2.2in,angle=0]{C:/Research/Book/Figures/eps/CPLXFG3.eps}
\caption{{\sml This figure shows that a simple coordinate
shift generates 2x mod (1). The orbits of this map are short in comparison with those of Figs. \ref{fg:cplxfg1} and \ref{fg:cplxfg2}, while both maps
are essentially chaotic.}}
\label{fg:cplxfg3}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.413in,width=4.607in,angle=0]{C:/Research/Book/Figures/eps/CPLXFG4A.eps}
\caption{{\sml This figure shows that a simple coordinate
shift generates 2x mod (1). The orbits of this map are short in comparison with those of Figs. \ref{fg:cplxfg1} and \ref{fg:cplxfg2}, while both maps
are essentially chaotic.}}
\label{fg:cplxfg4a}
\end{figure}
\vs6\noi Compare Fig. \ref{fg:cplxfg4a} with the unilateral shift $2\, x\, \mod(1)$ of Chapter \ref{ch:shift}. There is no simple experiment that can be performed that can distinguish between the time-one map of Boolean automata in the figure in the right hand plane and the time-one map of the unilateral shift IDE.
%==================Extending T========================
\section{\sml Extensions to Finite Invariant Subsets with Integer Coordinates}
The invertible mapping $\pi$ used to obtain a conjugacy between $\T$ and the one-dimensional map $S_{\rm T}$ can be defined for any $\T$-invariant set whose coordinates are integers. The essential step in doing this is keeping track of multiple digit integers. For example, if the point (21,3) is mapped to 0.213, it cannot be inverted without more information since the point (2,13) also has this representation. The solution of this invertibility problem is the choice of a useful radix. If 21 is the largest integer to appear in a coordinate of a finite subset of the plane, then the choice of the radix 22 will assure that $\pi$ is defined to be invertible. To continue this example, suppose the radix is 22, then 10 corresponds to 22, and the numbers below 22 are labeled as $0,1,2,\dots,9,a_{10}, a_{11}, a_{12}, \ldots a_{21}$. In this system the point (21,3) is $(a_{21},3)$ and the point $(2,13)$ is $(2,a_{13})$. This removes the ambiguity since $\pi((a_{21},3))=0.a_{21}3$ and $\pi((2,a_{13}))=0.2a_{13}$. These numbers can be converted to decimal using the usual formulae, thus assuring that the dynamics of the one-dimensional map can be seen in the usual way. This gives the result:
\begin{lemma} Let T be a mapping of ${\bf R}^n$ into itself which preserves a finite subset, ${\cal V}^n_k$, whose coordinates are integers less than or equal to $k$. Then T is conjugate to a one-dimensional map when restricted to ${\cal V}^n_k$.
\end{lemma}
\pf This follows by using the radix $k+1$ to define $\pi$, and then defining $S_{\rm T}=\pi \circ {\rm T} \circ \pi^{-1}$. \rl
%====================Boolean ====================================
\section{\sml Dynamics of Boolean Automata}
Boolean automata are automata whose states are either 0 or 1. Typically, these automata are modeled using Boolean logic. This section shows that these automata can be viewed as dynamical systems on high-dimensional manifolds.
\vs6\noi
It is sufficient to show that every Boolean expression has an algebraic expression which is equal to the Boolean expression when restricted to the set $\{0,1\}$. Begin with binary expressions. The Boolean expression for {\em and} is given by $x \wedge y$. The algebraic equivalent is $x \cdot y$. The expression for Boolean {\em or} is $x \vee y$, whose algebraic equivalent is $x+y-x\cdot y$. This could be derived by using the Boolean expression for the complement of $x$, whose algebraic equivalent is $1-x$. {\em Exclusive or}, $\underline{\vee}$, is not derivable from {\em and} and complements, and must be separately derived. Its algebraic expression is $x+y-2\cdot x \cdot y$. {\em If x then y} is given by $\sim x \vee y$, which is $(1-x)+y - (1-x)\cdot y$, which simplifies to $1-x+x\cdot y$. From these elementary formulae the algebraic expression for any Boolean expression can be deduced, thus making it possible to use elementary algebra to reduce a Boolean logic expression to its simplest form. For example, $(x \wedge y) \underline{\vee} z$ is given by $x\cdot y +z-2x\cdot y\cdot z$.
The algebraic formulae for Boolean logic make it clear that Boolean expressions produce nonlinear dynamics, and that finite automata generally are nonlinear dynamical systems.
Since finite automata can be examined through one-dimensional dynamics, this raises the question: ``are there simple Boolean formulae which give rise to complex one-dimensional dynamics?'' The construction of the mapping $3x \bmod(1)$ is an example of a finite automata which has chaotic dynamics. But the formula for this map is relatively complicated due to the carry function. A simpler formula with complex dynamics is given by
\[x_i \ra x_i \cdot x_{i-3}+x_{i+3}-2x_i\cdot x_{i+3} \cdot x_{i-3}\]
where $0 \ra M, -1\ra M-1, -2 \ra M-2, \mbox{ and } i+3 \mbox{ is } \bmod(M+1)+1$, with $M=81$, the dimension of the manifold.
\vs6\noi
Figure \ref{fg:cplxfg4} shows the one-dimensional attractor for this system. In this system the value of a cell is a function of its value and the value of the cells three steps before and three steps afterward. If this system were laid out as a 9 by 9 array of cells, it would happen that the value of a boundary cell such as cell 10, the first cell in the second row, is modified by cell 7 and cell 13. This asymmetry is a curious consequence of viewing a dynamical system as a cellular automata laid out on a two-dimensional plane (if it is laid out as a torus, this phenomenon goes away). More interesting is that any automata that is defined to have each cell only modified by its neighbors translates into a dynamical system with asymmetries in its definition. This increases the difficulty of writing down a simple formula for the dynamical system. Still more interesting, the consequence of a rule whereby a cell's value is only affected by that of its neighbors translates to having a digit in a binary number affected by digits far removed from it. Thus, significant digits in a number can be affected, or altered, by the value of insignificant digits. This fact provides an insight into how complex dynamics can evolve unexpectedly, since having an insignificant digit in a number affect the value of a significant digit means that the meaning of the term {\em significant digit} is altered.
\begin{figure}[htbp]
\includegraphics[height=2.08in,width=2.05in,angle=0]{C:/Research/Book/Figures/eps/CPLXFG4.eps}
\caption{{\sml This figure shows that very complicated orbits can be generated without resorting to the use of known chaotic maps
such as 3x mod (1).}}
\label{fg:cplxfg4}
\end{figure}
\scriptsize
\begq
\label{cd:cpx4}
\left.
\begin{array}{lcl}
&& \mbox{The code for Fig. \ref{fg:cplxfg4} is as follows:}\\
\\
&& \mbox{For k = 1 To N}\\
\\
Sum &=& 0\\
&&\mbox{For i = 1 To M}\\
Sum &=& Sum + x(i) / a ^ i\\
&& \mbox{Next i}\\
\\
yp &=& Sum\\
Sum &=& 0\\
&&\\
&& \mbox{For i = 1 To M}\\
&&\\
ix &=& i - b\\
iy &=& i + b\\
iz &=& i - c\\
iw &=& i + c\\
ix &=& ix - M \cdot Int(ix / M) + (1 - \sgn(ix)) \cdot M / 2\\
iy &=& iy - M \cdot Int(iy / M) + (1 - \sgn(iy)) \cdot M / 2\\
iz &=& iz - M \cdot Int(iz / M) + (1 - \sgn(iz)) \cdot M / 2\\
iw &=& iw - M \cdot Int(iw / M) + (1 - \sgn(iw)) \cdot M / 2\\
va &=& x(ix) \cdot (x(iy) + x(iz) + x(iw))\\
v1b &=& x(iy) \cdot (x(iz) + x(iw)) + x(iz) \cdot x(iw)\\
v &=& va + v1b\\
v1a &=& (x(ix) + x(iw)) \cdot x(iy) \cdot x(iz)\\
v1b &=& x(ix) \cdot x(iw) \cdot (x(iz) + x(iy))\\
v1 &=& v1a + v1b\\
v2 &=& x(ix) \cdot x(iy) \cdot x(iz) \cdot x(iw)\\
v3 &=& v - 3 \cdot v1 + 6 \cdot v2\\
u(i) &=& x(i) + v3 - 2 \cdot x(i) \cdot v3\\
&&\\
&& \mbox{Next i}\\
&&\\
&&\mbox{For ii = 1 To M}\\
ix &=& ii + 1\\
ix &=& ii - M \cdot Int(ii / M)\\
x(ii) &=& 1 - u(ix)\\
Sum &=& Sum + x(ii) / a ^ ii\\
&&\mbox{Next ii}\\
&&\\
xp &=& Sum\\
&&\\
{\bf Plot Point}&&\\
&&\\
&& \mbox{Next k}\\
\end{array}\right\}
\endq
\footnotesize
\begin{figure}[htbp]
\includegraphics[height=2.357in,width=2.337in,angle=0]{C:/Research/Book/Figures/eps/CPLXFG4B.eps}
\caption{{\sml This Figure is Constructed by Replacing the Sgn and Int Functions in Fig.~\ref{fg:cplxfg4} with their $\Ci$ Approximations.}}
\label{fg:cplxfg4b}
\end{figure}
\vs6\noi
An example of this is to reflect the digits in a number about a central number. For example, if $x=0.123456789$ then map $x$ to the number $0.987654321$. This is not easily done in a formula for a one-dimensional map, but it is very easy for a high-dimensional mapping. The transformation, \T, is given by $x_i \ra x_{N-i+1}$ where $N$ is the dimension of the space. In effect, this mapping scrambles the significance of the digits represented by the mapping, $\T$, restricted to points whose coordinate entries are in the set $\{0, 1\}$.
\vs6\noi
In 1988, Lin and Chua discovered a version of this type of chaos, which might be called discrete chaos, in digital filters, \cite{bi:cl}, \cite{bi:lc}. A symbolic dynamic structure for their findings was developed by Wu and Chua, \cite{bi:wc}. Ogorzalek examined the presence of complex behavior in digital filters noting that both chaotic and non-chaotic complex behaviors can be observed and examined using one-dimensional maps, \cite{bi:mo}.
\vs6\noi
This section provides a formal framework that explains their results. Specifically, complex dynamics can be observed in digital filters due to the conjugacy of their implicit high-dimensional maps to chaotic and complex one-dimensional maps similar to those given in the examples.
\vs6
\noi As mentioned, in order to generate complexity, it is not necessary to restrict attention to chaos. For example, it is possible to carry out the same constructions using skew translations or mappings which are weak mixing. The result is a high-dimensional dynamical system that maps ${\cal V}^n$ into itself in which the presence of the skew translation or weak mixing transformation is concealed by the large number of dimensions.
%=================Conway=====================
\section{\sml Conway Dynamics}
\label{sc:cd}
This section derives a dynamical system for John Conway's {\em Game of Life}. In this regard, Dogaru and Chua \cite{bi:dc} derived the simplest CNN realization of the Conway Game of Life. Their analysis realizes the Game of Life as a simple explicit formula involving only absolute value functions.
The analysis in this section derives, by an alternate approach, a dynamical system whose restriction to the lattice ${\cal V}^n$ is the Game of Life.
\vs6\noi Conway's cellular automata is an $M \times M$ array of cells each having the value 0 or 1. The rules for this automata are as follows:
\vs6\noi
1) If a cell has the value 1, and has either 0,1,4,5,6,7, or 8 neighbor cells having the value 1, the cell's value changes from 1 to 0. 2) If the cell has two or three neighbor cells with the value 1, its value remains 1. 3) If the cell has the value 0, and there are three neighbors with the value 1, its value changes from 0 to 1.
\vs6\noi
It is possible to derive the corresponding dynamical system utilizing the Boolean Dynamics of Sec.4. However, this approach would require over 100 terms in the coordinate transformation defining the game of life dynamics. A much shorter version can be obtained by utilizing a technique from \cite{bi:bc3}. With this approach, the $i^{th}$ coordinate can be expressed as
$x_i \ra x_i v_1+(1-x_i)v_2$
where $v_1=0.5(1+\sgn((y_i-a_{11})(a_{12}-y_i)+.05))$, $v_2=0.5(1+\sgn((y_i-a_{21})(a_{22}-y_i)+.05))$, and
\[y_i=\sum_{j=1}^8 x_{f_i(j)}\]
The function, $f_i(j)$, is given by $f_i(1)=(i+1), f_i(2)=i-1, f_i(3)= i-M-1, f_i(4)=i-M, f_i(5)=i-M+1, f_i(6)= i+M-1, f_i(7)=(i+M), f_i(8)=i+M+1$. $f$ is defined only for the integers one to eight since Conway's definition utilized only 8 cells of the cellular automata. For the game of life, set $a_{11}=1.5, a_{12}=3.0,a_{21}=2.5, a_{22}=3.0$.
\vs6\noi
Some care must be taken to ensure the boundary cells function properly, but this is not a problem. The value of this formulation is the ability to easily manipulate the parameters $a_{ij}$ appearing in the dynamical systems formulation. If $a_{21}$ is changed from 2.5 to 2.0, the dynamics change drastically. Whereas typically, the Conway parameters lead to attracting fixed points very quickly, this modification typically leads to very long transient chaotic orbits of thousands of points. Figure \ref{fg:cplxfg5a} is the one-dimensional map for a typical modified Conway orbit.
\begin{figure}[htbp]
\includegraphics[height=2.863in,width=2.977in,angle=0]{C:/Research/Book/Figures/eps/CPLXFG5A.eps}
\caption{{ \sml If the equations for the Game of Life are slightly modified, the orbits become complex.
}}
\label{fg:cplxfg5a}
\end{figure}
\tiny
\begq
\label{cd:cplx5a}
\left.\begin{array}{lcl}
&&\mbox{\sml The code for Fig. \ref{fg:cplxfg5a} is as follows:}\\
&&\mbox{\sml For i= 1 To; M}\\
&&\\
u1 &=& 4 \cdot z \cdot (1 - z)\\
x(i)& =& Int(0.5 + u1)\\
z &=& u1\\
Next\;\; i&&\\
&&\\
For\;\; ii\;\;& = &1\;\; To\;\; M1\\
For \;\;i& = &1 \;\;To \;\;M1\\
&&\\
i2 &= &(ii - 1) \cdot M1 + i\\
Next\;\; i&&\\
Next\;\; ii&&\\
\\
Sum = 0\\
For\;\;i &= & 1\;\;To\;\; M\\
Sum &= & Sum + x(i) / a ^ i\\
Next\;\; i&&\\
&&\\
For\;\; k &= & 1\;\; To \;\;N\\
sum1& =& Sum\\
Sum &= &0\\
&&\\
For \;\;i &= & 1 \;\;\;\;To M\\
testi &= & \sgn(i - M1 \cdot Int(i / M1))\\
test1 &= & \sgn((i - 1) - M1 \cdot Int((i - 1) / M1))\\
ik(1) &= & (i + 1) \cdot testi\\
ik(2) &= & (i - 1) \cdot test1\\
ik(3) &= & (i - M1 - 1) \cdot test1\\
ik(4) &= & i - M1\\
ik(5) &= & (i - M1 + 1) \cdot testi\\
ik(6) &= & (i + M1 - 1) \cdot test1\\
ik(7) &= & i + M1\\
ik(8) &= & (i + M1 + 1) \cdot testi\\
&&\\
For\;\;\;\;\;\; j &= & 1 To 8\\
test &= & ik(j)\\
If \;\;test &<& 1\;\; Then\;\; ik(j) = 0\\
If \;\;test &>& M\;\; Then \;\;ik(j) = 0\\
Next\;\; j&&\\
&&\\
v &= & 0\\
x(0) &= & 0\\
For\;\; j &= & 1\;\; To\;\; 8\\
v &= & v + x(ik(j))\\
Next\;\; j&&\\
&&\\
v1 &= & (1 + \sgn((v - a1) \cdot (a2 - v) + 0.005)) / 2\\
v2 &= & (1 + \sgn((v - a3) \cdot (a4 - v) + 0.005)) / 2\\
u(i) &= & x(i) \cdot v1 + (1 - x(i)) \cdot v2\\
Next\;\; i&&\\
&&\\
x(M) &= & u(M)\\
For i &= & 1 To M - 1\\
x(i) &= & u(i)\\
Sum &= & Sum + x(i) / a ^ i\\
Next i&&\\
&&\\
For\;\; ii &= & 1\;\; To\;\; M1\\
For \;\;i &= & 1 \;\;To \;\;M1\\
&&\\
i2 &= & (ii - 1) \cdot M1 + i\\
Next\;\; i&&\\
Next \;\;ii&&\\
&&\\
{\bf Plot Point} \\
&&\\
i1 &= & M \cdot Rnd(1)\\
For \;\;i &= & 1\;\; To\;\; 2\\
i1 &= & 123 \cdot i1\\
i1 &= & i1 - M \cdot Int(i1 / M) + 1\\
Next\;\; i&&\\
Next \;\;k&&
\end{array}\right\}
\endq
\footnotesize
%==================Stabilizing===========================================
\section{\sml Stabilization Issues}
In general, ${\cal V}^n$ is an unstable set when T is defined using Boolean functions of Sec. 4. As noted, if values slightly off of ${\cal V}^n$ are used as initial conditions, the orbit of the transformation will either rapidly converge to the origin, or go unbounded. However, it is possible to define a new transformation that agrees with T on ${\cal V}^n$ for which ${\cal V}^n$ is a stable set. This is done as follows:
Let T be a transformation defined as
\[{\rm T}(X)=\l(\vts f_1(X). \vdots . f_n(X) \par \r)\]
where $X \in {\cal V}^n$ and $f_i(X)\in \{0,1\}$ for each $i$, and each $f_i$ is a first order polynomial. Then the following transformation agrees with T on ${\cal V}^n$ and is stable.
\vs6
\noi An auxiliary function is useful:
\[{\rm S}(X)=\sum_{i=0}^n x_i^2 \cdot(1-x_i)^2\]
where $X=(x_1,x_2,x_3,\ldots, x_n)$. Using this function define a new map that agrees with T on ${\cal V}^n$ and is stable. This transformation is
\[ X \ra \exp(-({\rm S}(X)))T(X)\]
\vs6\noi
In addition to stabilizing the dynamical system, it can be made differentiable by substituting the hyperbolic tangent for the sgn function.
%15
%====================Almost Periodic Complexity================================
\chapter{Almost Periodic Dynamics}
\label{ch:ap}
\begin{center}
\parbox{3.5in} {\em Periodic and almost periodic systems are simple to define mathematically. In terms of ergodic theory, almost periodic systems are the lowest level of complexity: ergodic but not weak mixing; however, locally periodic and almost periodic systems may be as complex as chaotic systems.
\vs6\noi It is impossible to perform any experiment in real time to determine whether a system is simply almost periodic or is chaotic.}
\end{center}
\vs6\noi This chapter examines the role of almost periodic dynamics in four respects: (1) The level of complexity of almost periodic IDEs taken in isolation from nonlinear processes. (2) Another role is the impact of almost periodic forcing on nonlinear systems; this includes a comparison of almost periodic to periodic forcing. (3) A third role is the similarity in complexity of almost periodic time series and the unstable manifolds of chaotic systems. And (4) a series of almost periodic IDEs are presented to reveal the diversity of almost periodic systems.
\vs6\noi The point of this chapter is that almost periodic is, from a practical point-of-view, very complex even though on a global level it is only ergodic; and, that local dynamics, from a short term predictability point-of-view, are also very significant. The emergence of a sporadic disease, such as Ebola, is one of the best examples.
\vs6\noi This chapter should make clear that almost periodic dynamics provides a high level of complexity even though almost periodic dynamics are only slightly more complex than periodic dynamics. The point that is again emphasized is that it is the local dynamics that pose practical predictability challenges to scientists, especially engineers. The dynamics of almost periodic functions adds another layer of complexity that needs to be unraveled. In the case of the almost periodic functions, there are no transverse homoclinic points. However, the level of complexity, from a practical point-of-view, is challenging and may emulate PBS chaos.
\vs6\noi The level of complexity of almost periodic dynamics adds another challenge to unraveling the Hirsch Conjecture in that the form of an IDE, ideally, will make it possible to tell which systems are chaotic versus almost periodic. Almost periodic functions in high dimensions can have complexity rivaling chaos. This will be seen in Fig. \ref{fg:unstableap} below, and this represents a major challenge to resolving the Hirsch Conjecture.
%================================AP Complexity=============================
\section{\sml Almost Periodic Complexity}
\vs6\noi In Fig. \ref{fg:almostperiodic6d} are three plates of a periodic function in six dimensions. The images are the projection of the orbits of the function onto the plane with three-dimensional rotation to assist in clarifying the figure. The Plates only vary in a single parameter labeled $a$ found in the equation $s = 1 / (a + \cos((w / r) - \ln(1 + w / r)))$.
Figure \ref{fg:almostperiodic6d} illustrates how a single parameter can dramatically alter the dynamics of an almost periodic function. And further, the figure illustrates the importance of local dynamics.
\vs6\noi High dimensional equations represent systems containing many interacting forces, and in this case, very simple interacting forces. From the point-of-view of Global Dynamics and Ergodic theory, there is little to say about the system in Fig. \ref{fg:almostperiodic6d}. However, from an engineering point-of-view, this figure is very significant. If it were a model of a system, its local complexity would be of immediate interest.
\vs6\noi The evolution of the dynamics as the parameter $a$ varies is also one of the values of this figure. For $a$ large, only a slight local anomaly (circled in red) can be observed. As $a$ decreases, the complexity increases as might be conjectured by the form of the function $s$.
\begin{figure}[htbp]
\includegraphics[height=3.333in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/AlmostPeriodic6D.eps}
\caption{\sml A Periodic System in Six Dimensions. Plate A the Parameter $a=6.0$; in Plate B $a=1.2$; Plate C $a=1.0$}
\label{fg:almostperiodic6d}
\end{figure}
\begq
\label{cd:ap6d}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:almostperiodic6d} is as follows:}\\
z &=& 0.5\\
w &=& \sqrt{1 - z ^ 2}\\
h&=& 0.001\\
\alpha&=& 1.0\\
&& \mbox{For i = 1 to 200000}\\
z_1& = &z\cdot\cos(\alpha\, h) + w\cdot\sin(\alpha\, h)\\
w_1 &=& w\cdot\cos(\alpha\, h) - z\cdot\sin(\alpha\, h)\\
z& = &z_1\\
w &=& w_1\\
r &=& \sqrt{z ^ 2 + w ^ 2}\\
\\
u &=& z \cdot \cos(0.1 \cdot h) + w \cdot \sin(0.1 \cdot h)\\
v &=& w \cdot \cos(0.1 \cdot h) - z \cdot \sin(0.1 \cdot h)\\
\\
s &=& 1 / (a + \cos((w / r) - \ln(1 + w / r)))\\
\\
x &=& u \cdot \cos(s) + v \cdot \sin(s)\\
y &=& v \cdot \cos(s) - u \cdot \sin(s)\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.6in,width=4.6in,angle=0]{C:/Research/Book/Figures/eps/ApComplexity.eps}
\caption{\sml Almost Periodic Systems in Eight Dimensions}
\label{fg:apcomplexity}
\end{figure}
\begq
\label{cd:apcplx}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:apcomplexity} is as follows:}\\
z &=& 0.5\\
w &=& \sqrt{1 - z ^ 2}\\
h&=& 0.001\\
\alpha&=& 1.0\\
a_{1\,1}&=& 0.2\\
a_{1\,2}&=& \sqrt{1-a_{1\,1}}\\
&& \mbox{For i = 1 to 1000000}\\
z_1& = &z\cdot\cos(\alpha\, h) + w\cdot\sin(\alpha\, h)\\
w_1 &=& w\cdot\cos(\alpha\, h) - z\cdot\sin(\alpha\, h)\\
z& = &z_1\\
w &=& w_1\\
r &=& \sqrt{z ^ 2 + w ^ 2}\\
\\
u_1 &=& u11 \cdot \cos(0.1 \cdot h) + v11 \cdot \sin(0.1 \cdot h)\\
v_1 &=& v11 \cdot \cos(0.1 \cdot h) - u11 \cdot \sin(0.1 \cdot h)\\
\\
u_2 &=& a_{1\,1} \cdot z / r + a_{1\,2} \cdot w / r\\
v_2 &=& a_{1\,2} \cdot z / r - a_{1\,1} \cdot w / r\\
\\
s &=& 1 / (\cos((w / r) - \ln(1 + w / r)))\\
\\
x &=& u_1 \cdot \cos(s) + u_2 \cdot \sin(s)\\
y &=& u_2 \cdot \cos(s) - u_1 \cdot \sin(s)\\
\\
&&\mbox{\bf Plot Point}\\
\\
u11 &=& u_1\\
v11 &=& v_1\\
&& \mbox{Next i}
\end{array}
\right \}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.163in,width=4.473in,angle=0]{C:/Research/Book/Figures/eps/ApComplexity02.eps}
\caption{\sml Two Periodic Systems in Eight Dimensions that ``Appear'' Chaotic}
\label{fg:apcomplexity02}
\end{figure}
\vs6\noi Figure \ref{fg:apcomplexity02} is precisely periodic demonstrating that high dimensional periodic systems can rival chaotic systems in lower dimensions. Plate A differs from the previous figure in that $s = \cos((w / r) - \ln(1 + w / r))$. In Plate B, $s = 1 / (0.5 \cdot r + 0.85 \cdot \cos(2 \cdot z / r))$.
%========================================AP forcing=============================
\section{\sml Almost Periodic Forcing}
\label{sc:apforcing}
\vs6\noi The second significance of almost periodic processes in nature is that they combine with nonlinear and chaotic processes to form natural phenomena. Figure \ref{fg:nonlineartrans} illustrates how a nonlinear rotation (a twist) transforms under folding and then almost periodic forcing.
\begin{figure}[htbp]
\includegraphics[height=3.103in,width=3.57in,angle=0]{C:/Research/Book/Figures/eps/NonLinearTrans.eps}
\caption{\sml The Evolution of a Nonlinear Rotation (stretching) followed by Folding and then Almost Periodic Forcing}
\label{fg:nonlineartrans}
\end{figure}
\vs6\noi Plate A for Fig. \ref{fg:nonlineartrans} is given by the equations
\[
\l(\vt \dot{x}. \dot{y}\par\r)=\l(\vt y/r. -x/r \par\r)\]
where $r=\sqrt{x^2+y^2}$. This equation could describe a very simple ideal fluid flow. The associated IDE is
\[\T(\X)=\exp(h\, r\, \B)\,\X\]
where
\[\B= \l(\mtx 0.1.-1.0\par\r)\]
In Plate B, are the integral curves of the twist composed with a 180 degree rotation (folding). In plate C, an almost periodic force is added to the stretching and folding dynamic. The result is a natural snap shot of a process that could appear in chaotic fluid flow.
\begin{figure}[htbp]
\includegraphics[height=2.937in,width=4.137in,angle=0]{C:/Research/Book/Figures/eps/APTwist.eps}
\caption{\sml Time-one Map of an Almost Periodic Forced Twist}
\label{fg:APTwist}
\end{figure}
\vs6\noi Figure \ref{fg:APTwist} illustrates the complexity that occurs when forcing is almost periodic rather than periodic. This is far more common in nature than periodic forcing.
\begin{figure}[htbp]
\includegraphics[height=2.477in,width=2.047in,angle=0]{C:/Research/Book/Figures/eps/ComplexAPTwist.eps}
\caption{\sml Time-one Map of a Complex Almost Periodic Forced Twist}
\label{fg:CAPTwist}
\end{figure}
\vs6\noi Figure \ref{fg:CAPTwist} is an example of how complex time-one maps become when almost periodic dynamics are combined in a complex way with a twist.
\vs6\noi So it is clear that complexity is a combination of many shifts which in turn are likely combined with very complicated almost periodic maps, see Fig.~\ref{fg:unstableap}. Identifying the presence of bilateral shifts on Cantor sets is the start of analyzing the complexity in such systems as tornados; it is a red flag; but, a bilateral shift on a Cantor set (which may have measure zero) does not tell a complete story and this may explain why the prediction of long-term dynamics is so difficult and why the Hirsch Conjecture is so challenging.
\vs6\noi In short, the prediction of the progress and outcome of disease, storms and the clash of armies are not well served by simply saying that they are chaotic. The inner working of the chaos of complex systems is relevant to their understanding and therefore to the decision making processes required to address the threat they pose to human life.
\begin{figure}[htbp]
\includegraphics[height=2.703in,width=3.693in,angle=0]{C:/Research/Book/Figures/eps/APChaos.eps}
\caption{\sml Attractor for Almost Periodic Forced Twist}
\label{fg:apchaos}
\end{figure}
\begq
\label{cd:za01}
\left.
\begin{array}{lcl}
&&\mbox{{\bf The code for Fig. \ref{fg:apchaos} is as follows:}}\\
&& \mbox{For i = 1 to N}\\
r &=& 1 / \sqrt{((x - 1) ^ 2) + (y ^ 2)}\\
u &=& ((x - 1) \cdot \cos(r)) - (y \cdot \sin(r)) + 1 - 0.3 \cdot \cos(x)\\
v &=& (y \cdot \cos(r)) + ((x - 1) \cdot \sin(r))\\
x &=& -u\\
y &=& -v\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.05in,width=3.347in,angle=0]{C:/Research/Book/Figures/eps/APChaos02.eps}
\caption{\sml Attractor for Almost Periodic Forced Twist}
\label{fg:apchaos02}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.79in,width=2.827in,angle=0]{C:/Research/Book/Figures/eps/APChaos03.eps}
\caption{\sml Attractor for Almost Periodic Forced Twist}
\label{fg:apchaos03}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.043in,width=2.723in,angle=0]{C:/Research/Book/Figures/eps/APChaos04.eps}
\caption{\sml Attractor for Almost Periodic Forced Twist}
\label{fg:apchaos04}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.903in,width=3.05in,angle=0]{C:/Research/Book/Figures/eps/APChaos05.eps}
\caption{\sml Attractor for Almost Periodic Forced Twist}
\label{fg:apchaos05}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.723in,width=2.853in,angle=0]{C:/Research/Book/Figures/eps/APChaos06.eps}
\caption{\sml Attractor for Almost Periodic Forced Twist}
\label{fg:apchaos06}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.267in,width=4.1in,angle=0]{C:/Research/Book/Figures/eps/APChaos08.eps}
\caption{\sml Attractor for Almost Periodic Forced Twist}
\label{fg:apchaos08}
\end{figure}
%=============Almost Periodic Dynamics versus Chaos======================
\section{\sml Almost Periodic Dynamics versus Chaos}
\label{sc:apchaos}
This section compares the time series of an almost periodic function with the unstable manifold of a chaotic system.
\begq
\label{cd:usap}
\left.
\begin{array}{lcl}
&& \mbox{ The code for Plate {\bf A} of Fig. \ref{fg:unstableap} is as follows: }\\
&& \mbox{For i = 1 To N}\\
t &\ra& t + 0.0002\\
g(t)& = &\cos(0.01\, t)\\
h(t)& =& 1.1 + \cos( t)\\
u &=& f(5\,\sqrt{ g(t) ^ 2 + h(t) ^ 2} )\\
\\
x &=& 10\,( g(t) \cdot \cos(u) + h(t) \cdot \sin(u))\\
y &=& 10\,( h(t) \cdot \cos(u) - g(t) \cdot \sin(u))\\
&& \mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi where
\[f(v)= 2 / \sin((1 / v) - 2 \cdot \log(1 / v))\]
\begin{figure}[htbp]
\includegraphics[height=2.18in,width=4.3in,angle=0]{C:/Research/Book/Figures/eps/UnstableAP.eps}
\caption{Geometry of an Almost Periodic Time Series (A) compared to an Unstable Manifold (B)}
\label{fg:unstableap}
\end{figure}
\vs6\noi There is a hyperbolic fixed point at (0.0, -0.21065) with slope 0. Two loops are required to generate the unstable manifold. The outer loop forms a small line segment of length 0.00001 tangent to the unstable manifold. Points from this line will be used as initial conditions and iterated 8 times in the inner loop by the diffeomorphism.
\begq
\label{cd:usap02}
\left.
\begin{array}{lcl}
&&\mbox{The code for Plate B for Fig. \ref{fg:unstableap} is as follows}\\
&&\mbox{For i = 1 To 155000}\\
x_0 &= &(-1 + (2 \cdot (i - 1) / 155000)) \cdot 0.0001\\
y_0 &=& -0.21065\\
x &=& x_0\\
y &=& y_0\\
\\
&& \mbox{For j = 1 To 8}\\
r &=& 1 / \sqrt{(x - 0.2) ^ 2 + y ^ 2}\\
r &\ra &(r - r \cdot \log(r))\\
u &= &(x - 0.2) \cdot \cos(r) - y \cdot \sin(r) + 0.2\\
v &= &y \cdot \cos(r) + (x - 0.2) \cdot \sin(r)\\
x &= &-u\\
y &= &-v\\
\\
&&\mbox{\bf Plot Point}\\
\\
&&\mbox{Next j}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
%==========Almost Periodic Dynamics===================
\section{\sml Almost Periodic Time Series}
\label{sc:ap}
This section presents a series of examples of almost periodic dynamics in order to display the range of complexity these systems may present and therefore the relevance of their complexity to resolving the Hirsch Conjecture. In each case the figures are projections of a high-dimensional almost periodic system onto the two-dimensional plane. As such, intersections of the orbit will be seen, something that cannot occur in two-dimensional solutions of ODEs having unique solutions.
\vs6\noi Figure \ref{fg:aponed} is a high-dimensional almost periodic system projected onto the $x-y$ plane. The image is an attractor.
\begin{figure}[htbp]
\includegraphics[height=2.427in,width=2.417in,angle=0]{C:/Research/Book/Figures/eps/APoned.eps}
\caption{ Almost Periodic Time Series}
\label{fg:aponed}
\end{figure}
{\bf Code for Fig. \ref{fg:aponed}}
\vs6\noi For i = 1 To N
\[\begin{array}{lcl}
t &=& i \cdot h\\
x &=& 1.45 \cdot \cos(y - 4.5 \cdot z)\\
{\bf Plot\; Point}\\
y &=& x\\
\end{array}\]
Next i
\begin{figure}[htbp]
\includegraphics[height=2.62in,width=2.647in,angle=0]{C:/Research/Book/Figures/eps/SubChaos01.eps}
\caption{ Almost Periodic Time Series}
\label{fg:subchaos01}
\end{figure}
\begq
\label{cd:sub01}
\left.
\begin{array}{lcl}
&& \mbox{The code for Fig. \ref{fg:subchaos01} is as follows:}\\
&&\mbox{In place of a time variable, the HOC has been used as per Sec.~\ref{sc:hoc}}\\
\\
&&\mbox{For i = 1 To N}\\
&&\mbox{Harmonic Oscillator Code}\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
r &=& \sqrt{z ^ 2 + w ^ 2}\\
z_1 &=& z\\
w_1 &=& w\\
&&\mbox{Transformation Code}\\
x_1 &=& x\\
y_1 &=& y\\
u_1 &=& y_1 + w/r\\
v_1 &=& x_1 + w/r\\
u &=& -0.832 \cdot \cos(u_1) + 1.144 \cdot \sin(u_1)\\
v &=& 1.144 \cdot \cos(v_1) +0.832 \cdot \sin(v_1)\\
\\
x &=& u\\
y &=& v\\
&&\mbox{{\bf Plot Point}}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.62in,width=2.647in,angle=0]{C:/Research/Book/Figures/eps/SubChaos02.eps}
\caption{ Almost Periodic Time Series}
\label{fg:subchaos02}
\end{figure}
\begq
\label{cd:sub02}
\left.
\begin{array}{lcl}
&&\mbox{The code for Fig. \ref{fg:subchaos02} is as follows:}\\
\\
&&\mbox{For i = 1 To N}\\
z &=& z_1\, \cos(h) + w_1\,\sin(h) \\
w &=& w_1\,\cos(h) - z_1\,\sin(h)\\
z_1 &=& z\\
w_1 &=& w\\
\\
\theta &=& 2\, \pi \cdot \arctan(z, w)\\
x_1 &=& 30 \cdot \cos(0.1 \cdot \theta)\\
y_1 &=& 10 \cdot \cos(8 \cdot \theta)\\
r_0 &=& 0.5\,\sqrt{x_1 ^ 2 + y_1 ^ 2}\\
r&=& \ln(r_0)/r_0\\
u &=& x_1 \cdot \cos(r) + y_1 \cdot \sin(r)\\
v &=& y_1 \cdot \cos(r) - x_1 \cdot \sin(r)\\
x &=& u\\
y &=& v\\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.85in,width=2.907in,angle=0]{C:/Research/Book/Figures/eps/SubChaos03.eps}
\caption{ Almost Periodic Time Series}
\label{fg:subchaos03}
\end{figure}
\begq
\label{cd:sub03}
\left.
\begin{array}{lcl}
&&\mbox{The code for Fig. \ref{fg:subchaos03} is as follows:}\\
x &=& 0.9\\
y &=& 0\\
z_1 &=& 1\\
w_1 &=& 0\\
\\
&&\mbox{For i = 1 To N}\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
\theta &=& \arctan(z/ w)\\
\\
y &=& x\\
x &=& (7 \cdot y ^ 2 / (2 + (y ^ 2))) + 5.6 \cdot \sin(3 \cdot \theta)\\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.903in,width=2.427in,angle=0]{C:/Research/Book/Figures/eps/SubChaos04.eps}
\caption{ Almost Periodic Time Series}
\label{fg:subchaos04}
\end{figure}
\begq
\label{cd:sub04}
\left.
\begin{array}{lcl}
&&\mbox{The code for Fig. \ref{fg:subchaos04} is as follows:}\\
&&\mbox{For i = 1 To N}\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
\theta &=& \arctan(z/ w)\\
\\
x_1 &=& x\\
y_1 &=& y\\
x_1 &=& x_1 - 2 \cdot \sin(\theta)\\
r &=& \sqrt{x_1 ^ 2 + y_1 ^ 2}\\
x &=& 1.7 \cdot y_1 / (r + 1)\\
y &=& -1.7 \cdot x_1 / (r + 1)\\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.62in,width=2.65in,angle=0]{C:/Research/Book/Figures/eps/SubChaos05.eps}
\caption{ Almost Periodic Time Series}
\label{fg:subchaos05}
\end{figure}
\begq
\label{cd:sub05}
\left.
\begin{array}{lcl}
&& \mbox{The code for Fig. \ref{fg:subchaos05} is as follows:}\\
&&\mbox{For i = 1 To N}\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
t &=& \arctan(z/ w)\\
\\
y &=&x\\
x &=& 1.28 \cdot \cos(y + 12 \cdot \sin(t)) + 1.6 \cdot \sin(y)\\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.5in,width=2.553in,angle=0]{C:/Research/Book/Figures/eps/SubChaos06.eps}
\caption{ Almost Periodic Time Series}
\label{fg:subchaos06}
\end{figure}
\begq
\label{cd:sub06}
\left.
\begin{array}{lcl}
&&\mbox{The code for Fig. \ref{fg:subchaos06} is as follows:}\\
&&\mbox{For i = 1 To N}\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
\theta &=& \arctan(z/ w)\\
\\
r_0 &=& \sqrt{\cos(0.1 \cdot \theta) ^ 2 + (7 \cdot \cos(6 \cdot \theta)) ^ 2}\\
r &=& \exp(0.1 \cdot \sin(0.8 \cdot r_0))\\
u &=& \cos(0.1 \cdot \theta) \cdot \cos(r) \cdot \cos(\theta)\\
v &=& \cos(0.1 \cdot \theta) \cdot \cos(r) \cdot \sin(\theta)\\
\\
x &=& u\\
y &=& v\\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\begin{figure}[htbp]
\includegraphics[height=2.833in,width=4.8in,angle=0]{C:/Research/Book/Figures/eps/APVariation.eps}
\caption{ Plate A: $\gamma=0.8$\; Plate B: $\gamma=0.5$}
\label{fg:apvariation}
\end{figure}
\begq
\label{cd:apv}
\left.
\begin{array}{lcl}
&&\mbox{The code for Fig. \ref{fg:apvariation} is as follows:}\\
&&\mbox{For i = 1 To N}\\
z &=& \cos(h) \cdot z_1 + \sin(h) \cdot w_1\\
w &=& \cos(h) \cdot w_1 - \sin(h) \cdot z_1\\
z_1 &=& z\\
w_1 &=& w\\
\theta &=& \arctan(z/ w)\\
\\
r_0 &=& \sqrt{\cos(0.1\cdot \theta)^2 + (20 \cdot \cos(0.4 \cdot \theta)) ^ 2}\\
r &=& \exp(\gamma \cdot \sin(0.4 \cdot r_0))\\
u &=& \cos(r) \cdot \cos(\theta)\\
v &=& \cos(r) \cdot \sin(\theta)\\
\\
x &=& u\\
y &=& v\\
\\
&&\mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right\}
\endq
\vs6\noi That the only change is in the value of $\gamma$ demonstrates how a small change in a parameter can lead to dramatic changes in the orbit of an almost periodic system.
%=========================FUNCTIONS OF AN IDE=======================================
\part{Mathematical Theory}
\label{pr:mt}
%16
%================================FORMAL DEFINITION=====================
\chapter{Formal IDE Theory}
\label{ch:formal}
\begin{center}
\parbox{3.5in}{\em The mathematical theory of IDEs provides the formal framework for applying IDE theory to solve and model scientific and engineering problems.}
\end{center}
\section{\sml Formal Definition of IDEs}
\label{sc:fd}
The formal definition of IDEs is presented to provide a rigorous starting point for a formal mathematical theory. The definition must reflect that, when the IDE is associated to the solution of an ODE, the orbit of the IDE closely resembles a continuous time series at least as well as a numerical integration scheme will produce for the ODE. That is its most important characteristic. The graphical representation of the orbit is something of a signature of the IDE that provides immediate intuitive information about the dynamics of the IDE, both locally and globally. If the orbit is ``periodic'' it will most often be clear from the graph. If the orbit is chaotic, the graphical representation of the orbit will, in the presence of damping, reflect a ``strange attractor''. Thus, it is essential that, inherent in the definition of an IDE, the graphical representation of the orbit provides the most elementary insights into the dynamics.
\vs6\noi Other essential characteristics of IDEs are that they have a starting point that will correspond to the initial condition of an ODE and that there is some form of boundedness condition as a matter of practicality.
\vs6\noi \begin{center} {\bf The Axioms of Infinitesimal Diffeomorphism Equations (IDEs)} \end{center}
\[\begin{array}{lll}
{\rm (1)} & \T_h \mbox{ is a one parameter family of measurable transformations on some measure space}&\\
&\Mx \subseteq \Cx^n \;\mbox {or}\;\;\; \Rl^n&\\
{\rm (2)} & \forall \X, \; \T_0(\X)=\X&\\
{\rm (3)} & \exists \; M>0 \, \ni \, \forall \, \X, \|\T_h(\X)\| \leq M\, \|\X\|\\
{\rm (4)} & \exists \; M,\; N >0 \, \ni \, \forall \, \X, \|\T^N_h(\X)-\X\| \leq M\, \|\X\| \, h
\end{array}\]
%========
\vs6\noi The fundamental proposition connecting IDEs to ODEs is
\begin{proposition}
\label{pr:fp}
Let
\begq
\label{eq:fnl1}
\dot{\X}=\A(\X)\, \X
\endq
and assume that $\|\A(\X)\|, \, \|\A'(\X)\|\leq M_0$
\vs6\noi Then over a sufficiently small time interval $h$
\[\|\X(t)-\exp(h\, \A(\X))\, \X\|\leq (h^2\,M_0^2 +o(h^2))\,\|\X\|\]
\end{proposition}
\pf
\[\dot{\X}-\A(\X)\, \X=0\]
\[\exp\l(-\int_0^t\, \A(\X(s))\, ds\r)( \dot{\X}-\A(\X)\, \X)=0\]
\[\exp\l(-\int_0^t\, \A(\X(s))\, ds\r) \dot{\X}-\exp\l(-\int_0^t\, \A(\X(s))\, ds\r)\A(\X)\, \X=0\]
\[\frac{d}{dt}\l(\exp\l(-\int_0^t\, \A(\X(s))\, ds\r)\cdot \X(t)\r)=0\]
\[\exp\l(-\int_0^t\, \A(\X(s))\, ds\r)\cdot \X(t)=\X_0\]
\[ \X(t)=\exp\l(\int_0^t\, \A(\X(s))\, ds\r) \,\X_0\]
Over a sufficiently small time interval $h$, applying the mean value theorem
\[ (\exp(h\, \A(\X(\xi))) -\exp(h\, \A(\X_0)))\,\X_0\]
\[ (h\, (\A(\X(\xi)) - \A(\X_0))+o(h^2))\,\X_0\]
Taking norms
\[\| h\, \A'(\X(\eta))(\X(\xi) - \X_0)+o(h^2)\,\X_0\| \leq \| h\, \A'(\X(\eta))\| \,\|\X(\xi) - \X_0\|+\|o(h^2)\,\X_0\|\]
\[\| h\, \A'(\X(\eta))\| \,\|\X(\xi) - \X_0\|+\|o(h^2)\,\X_0\|\leq h\,M_0 \,\|\X(\xi) - \X_0\|+\|o(h^2)\,\X_0\|\]
\[ h\,M_0 \,\|h\,\A(\X_0)\,\X_0\|+\|o(h^2)\,\X_0\| \leq (h^2\,M_0^2 +o(h^2))\,\|\X_0\|\leq h\, M\, \|\X_0\|\]
for some constant $M$.
\rl
\vs6\noi Proposition \ref{pr:fp} states that the IDE, $\T_h(\X)=\exp(h \, \A(\X))\,\X$ is locally an approximation of the ODE, Eq. \ref{eq:fnl1}, to at least order $h$. In summary
the approximate solution of Eq. \ref{eq:fnl1} locally is $\exp(h\, \A(\X))\, \X$ and as $h\ra 0$ the IDE is as close to the actual solution as desired.
\begin{theorem}
\label{tm:loc}
If $\T_h(\X) 0$ for all $x,\,y,\,t$.
\vs6
\mhs21Then
\[T_t\l(\vt x. y\par \r)=\l(\vt x.
y\exp(\Omega(x)\,t)+c(x)(\exp(\Omega(x)\,t)-1)/\Omega(x) \par \r)\]
where $c(x)$ and $\Omega(x)$ are $C^\infty$ functions of $x$
and $T_t$ is a one parameter group of dilation/contraction maps.
If this family is to be the simplest possible, then
$c(x)=0$.
\end{theorem}
The proof follows from the following four lemmas.
\begin{lemma}
\label{lm:dc01}
\[f(x,y,t)=\int_0^y \exp(t\,g(x,\eta,t))d\eta+a\, \int_0^t
\exp(\eta\,g(x,0,\eta))d\eta\]
\end{lemma}
\pf First observe that $f(x,y,t)=\int_0^y
\exp(g(x,\eta,t))d\eta+h(x,t)$ by partial integration with
respect to $y$. By the group property $f(x,y,0)=y$ for all $y$
hence $y=\int_0^y \exp(g(x,\eta,0))d\eta +h(x,0)$.
Differentiating this expression with respect to $y$ gives
$\exp(g(x,y,0))=1$ for all $x,y$. Hence, $g(x,y,0)=0$ for all
$x,y$. This last result means that $g(x,y,t)=t\,k(x,y,t)$ for
some function $k$. Rewrite $f$ (using the
letter $g$ in place of the letter $k$ for convenience) as
follows:
\[f(x,y,t)=\int_0^y \exp(t\,g(x,\eta,t))d\eta+h(x,t)\]
From this conclude that $h(x,0)=0$ for all $x$.
\vs6\noi
Now let \[T_t\l(\vt x. y\par\r)=\l(\vt x. y_1\par \r)\]
Hence \[y_1=\int_0^y \exp(t\,g(x,\eta,t))d\eta +h(x,t)\]
Using the group property $T_{s+t}=T_s \circ T_t$ gives
\[T_s\l(\vt x. y_1\par \r)=\l(\vt x. y_2\par \r)\]
where
\[y_2=\int_0^{y_1}\exp(s\,g(x,\eta,s))d\eta +h(x,s)=
\int_0^y \exp((s+t)\,g(x,\eta,s+t))d\eta +h(x,s+t)\]
For $y=0$, $y_1=h(x,t)$ so that
\[\int_0^{h(x,t)}\exp(s\,g(x,\eta,s))d\eta +h(x,s)=h(x,s+t)\]
Differentiating this expression with respect to $t$ gives
\[\exp(s\,g(x,h(x,t),s))\,h'(x,t)= h'(x,s+t)\]
where the partial differentiation notation has been omitted since
$x$ is always a constant throughout all proofs.
Since $h(x,0)=0$, and for $t=0$ the equation is obtained
\[\exp(s\,g(x,0,s))\,h'(x,0)= h'(x,s)\]
This is a differential equation for $h$ from which it can be concluded
\[h(x,t)=a\, \int_0^t \exp(\eta\,g(x,0,\eta))d\eta\]
where $a=h'(x,0)$,
and so
\[f(x,y,t)=\int_0^y \exp(t\,g(x,\eta,t))d\eta+a\, \int_0^t
\exp(\eta\,g(x,0,\eta))d\eta\]\rl
\begin{lemma}
\label{lm:dc02}
For all $a,s,x,y$
\begin{eqnarray*}
a\,\exp(s\,g(x,y,s)) &= & a\,\exp(s\,g(x,0,s))+\\
\int_0^{y} \exp(s\,g(x,\eta,s))(g(x,\eta,s) &+&
s\frac{\pr g(x,\eta,s)}{\pr t})d\eta
\end{eqnarray*}
\end{lemma}
\pf From the result of lemma \ref{lm:dc01} apply the group property again to obtain
\begin{eqnarray*}
\int_0^{y_1}\exp(s\,g(x,\eta,s))d\eta+a\, \int_0^s
\exp(\eta\,g(x,0,\eta)) & =&\\
\int_0^{y} \exp((s+t)\,g(x,\eta,s+t))d\eta+a\, \int_0^{s+t}
\exp(\eta\,g(x,0,\eta))d\eta& &
\end{eqnarray*}
Since,
\[h(x,s+t)=h(x,s)+\int_0^{h(x,t)} \exp(s\,g(x,\eta,s))d\eta\]
this expression simplifies to
\[\int_0^{y_1} \exp(s\,g(x,\eta,s))d\eta=
\int_0^{y} \exp((s+t)\,g(x,\eta,s+t))d\eta+\int_0^{h(t)}
\exp(s\,g(x,\eta,s))d\eta\]
Differentiating this result with respect to $t$ gives
\[\exp(s\,g(x,y_1,s))\dot{y}_1 = \]
\[\int_0^{y} \exp((s+t)\,g(x,\eta,s+t))(g(x,\eta,s+t)+(s+t)
\frac{\pr g(x,\eta,s+t)}{\pr t})d\eta\]
\[+\exp(s\,g(x,h(x,t),s))\dot{h} \]
for $t=0$
\begin{eqnarray*}
a\,\exp(s\,g(x,y,s)) & =& a\,\exp(s\,g(x,0,s))\\
&+ &\int_0^{y} \exp(s\,g(x,\eta,s)) \l( g(x,\eta,s)+s\frac{\pr
g(x,\eta,s)}{\pr t} \r)d\eta
\end{eqnarray*}\rl
\begin{lemma}
\label{lm:dc03}
\[g(x,y,t)= F(x,a\,t+y)\]
\end{lemma}
\pf Beginning with the result in lemma \ref{lm:dc02} and
differentiating this expression with respect to $y$
gives
\[s\,a\,\exp(s\,g(x,y,s)) g_y(x,y,s)=
\exp(s\,g(x,y,s))\l(g(x,y,s)+s\frac{\pr g(x,y,s)}{\pr t}\r)\]
which may be rewritten as
\[t\,a\, g_y(x,y,s)=
g(x,y,s)+t\frac{\pr g(x,y,t)}{\pr t}\]
or
\[t\,a\, g_y= g+t\, g_t\]
The general solution of this partial differential equation is given by:
\[g(x,y,t)=c/t+ F(x,a\,t+y)\]
where $F$ is an arbitrary function and $c$ is an arbitrary
constant. Since $g$ is required to be differentiable everywhere in
all variables $c=0$.\rl
\begin{lemma}
\label{lm:dc04}
\[F(x,u)=\Omega(x)\]
\end{lemma}
\pf Substituting this information into the equation for $f(x,y,t)$
and applying the group property with $s=-t$ gives the functional
equation
\[F(x,a\,t+y)=F(x,y_1-a\,t)\]
If $F$ is not a constant with respect to the second variable
then differentiate this equation with respect to the
second variable and divide out the derivative to get
\[a \frac{\pr y_1}{\pr y}=\dot{y}_1-a\]
It is known
\[ \frac{\pr y_1}{\pr y}=\exp(t\,F(x,a\,t+y))\]
for all $t$ so that
\[\dot{y}_1=a\,\exp(t\,F(x,a\,t+y))+
\int_0^{y} \exp(t\,F(x,a\,t+\eta))d\eta\]
from this conclude that
\[
a\,+a\,\exp(t\,F(x,a\,t+y)) = a\,\exp(t\,F(x,a\,t+y))+\]
\[\int_0^{y} \exp(t\,F(x,a\,t+\eta))F(x,a\,t+\eta)d\eta\]
or
\[a\,=
\int_0^{y} \exp(t\,F(x,a\,t+\eta))F(x,a\,t+\eta)d\eta\]
for all $t$, implying that $F(x,u)=0$ for all $u$. But this is a
contradiction, hence $F(x,u)=\Omega(x)$.
\rl
%************************************************
\subsection{\sml The Convolution Theorem}
%**********************************
\begin{theorem} (Twist Convolution Theorem)
Let
\[\zb(t)=\l(\vt x(t). y(t)\par\r)\]
be a solution of the following ODE
\[\left (\vt \dot{x}. \dot{y}\par \right)=r(t) \left (\mtx 0. -1.
1. 0\par \right)\left (\vt x-b(t). y \par \right )\]
where $r(t)=\sqrt{(x-b(t))^2+y^2}$.
\vs6
Then the following is true:
\vs6
\mhs21(1)
\[\zb(t)=\s(t)+\A\l(\int_0 ^t\,r(\eta)d\eta
\r)(\zb_0-\s(0))-\int_0^t\,\A\l(\int_\eta
^t\,r(\xi)d\xi\r)d\s(\eta)\]
\vs6
\mhs21(2) If $\ub(t) \equiv \zb(t)-\s(t)$, then
\[\|\ub(t)+\int_0^t\,\A\l(\int_\eta
^t\,\|\ub(\xi)\|d\xi\r)d\s(\eta)\|=\|\A\l(\int_0
^t\,\|\ub(\eta)\|d\eta \r)\ub_0\|=\|\ub_0\|\]
\vs6
\mhs21(3)
If the function $b(t)=b_0$ is a constant then
\[\|\ub(t)\|=\|\A\l(\int_0
^t\,\|\ub(\eta)\|d\eta\r)\ub_0\|=\|\ub_0\|\]
and hence
\[\ub(t)=\A(\ub_0\,t)(\ub_0)\]
\end{theorem}
\pf
\vs6\noi Geometric considerations show that at a fixed time $t_0$
and over a small time
increment $h$, the forced twist equation must be described
approximately by:
\[\zb(t_0+h)=A(h\,r(t_0))(\zb(t_0)-\s(t_0))+\s(t_0)\]
where
\[\zb(t)=\left (\vt x. y\par \right )\]
and where
\[\s(t)=\left (\vt b(t). 0\par \right )\]
Also, $A(u)$ occurring in the above and later expressions is the matrix
\[A(u)=\left (\mtx \cos(u). -\sin(u). \sin(u). \cos(u) \par
\right )\]
\vs6\noi This is the solution of the twist-and-flip ODE
\[\left (\vt \dot{x}. \dot{y}\par \right)=r(t) \left (\mtx 0. -1.
1. 0\par \right)\left (\vt x-a. y \par \right )\]
where $a=b(0)$ and $r(t)=\sqrt{(x-a)^2+y^2}$
over a short time interval, $h$. Iterating this expression
starting at an initial point $\zb_0$ gives
\[\zb_1=A(h\,r_0)(\zb_0-\s(0))+\s(0)\]
\[\zb_2=A(h\,r_1)(\zb_1-\s(h))+\s(h)\]
\[\zb_3=A(h\,r_2)(\zb_2-\s(2h))+\s(2h)\]
where, $r_i=\|\zb_i-\s(i\,h)\|$. In general,
\[\zb_{k+1}=A(h\,r_k)(\zb_k-\s(kh))+\s(kh)\]
so that the composition is given by,
\[\zb_{N+1}=A(h\,r_N)A(h\,r_{N-1})A(h\,r_{N-2}) \dots
A(h\,r_0)(\zb_0-\s(0))+\]
\[\sum_{k=1}^N A(h\,r_N)A(h\,r_{N-1})A(h\,r_{N-2}) \dots
A(h\,r_{N-k})(b((k-1)h)-b(kh))+ b(Nh)\]
\[=A(h\sum_{i=0}^N\, r_i)(\zb_0-\s(0))+\]
\[\sum_{k=1}^N\,A(h\,\sum_{i=0}^k\,r_{N-i})(\s((k-1)h)-\s(kh))+\s(Nh)\]
as $h \ra 0$, this expression becomes
\[\zb(t)=\s(t)+A\l(\int_0 ^t\,r(s)ds\r)(\zb_0-\s(0))-\int_0^t\,A\l(\int_s
^t\,r(\xi)d\xi\r)d\s(s)\]
This must be considered a Stieltjes integral when $\s(t)$ is not
differentiable.
Now make a change of variable $\ub(t)=\zb(t)-\s(t)$. Then we
have
\[\ub(t)=A\l(\int_0 ^t\,\|\ub(s)\|ds\r)\ub_0-\int_0^t\,A\l(\int_s
^t\,\|\ub(\xi)\|d\xi\r)d\s(s)\]
or
\[\ub(t)+\int_0^t\,A\l(\int_s
^t\,\|\ub(\xi)\|d\xi\r)d\s(s)=A\l(\int_0 ^t\,\|\ub(s)\|ds\r)\ub_0\]
so that,
\[\|\ub(t)+\int_0^t\,A\l(\int_s
^t\,\|\ub(\xi)\|d\xi\r)d\s(s)\|=\|A\l(\int_0
^t\,\|\ub(s)\|ds\r)\ub_0\|=\|\ub_0\|\]
This last line shows that the forced twist-and-flip has an
integral invariant.
If the function $b(t)=b_0$ is a constant then
\[\|\ub(t)\|=\|A\l(\int_0
^t\,\|\ub(s)\|ds\r)\ub_0\|=\|\ub_0\|\]
and hence
\[\ub(t)=A(\ub_0\,t)(\ub_0)\]
which is the twist part of the twist-and-flip map, when evaluated
at $t=\pi/\omega$.
\vs6\noi
Now show that the function obtained in this implicit
equation is the solution of the forced twist-and-flip ODE stated
at the beginning of this section.
\vs6
Recall that
\[\zb_{k+1}=A(h\,r_k)(\zb_k-\s(kh))+\s(kh)\]
for small $h$ this is
\[\zb_{k+1}-\zb_k=A(h\,r_k)(\zb_k-\s(kh))+\s(kh)-\zb_k=\]
\[A(h\,r_k)(\zb_k-\s(kh))-(\zb_k-\s(kh))=\]
\[(A(h\,r_k)-I)(\zb_k-\s(kh))\]
hence,
\[(\zb(t+h)-\zb(t))/h=(A(h\,r(t))-I)(\zb(t)-\s(t))/h\]
taking limits as $h \ra 0$ gives
\[\dot{\zb}(t)=\|\zb(t)-\s(t)\|\,B\,(\zb(t)-\s(t))\]
where B is the matrix
\[\left ( \mtx 0. -1. 1. 0\par \right)\]
\rl
%================================
\begin{lemma}{\bf (Technical Lemma)}
\label{lm:fm}
Let $g:\Rl^n \ra \Rl$ and assume that
\[f(\exp(t\,\A)\zb_0)=f(\zb_0)\]
for all $t$, where \A \sp is a square
matrix of dimension $m$ and $\zb_0$ is a vector of dimension $m$.
\vs6\noi Then
\[f(\exp(g(\wb)\,t\,\A)\zb_0)=f(\zb_0)\]
for any $\wb \in \Rl^n$.
\end{lemma}
\pf The proof consists in observing that $g(\wb)$ is a scalar and
so $g(\wb)\,t$ can be considered as just another time value, $t_1$.
But for any time $t_1$
\[f(\exp(t_1\,\A)\zb_0)=f(\zb_0)\]
by these assumptions about $f$. \rl
\vs6\noi Now generalize the convolution theorem:
\begin{theorem}
\label{tm:cv1}
Let
\[\zb(t)=\l(\vt x(t). y(t)\par\r)\]
be the solution to
\[\left (\vt \dot{x}. \dot{y}\par \right)=f(\lambda(\zb)) \left
(\mtx a_{11}.
a_{12}.
a_{21}. a_{22}\par \right)\left (\vt x-b(t). y \par \right )\]
where $\lambda(u)$ is the function from lemma \ref{lm:fm} and $f$
is an arbitrary continuous function of a single variable.
\vs6
Then,
\vs6
\mhs21(1)
\[\zb(t)=\s(t)+\exp\l(\A\,(\int_0
^t\,r(\eta)d\eta) \r)(\zb_0-\s(0))-
\int_0^t\,\exp\l(\A \, (\int_\eta^t\,r(\xi)d\xi)\r)d\s(\eta)\]
where $r(t)=f(\lambda(\zb(t)-\s(t)))$.
\vs6
\mhs21(2)
Let $\ub(t)=\zb(t)-\s(t)$
then
\[r\l(\ub(t)+
\int_0^t\,\exp\l(\A \, (\int_\eta^t\,r(\xi)d\xi)\r)d\s(\eta)\r) = \]
\[ r\l(\exp\l(\A\,(\int_0
^t\,r(\eta)d\eta)\r)(\ub_0))\r)=r(\ub_0)\]
\end{theorem}
\vs6\noi \pf Once item (1) is established by following the twist-and-flip
case given in the appendix, item (2) follows from the technical lemma.\rl
\vs6\noi
Note that the above results for nonlinear equations follow from
the proofs for the linear case because the integral curves of the solutions of
fundamental nonlinear equations agree with those of the solutions of
related linear equations. It is this close connection between
linear equations and fundamental nonlinear equations that makes a
theory for these nonlinear equations possible.
There is a parallel result for the general case
corresponding to item (3) from the preceding twist-and-flip
convolution lemma.
\vs6\noi
Extend these results to the conventional forced oscillator
by the following transformation:
\vs6
\mhs21 Let
\[\dot{\zb}(t)=f(\lambda(\zb))\,\A\,\zb-\dot{\s}(t)\]
Then the transformation
\[\wb=\zb+\s(t)\]
reduces this equation to the form of the equations of convolution lemmas.
\vs6\noi The derivation of the integral form of the solution of the
twist-and-flip ODE when applied to linear forced ODEs is
equivalent to the familiar convolution expression for the general solution of a
linear forced ODE. In fact, it can be seen to be
the same expression as occurs in the linear case by integrating the Stieltjes integral by parts
and simplifying.
\vs6\noi
In the special case where $\s(t)$ is differentiable we can obtain a
convolution theorem more easily. The following lemma, which appears in
\cite{bi:hw}, is needed.
\begin{lemma}
Let $\Phi(t)$ be a matrix function of $t$. The function $\zb(t)$
where
\[\zb(t)=\exp(\Phi(t))\]
is a solution of
\[\dot{\zb}=\Phi(t)\,\zb\]
if and only if the two matrices, $\Phi(t)$ and
\[\int_0^t\Phi(\eta)d\eta\]
commute.
\end{lemma}
\pf The proof is a direct computation.\rl
\vs6\noi
As noted in \cite{bi:hw} the above condition is satisfied in case $\Phi(t)$ is a constant
matrix and a diagonal matrix. However, it is also satisfied when $\Phi(t)=f(t)\A$. The condition
$\Phi(t)=f(t)\A$ is the condition that is used to prove the
results about the fundamental nonlinear ODEs. Using this result
a simplified convolution theorem can be derived. To do this
first prove some more technical lemmas.
\begin{lemma}
Assume the ODE
\[\dot{\zb}=f(t)\,\A\,\zb\]
has a unique solution for every initial condition, $\zb_0$.
\vs6
\mhs21 Then
\[\zb(t)=\exp(F(t)\,\A) \,\zb\]
where
\[F(t)=\int_0^t f(\eta)d\eta\]
\end{lemma}
\pf The result follows from the preceding lemma.\rl
\begin{lemma}
Assume the ODE
\[\dot{\zb}=f(t)\,\A\,\zb+\s(t)\]
has a unique solution for every initial condition, $\zb_0$.
\vs6
\mhs21 Then
\[\zb(t)=\exp(F(t)\,\A)
\,\zb_0+\exp(F(t)\,\A)\l(\int_0^t\exp(-F(\eta)\A) \, \s(\eta)d\eta \r)\]
where,
\[F(t)=\int_0^t f(\eta)d\eta\]
\end{lemma}
\vs6\noi The following proposition is obtained
\begin{proposition}
Assume the ODE
\[\dot{\zb}=f(\zb(t))\,\A\,\zb(t)+\s(t)\]
has a unique solution for every initial condition, $\zb_0$.
\vs6\noi
\mhs21 Then
\[\zb(t)=\exp(F(t)\,\A)
\,\zb_0+\exp(F(t)\,\A)\l(\int_0^t\exp(-F(\eta)\A) \, \s(\eta)d\eta \r)\]
where
\[F(t)=\int_0^t\,f(\zb(\eta))d\eta\]
Further, if $\lambda$ is an invariant function for the associated
homogeneous linear ODE, then
\[\lambda \l( \zb(t)-
\exp(F(t)\,\A)\l(\int_0^t\exp(-F(\eta)\A) \,
\s(\eta)d\eta \r)\r)=\lambda(\zb_0)\]
\end{proposition}
\pf Let $\zb(t)$ be the solution for which $\zb(0)=\zb_0$, and
let $g(t)=f(\zb(t))$ which is now a fixed function of $t$. Then
\[\dot{\zb}=g(t)\,\A\,\zb+\s(t)\]
and the previous lemma applies. The last line follows from the
invariance of the function $\lambda$.\rl
\vs6\noi
Now compute the Jacobian of the time-one map for a general
fundamental nonlinear ODE. First prove some preliminary
lemmas. The first lemma is an application of a theorem from
Coddington and Levinson [1955]:
\blm
Let
\[\dot{\zb}=f(\zb)\,\A\,\zb\]
have a unique solution for each initial condition, and let ${\rm
D}(\zb)$ be the derivative of \zb \sp with respect to the initial
condition $\zb_0$.
\vs6
\mhs21 Then,
\[\det({\rm D}(\zb))=\exp \l(\int_0^t {\rm tr}({\rm
D}(f(\zb)\,\A\,\zb))(\eta)\, d\eta\r)\]
\elm
\pf The proof is an application of theorem 7.2, p.25 of
[Coddington \& Levinson, 1955]\rl
\blm
The assumptions of the above lemma give
\[{\rm D}(f(\zb)\,\A\,\zb)=\A\zb\cdot\nabla f+f(\zb)\,\A\]
\elm
\pf The proof is a direct computation.\rl
\blm
Given the assumptions of the preceding lemma the
following is the trace formula:
\[{\rm tr}({\rm D}(f(\zb)\,\A\,\zb))=\nabla f \cdot
\dot{\zb}/f(\zb)+f(\zb)\,{\rm tr}(\A)\]
\elm
\pf The proof is a direct computation.\rl
\begin{corollary}
The results of the preceding lemmas give the formula
\[\det({\rm D}(\zb))=\exp\l(f(\zb)\,{\rm tr}\,\A\,t+\int_0^t (\nabla
f(\zb)\cdot \dot{\zb}/f(\zb))(\eta)d\eta \r)\]
\end{corollary}
\begin{corollary}
Given the hypothesis of the above lemmas
\[{\rm tr}(\A\,\zb \cdot \nabla \lambda)(\zb)=\tau\,(\nabla
\lambda \cdot \dot{\zb})/\lambda(\zb)\]
\end{corollary}
\vs6\noi The final result is stated as a theorem
because of its importance. We state the result in two-dimensions
but because of the results of the previous lemmas and corollaries
it is true in n-dimensions:
\begin{theorem}
Let
\[\zb(t)=\exp(\lambda(\zb_0)\,t\,\A)\zb_0\]
and consider the time-one map $T_\tau(\zb_0)=\zb(\tau)$
then the differential,$D(T_\tau)$, of $T_\tau$ with respect to $\zb_0$ is
\[\exp\l(\lambda(\zb_0)\,\tau\,\A\r)+\B(\zb_0,\tau)\]
where
\[\B(\zb_0,\tau)=\left[\l(\frac{\pr
\exp(\lambda(\zb_0)\,\tau\,\A)}{\pr x}
\zb_0\r),\l(\frac{\pr \exp(\lambda(\zb_0)\,\tau\,\A)}{\pr y}
\zb_0 \r) \right]=
\tau\,(\A\,\zb \cdot \nabla \lambda )\]
The determinant of this differential is given by
\[\det(D(T_\tau))=\exp\l(\lambda(\zb_0)\,\tau\,{\rm tr}(\A)\r)\]
If the trace of \A\sp is zero both the linear and nonlinear system
defined by \A\sp are area preserving.
\end{theorem}
\pf The proof follows from the previous results and the fact
that $\nabla \lambda \cdot \dot{\zb}=0$.\rl
\blm
Let \zb, $T_\tau$, and $D(T_\tau)$ be as in the above theorem.
Then the trace of $D(T_\tau)$ is given by
\[{\rm tr}(D(T_\tau))= {\rm
tr}(\exp(\lambda(\zb_0)\,\A\,\tau))+(\nabla \lambda \cdot
\dot{\zb})/\lambda(\zb)= {\rm
tr}(\exp(\lambda(\zb_0)\,\A\,\tau))\]
\elm
\pf The function $\lambda$ is orthogonal to the integral curves
determined by \zb and hence the second term is zero.\rl
\begin{lemma}
Assume the ODE
\[\dot{\zb}=f(t)\,\A\,\zb\]
has a unique solution for every initial condition, $\zb_0$.
\vs6
\mhs21 Then
\[\zb(t)=\exp(F(t)\,\A) \,\zb\]
where
\[F(t)=\int_0^t f(\eta)d\eta\]
\end{lemma}
\pf The result follows from the preceding lemma.\rl
\begin{lemma}
Assume the ODE
\[\dot{\zb}=f(t)\,\A\,\zb+\s(t)\]
has a unique solution for every initial condition, $\zb_0$.
\vs6
\mhs21 Then
\[\zb(t)=\exp(F(t)\,\A)
\,\zb_0+\exp(F(t)\,\A)\l(\int_0^t\exp(-F(\eta)\A) \, \s(\eta)d\eta \r)\]
where,
\[F(t)=\int_0^t f(\eta)d\eta\]
\end{lemma}
\vs6\noi The following proposition is now possible
\begin{proposition}
Assume the ODE
\[\dot{\zb}=f(\zb(t))\,\A\,\zb(t)+\s(t)\]
has a unique solution for every initial condition, $\zb_0$.
\vs6
\mhs21 Then
\[\zb(t)=\exp(F(t)\,\A)
\,\zb_0+\exp(F(t)\,\A)\l(\int_0^t\exp(-F(\eta)\A) \, \s(\eta)d\eta \r)\]
where
\[F(t)=\int_0^t\,f(\zb(\eta))d\eta\]
Further, if $\lambda$ is an invariant function for the associated
homogeneous linear ODE, then
\[\lambda \l( \zb(t)-
\exp(F(t)\,\A)\l(\int_0^t\exp(-F(\eta)\A) \,
\s(\eta)d\eta \r)\r)=\lambda(\zb_0)\]
\end{proposition}
\pf Let $\zb(t)$ be the solution for which $\zb(0)=\zb_0$, and
let $g(t)=f(\zb(t))$ which is now a fixed function of $t$. Then
\[\dot{\zb}=g(t)\,\A\,\zb+\s(t)\]
and the previous lemma applies. The last line follows from the
invariance of the function $\lambda$.\rl
\vs6
Now compute the Jacobian of the time-one map for a general
fundamental nonlinear ODE. First prove some preliminary
lemmas. The first lemma is an application of a theorem from
Coddington and Levinson [1955]:
\blm
Let
\[\dot{\zb}=f(\zb)\,\A\,\zb\]
have a unique solution for each initial condition, and let ${\rm
D}(\zb)$ be the derivative of \zb \sp with respect to the initial
condition $\zb_0$.
\vs6
\mhs21 Then,
\[\det({\rm D}(\zb))=\exp \l(\int_0^t {\rm tr}({\rm
D}(f(\zb)\,\A\,\zb))(\eta)\, d\eta\r)\]
\elm
\pf The proof is an application of theorem 7.2, p.25 of
[Coddington \& Levinson, 1955]\rl
\blm
The assumptions of the above lemma give
\[{\rm D}(f(\zb)\,\A\,\zb)=\A\zb\cdot\nabla f+f(\zb)\,\A\]
\elm
\pf The proof is a direct computation.\rl
\blm
The assumptions of the preceding lemma give the
following trace formula:
\[{\rm tr}({\rm D}(f(\zb)\,\A\,\zb))=\nabla f \cdot
\dot{\zb}/f(\zb)+f(\zb)\,{\rm tr}(\A)\]
\elm
\pf The proof is a direct computation.\rl
\begin{corollary}
The results of the preceding lemmas give the formula
\[\det({\rm D}(\zb))=\exp\l(f(\zb)\,{\rm tr}\,\A\,t+\int_0^t (\nabla
f(\zb)\cdot \dot{\zb}/f(\zb))(\eta)d\eta \r)\]
\end{corollary}
\begin{corollary}
Given the hypothesis of the above lemmas
\[{\rm tr}(\A\,\zb \cdot \nabla \lambda)(\zb)=\tau\,(\nabla
\lambda \cdot \dot{\zb})/\lambda(\zb)\]
\end{corollary}
\vs6\noi The final result is stated as a theorem
because of its importance. The result is stated in two-dimensions
but because of the results of the previous lemmas and corollaries
it is true in n-dimensions:
\begin{theorem}
Let
\[\zb(t)=\exp(\lambda(\zb_0)\,t\,\A)\zb_0\]
and consider the time-one map $T_\tau(\zb_0)=\zb(\tau)$
then the differential, $D(T_\tau)$, of $T_\tau$ with respect to $\zb_0$ is
\[\exp\l(\lambda(\zb_0)\,\tau\,\A\r)+\B(\zb_0,\tau)\]
where
\[\B(\zb_0,\tau)=\left[\l(\frac{\pr
\exp(\lambda(\zb_0)\,\tau\,\A)}{\pr x}
\zb_0\r),\l(\frac{\pr \exp(\lambda(\zb_0)\,\tau\,\A)}{\pr y}
\zb_0 \r) \right]=
\tau\,(\A\,\zb \cdot \nabla \lambda )\]
The determinant of this differential is given by
\[\det(D(T_\tau))=\exp\l(\lambda(\zb_0)\,\tau\,{\rm tr}(\A)\r)\]
If the trace of \A\sp is zero, both the linear and nonlinear system
defined by \A\sp are area preserving.
\end{theorem}
\pf The proof follows from the previous results and the fact
that $\nabla \lambda \cdot \dot{\zb}=0$.\rl
\blm
Let \zb, $T_\tau$, and $D(T_\tau)$ be as in the above theorem.
Then the trace of $D(T_\tau)$ is given by
\[{\rm tr}(D(T_\tau))= {\rm
tr}(\exp(\lambda(\zb_0)\,\A\,\tau))+(\nabla \lambda \cdot
\dot{\zb})/\lambda(\zb)= {\rm
tr}(\exp(\lambda(\zb_0)\,\A\,\tau))\]
\elm
\pf The function $\lambda$ is orthogonal to the integral curves
determined by \zb and hence the second term is zero.\rl
%=============================FIRST ORDER THEORY=============
\section{\sml First Order Theory}
First order theory provides initial insight into the general theory.
\btm
Let $x(t)$ be a real valued function of a real variable $t$.
Assume
\begq
\dot{x}+V(x)\cdot x=0\hspace{12pt} x(0)=x_0
\label{eq:hm1}
\endq
has a unique bounded solution for every initial condition and that $\|x(t)\| \leq M$ for all $t\in {\bf R}$.
Let $t_n=n\, h$, for $h \in (0,a]$, for $a <1$. Also, define $\tilde{x}$ as
\begq
\tilde{x}(t_{n+1})= \exp(- V(x(t_n))(h))\cdot x(t_n)
\endq
then
\begq \|x(t_n)-\tilde{x}(t_n)\|\leq K\cdot h \endq
for fixed $ K$ and all integers $n $.
\etm
\pf The two primary steps are (1) to use mathematical induction to prove the approximation at the $n^{th}$ step (the result is clearly true for $n=0$); and, (2) to use mean value theorems to eliminate integrals in favor of algebraic terms.
Let $\Delta=\|x(t_n)-\tilde{x}(t_n)\|$ then the error at the $n^{th}$ step is given by
\begq
\Delta= \|(\exp(- V(x(\xi))\cdot h)-\exp(- V(x(t_0))\cdot h)))\cdot x(t_0)\|
\endq
and so
\begq
\Delta\leq \|\exp(- V(x(\xi))\cdot h)-\exp(- V(x(t_0))\cdot h)\|\cdot M
\endq
and
\begq
\Delta\leq \|\exp(- V(x(\rho)))\cdot h)\|\cdot \|V(x(t_0))-V(x(\xi))\|\cdot h)\cdot M
\endq
Let \[K_1=\max_x \|V(x(t))\| \hspace{6pt}\mbox{and} \hspace{6pt}K_2= \max_x \| V'(x(t))\|\]
to get
\begq\Delta\leq \|\exp(K_1 \cdot h)\|\cdot K_2\|x(t_0)-x(\xi)\|\cdot h\cdot M\endq
\begq\Delta\leq \|\exp(K_1 \cdot h)\|\cdot K_2\|V(x(t))\cdot x(t)\cdot h\|\cdot h\cdot M\endq
\begq\Delta\leq \|\exp(K_1 \cdot h)\|\cdot K_2\cdot K_1 \cdot h^2\cdot M^2\endq
showing that the error can be made arbitrarily small. End of sketch.
%xx
\vs6 In particular the iteration
\begq
x_{n+1} =\exp(- V(x_n)\cdot h))\cdot x_n
\endq
is an approximation to the solution of Eq. (\ref{eq:hm1}) when all smoothness and boundedness assumptions are satisfied.
\vs6\noi
Now consider Eq.(\ref{eq:ih1}) with the same assumptions as Theorem 1. A rough sketch of the derivation of the relevant IDE is presented first, followed by the statement of the theorem.
\begq
\dot{x}+V(x)\cdot x=f(t)\hspace{12pt} x(0)=x_0
\label{eq:ih1}
\endq
By taking $f(t)$ to be a constant, $b$, over a very small interval $[t_n,t_{n+1}]$ obtain an integral equation containing a convolution. Note that the assumption on $f$ implies that the derivative of $f$ is not too troublesome, i.e., $\|f'(t)\|$ is uniformly bounded over the entire real line.
\vs6\noi This substitution is needed:
\[\frac{d\,W(t)}{dt}= V(x(t))\]
Introducing an integrating factor into Eq.(\ref{eq:ih1}) and collecting terms gives
\begq
\frac{d (x(t)\exp(W(t))}{dt}=f(t)\exp(W(t))
\label{eq:ih2}
\endq
Integrating over a small interval $[t_n, t_{n+1}]$
\begq
x(t_{n+1})\exp(W(t_{n+1}))=x(t_n)\exp(W(t_n))+\int_{t_n}^{t_{n+1}} f(s)\exp(W(s))ds
\label{eq:ih3}
\endq
\begq
x(t_{n+1})=x(t_n)\cdot \exp(-V(x_n)\,h)+\exp(-W(t))\int_{t_n}^{t_{n+1}} f(s)\exp(W(s))ds
\label{eq:ih4}
\endq
Since the solution for the homogeneous equation is known, it is only necessary to consider approximating the inhomogeneous part
\begq
\exp(-W(t_n))\int_{t_n}^{t_{n+1}} f(s)\exp(W(s))ds\approx \exp(-W(t_n))\, b\,\int_{t_n}^{t_{n+1}}\exp(W(s))ds
\label{eq:ih5}
\endq
Where $b$ is substituted for $f(t)$ over the interval $[t_n, t_{n+1}]$. At this point assume $W(t)=t$ to shorten and simplify the discussion so that it better applies to the following presentation. Then the integral can be explicitly evaluated giving the form of the IDE that will be presented in the following discussion.
\begq
\exp(-(t_n))\, b\,\int_{t_n}^{t_{n+1}}\exp(s)ds=\exp(-(t_n))\, b\,(\exp(t_{n+1})-\exp(t_n))
\label{eq:ih6}
\endq
This gives $ b\,(\exp(h)-1)$ for Eq.(\ref{eq:ih6}). Collecting terms gives the theorem for the case where $V(x)=\alpha$.
\btm
The IDE for Eq.(\ref{eq:ih1}) is given by
\[x_{n+1}=\exp(\alpha \cdot h)(x_n-f(n\,h))+f(n\,h)\]
\etm
%For $V(x)$ not constant, the derivation is more involved and can be found in \cite{bi:rb6}.
%==================IDEs from various ODEs=================================
\section{\sml IDEs Derived from ODEs}
In this section IDEs corresponding to certain ODEs will be derived. This section is divided into IDEs derived from (1) linear and autonomous ODEs and (2) non autonomous ODEs. Axiom {\bf A5} will not be addressed. In every case it is assumed that $\det(\A(\X)) \neq 0$ and $\det(\A(t)) \neq 0$. All results in this section are proven by corollary \ref{cr:nh}.
%===============================================
\subsection{\sml Homogeneous Linear ODEs}
The general homogeneous linear ODE is
\begq
\label{eq:ode1}
\dot{\X}=\A(t)\, \X
\endq
The most elementary case is where $\A$ is a constant matrix
\[\dot{\X}=\A\, \X\]
The general solution is
\[\X(t)=\exp(t \, \A)\, \X_0\]
and the IDE is given by
\[\T_h=\exp(h \, \A)\]
In this case the IDE corresponds exactly to the general solution of the originating ODE. The IDE strictly speaking is $\exp(h\, \A)$ which is also the fundamental matrix for the associated ODE.
\begin{lemma}
Given
\[\dot{\X}=\A\, \X\;\;\; \X(0)=\X_0\]
\[\T^n_h(\X_0)=\X(n\, h)\]
\end{lemma}
\pf
Direct computation \rl
\begin{proposition}
Consider the initial value problem
\begin{eqnarray}
\dot{\X}(t)&=&\A(t)\,\X\\
\X(0)&=&\X_0\\
\|\A(t)\|&\leq &M
\end{eqnarray}
Let $\T_h=\exp(h\,\A(h))$.
Then $\T_h$ is an IDE.
\vs6\noi Also let
\[\T_h=\exp(\int _0^h\A(s)ds)\]
then $\T_h$ is an IDE.
\end{proposition}
\begin{proposition}
Consider the initial value problem
\begin{eqnarray}
\dot{\X}(t)&=&\A(t)\,\X+\C\\
\X(0)&=&\X_0\\
\|\A(t)\|&\leq &M\\
\det( \A(t))&\neq&0
\end{eqnarray}
and $\C$ is a constant vector.
\vs6\noi Let $\T_h(\X)=\exp(h\,\A(h))\,(\X-\A(h)^{-1}\C)+\A(h)^{-1}\,\C$.
\vs6\noi Then $\T_h$ is an IDE.
\end{proposition}
\vs6\noi Also if $\C=\0$ and if
\[\T_h=\exp(\int _0^h\A(s)ds)\]
then $\T_h$ is an IDE.
\vs6\noi \pf
Direct computation
\rl
\begin{example} {\bf \sml Bessel's Equation of Order Zero}
\label{ex:bes}
\begq
t\frac{d^2y}{dx^2}+\frac{dy}{dx} +t\cdot y=0
\label{eq:bes}
\endq
By substituting a constant for the time variable and solving the linear equation then re-substituting the time variable for the constant provides a simple IDE that is morphologically equivalent to the Bessel equation, Eq.(\ref{eq:bes}):
\begin{eqnarray}
x&\ra& \exp(\alpha\cdot h/t_n)(x\cos(h)+y\cdot \sin(h))\\
y&\ra& \exp(\alpha\cdot h/t_n)( y\cdot \cos(h)- x\cdot\sin(h))
\label{eq:bes01}
\end{eqnarray}
However, it should be expected that the time variable need not occur in the IDE. First two short-hand abbreviations:
\begin{eqnarray}
u &=& -h \cdot \log(h)\\
v &=& \sqrt{h / \log(1 + h)}
\end{eqnarray}
then the time independent IDE is given by Equation (\ref{eq:bes02}):
\begq
\l(\vt x.y\par \r)\ra\l(\mtx \cos(u) .\sin(u) \cdot v.- \exp(-\alpha \cdot u)\cdot \sin(u) \cdot v.\exp(-\alpha \cdot u)\cdot\cos(u) \par\r)\l(\vt x.y\par \r)
\label{eq:bes02}
\endq
\end{example}
\vs6\noi The first approximation IDE to Eq. (\ref{eq:ode1}) is given by
\begq
\X\ra \exp(\A(h)\cdot h)\X
\endq
This can be generalized and made more precise:
%=================NONLNEAR================
\subsection{\sml Nonlinear ODEs}
\begin{proposition}
\label{pr:ide00}
Consider the initial value problem
\[\begin{array}{lcl}
\dot{\X}(t)&=&\A(\X)\,\X\\
\X(0)&=&\X_0\\
\|\exp(\A(\X))\|&\leq &M
\end{array}\]
Let $\T(\X)=\exp(h\,\A(\X))\,\X$
Then $\T$ is an IDE
\end{proposition}
\pf
\[\|\T(\X)-\X\|=\|\exp(h\,\A(\X))\,\X-\X\|\leq \,M \, h\]
\rl
%============================================= Non autonomous IDEs=======
\subsection{\sml Non Autonomous IDEs}
\begin{theorem}
Let
\[\dot{\X}=\A \X +\G(t,\X)\]
and let
\[\T_h(\X)=\exp(h\, \A)(\X-\F(h,\X))+\F(h,\X)\] where
\[\F(h,\X)=-\A^{-1}\G(h,\X)\]
and assume that $\|\X(t)\|\leq M$ for all $t$. Then,
\[\|\T_h(\X)-\X(h)\| \leq M \, h\]
\end{theorem}
\pf
\[\X(h)=\exp(h\, \A)\X_0+ \exp(h\, \A)\int_0^h\exp(-s\, \A)(-1)\G(s,\X(s))ds\]
\[=\exp(h\, \A)\X_0+ \exp(h\, \A)\int_0^h\exp(-s\, \A)ds \cdot (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)\X_0+ \exp(h\, \A)\,\A^{-1}(\exp(-h\, \A)-\I) \, (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)\X_0+ \A^{-1}(\I-\exp(h\, \A))\, (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)(\X_0-\A^{-1}(-1)\G(\xi,\X(\xi)))+ \A^{-1} (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)(\X_0-\F(\xi,\X(\xi)))+ \F(\xi,\X(\xi))\]
\[= \exp(h\, \A)(\X_0-\F(h,\X_0))+ \F(h,\X_0)+\epsilon(h)\]
since
\[\T(\X)= \exp(h\, \A)(\X-\F(h,\X))+ \F(h,\X)\]
therefore
\[\|\X(h)-\T_h(\X)\| \leq M\, h\] \rl
\vs6\noi Another useful form of the non autonomous equation is as follows:
\begin{theorem}
\label{tm:na}
Let
\[\dot{\Y}=\A \Y +\G(t,\Y)\]
and let
\[\X_{n+1}=\exp(h\, \A)(\X_n-\F(n\, h,\X_n))+\F(n\, h,\X_n)\] where
\[\F(n\, h,\X_n)=(-1)\A^{-1}\G(n\, h,\X_n)\]
and assume that $\|\Y(t)\|\leq M$ for all $t$. Then,
\[\|\X_{n+1}-\Y(n\, h)\| \leq M \, h\]
\end{theorem}
\pf
\[\X(h)=\exp(h\, \A)\X_0+ \exp(h\, \A)\int_0^h\exp(-s\, \A)(-1)\G(s,\X(s))ds\]
\[=\exp(h\, \A)\X_0+ \exp(h\, \A)\int_0^h\exp(-s\, \A)ds \cdot (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)\X_0+ \exp(h\, \A)\,\A^{-1}(\exp(-h\, \A)-\I) \, (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)\X_0+ \A^{-1}(\I-\exp(h\, \A))\, (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)(\X_0-\A^{-1}(-1)\G(\xi,\X(\xi)))+ \A^{-1} (-1)\G(\xi,\X(\xi))\]
\[=\exp(h\, \A)(\X_0-\F(\xi,\X(\xi)))+ \F(\xi,\X(\xi))\]
\[= \exp(h\, \A)(\X_0-\F(h,\X_0))+ \F(h,\X_0)+\epsilon(h)\]
since
\[\T(\X)= \exp(h\, \A)(\X-\F(h,\X))+ \F(h,\X)\]
therefore
\[\|\X(h)-\T_h(\X)\| \leq M\, h\] \rl
\vs6\noi A second form of the non autonomous IDE is
\[\X_{n+1}=\exp(h\, \A)\,\X_n+(\I-\exp(h\, \A))(-1)\A^{-1}\G(n\, h,\X_n)\]
where $\A$ may be a function of $\X$.
\begin{lemma}
\label{lm:na}
Assume
\[\X_{n+1}=\exp(h\, \A)\,\X_n+(\I-\exp(h\, \A))(-1)\A^{-1}\G(n\, h,\X_n)\]
then,
\[\|\X_{n+1}-(\exp(h\, \A)\,\X_n+\A\,(h\, +o(h^2))(-1)\A^{-1}\G(n\, h,\X_n))\| \leq M h^2\]
\end{lemma}
\pf
\[\|\X_{n+1}-(\exp(h\, \A)\,\X_n+(\I-\exp(h\, \A))(-1)\A^{-1}\G(n\, h,\X_n))\|=\]
\[\|(\I-\exp(h\, \A))(-1)\A^{-1}\G(n\, h,\X_n)-(\A\,(h\, +o(h^2))(-1)\A^{-1}\G(n\, h,\X_n))\| =\]
\[\|(\I-\exp(h\, \A))\A^{-1}-\I\,((h\, +o(h^2)))\,\G(n\, h,\X_n))\|\leq\]
\[\|(\I-\exp(h\, \A))\A^{-1}-\I\,((h\, +o(h^2)))\| \|\,\G(n\, h,\X_n))\| \leq M\, h^2\]
\rl
%=============================PERIODIC COEFFICIENTS=================
\subsection{\sml Periodic Coefficients}
Linear systems with periodic coefficients are treated in [9], p.78. Here only the simplest case is treated:
\[\dot{\X}=\exp(t\B)\X\]
where $\exp(t \B)$ is periodic. The simplest example in two dimensions is when
\[\B=\l(\mtx 0.1.-1.0\par \r)\]
The related IDE is given by
\[\T(\X)=\exp(\int_0^h\exp(s\, \B)ds)\X\]
An alternate variation is given by
\[\T(\X)=\exp(h\,\exp(h\, \B))\X\]
The inhomogeneous case is treated by proposition[*].
\vs6\noi A more general case is given by
\[\dot{\X}=f(\exp(t\B))\X\]
Where $f$ is a sufficiently smooth matrix valued function. The IDE is given by
\[\T(\X)=\exp(h\,f(\exp(h\, \B)))\X\]
%===========================================PERTURBATIONS ETC==========================================
\subsection{\sml Perturbations of the Canonical IDE}
\begin{proposition}
Let $\T(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)+(\exp(h\, \C)-\I)\G(\X)$
then $\T$ is an IDE.
\end{proposition}
\pf
Direct computation \rl
\begin{proposition}
Assume
\[\T(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)\] is an IDE on $\Rl^n$ and that $f(h)\,\G(\X)$ is a diffeomorphism on $\Rl^n$ and let
\[\S(\X)-\X= (\I-\exp(h\, \A))(\X-\F(\X))+f(h)\,\G(\X)\]
Also, assume
\begin{eqnarray}
f(0)&=&0\\
\|f(h)\|&< &h\\
\|\G(\X)\|&\leq& M_0 \, \X\\
\end{eqnarray}
then $\S(\X)$ is an IDE
\end{proposition}
\pf
The only nontrivial axiom to be satisfied is 3. Since
\[\S(\X)-\X= (\I-\exp(h\, \A))(\X-\F(\X))+f(h)\,\G(\X)\]
it follows that
\[\|(\I-\exp(h\, \A))(\X-\F(\X))+f(h)\,\G(\X)\| \leq (h\,M_{\T}+h M_0)\|\X\|\leq h\, M\, \|\X\|\]
\rl
%==================================ADDITIONAL PROPERTIES======================
\subsection{\sml Even and Odd IDEs}
\begin{lemma}
\label{lm:odd}
If $\F(-\X)=-\F(\X)$ then
\[\T^n(-\X)=-\T^n(\X)\]
\end{lemma}
\pf \[\T(-\X)=\exp(h\, \A)(-\X-\F(-\X))+\F(-\X)=-(\exp(h\, \A)(\X+\F(-\X))-\F(-\X))\]
\[=-(\exp(h\, \A)(\X-\F(\X))+\F(\X))=-\T(\X).\]
Apply mathematical induction for the final result\rl
\begin{lemma}
If $\T^n(\X)=\X$ and $\F$ is odd then
\[\T^n(-\X)=-\X\]
\end{lemma}
\pf Lemma (\ref{lm:odd}). \rl
\vs6\noi So if $\T$ has one periodic point, it has two periodic points that are mirror images of each other (and they have the same dynamics).
\begin{lemma}
If $\,\T$ is an even function then $\T$ cannot have a non zero periodic point.
\end{lemma}
\pf Assume $\X\neq \0$. If $\T^n$ is an IDE it is 1-1. If $\T^n(\X)=\X$ then
\[\T^n(-\X)=\T^n(\X)=\X\] and so $\T$ is not 1-1. \rl
\begin{lemma}
Suppose that there are two points $\X, \;\;\Y \ni \|\X-\Y \|>0$ and $\F(\X)=\F(\Y)$. Then $\|\T(\X)-\T(\Y)\|>0$ and $\T$ is {\rm 1-1}.
\end{lemma}
\pf Direct computation. \rl
%======================================IDEs for when det(A)=0==============================
\section{\sml IDEs from ODEs where $\det(\A)=0$}
In this section it is assumed that $\det(\A(\X))=0$. The relevance of this condition is that second order equations that are nonautonomous, when expressed as three-dimensional equations, will satisfy this condition.
\vs6\noi {\bf Example}
Consider
\[\begin{array}{lll}
\dot{x} &=& y\\
\dot{y} &=& -x +\cos(t)
\end{array}\]
As a three-dimensional equation it can be rewritten as
\[\begin{array}{lll}
\dot{x} &=& y\\
\dot{y} &=& -x +\cos(z)\\
\dot{z}&=&1
\end{array}\]
Arranging the equations in matrix form gives
\[\l(\begin{array}{lll}
\dot{x}\\
\dot{y}\\
\dot{z}
\end{array}\r) =
\l(\begin{array}{ccc}
0&1&0\\
-1&0&0\\
0&0&0
\end{array}\r)
\l(\begin{array}{c}
x\\
y\\
z \end{array}\r)
+\l(\begin{array}{c}
0\\
\cos(z)\\
1
\end{array}
\r)
\]
%==========================
\section{\sml Functions of an IDE}
\begin{lemma}
\label{lm:functions}
Let $\T_h$ be an IDE and assume $\J(\T_h)$ exists and let $f$ be a mapping from the range of the IDE to $\Cx^n$ or $\Rl^n$.
Assume that $f'$ is bounded and continuous. Then
\[\|f(\T_h(\X))-f(\T_h(\Y))\|\leq h M \,\J(\T_h)\, \|\X-\Y\|\]
\end{lemma}
\pf
\rl
%===========================================TWO FUNDAMENTAL THEOREMS=================
\section{\sml Two Fundamental IDE Theorems}
\label{sc:ft}
This section presents two fundamental theorems that show how the dynamics of an IDE are derived from an ODE by deriving the Jacobian of the system at a fixed point, i.e. where $\dot{\X}=0$.
\begin{theorem}
\label{tm:can}
Assume
\begq
\dot{\X}=\G(\X)
\label{eq:ode}
\endq
is an ODE in n-dimensional space with $\G(\p)=0$, $\J(\G(\p))\neq 0$, where $\J$ is the Jacobian derivative.
Let
\begq
\label{eq:ide}
\T_h(\X)= \exp(h\,\J(\G(\X)))(\X-\F(\X))+\F(\X)
\endq
where \[\F(\X)=\X-\J(\G(\X))^{-1}\G(\X)\]
Then
\vs6\noindent
{\rm (1)} Eq. \ref{eq:ode} and Eq. \ref{eq:ide} have the same fixed point, $\p$. \\
{\rm (2)} The Jacobian of both vector fields are identical at $\p$\\
{\rm (3)} and therefore, the Jacobian of both vector fields at the fixed point have the same eigenvectors and eigenvalues\\
{\rm (4)} The dynamics of both equations in a neighborhood of $\p$ are identical\\
\end{theorem}
\pf Statements (1)-(4) are direct computations. \rl
\vs6\noi Theorem \ref{tm:can} presents a third form of an IDE.
\vs6\noi {\bf Third Form of the Standard IDE}
\[\T(\X)=\exp(h\, \A)\Q(\X)+ \X-\Q(\X)=(\exp(h\, \A)-\I)\Q(\X)+ \X\]
\begin{lemma}
\[\T(\X)=\exp(h\, \A)\Q(\X)+ \X-\Q(\X)=(\exp(h\, \A)-\I)\Q(\X)+ \X\] is an IDE.
\end{lemma}
\pf Let
\[\F(\X)= \X-\Q(\X)\]
then substituting and rearranging
\[\T(\X)=\exp(h\, \A)\Q(\X)+ \X-\Q(\X)\]
gives $\T$ in standard IDE form
\[\T(\X)=\exp(h\, \A)(\X-\F(\X)) +\F(\X)\]
\rl
\begin{corollary}
Assume that an IDE arises from {\rm Eq.( \ref{eq:ode})} and is in canonical form according to Theorem \ref{tm:can}. Further, assume that $\|\G(\X)\|\leq M$ where $M$ is the bound of the associated IDE $\T_h$ and that the Euler one-step integrator for {\rm Eq. \ref{eq:ode}} for step size $h$ is given by
\[\X(h)=\X_0+h\, \G(\X_0)\]
for an initial condition $\X(0)=\X_0$.
Then
\[\|\T_h(\X_0)-\X(h)\|\leq o(h^2)\]
\end{corollary}
\pf Using Theorem \ref{tm:can},
\[\T_h(\X_0)= ( \X_0+h\, \J(\G(\X_0))\,\J(\G(\X_0))^{-1}\,\G(\X_0))+o(h^2)\]
\[\|\X_0+h\, \J(\G(\X_0))\,\J(\G(\X_0))^{-1}\,\G(\X_0)+o(h^2)-(\X_0+h\, \G(\X_0))\|\leq o(h^2)\]
\rl
\begin{theorem}
\label{tm:gts}
Assume
\begq
\dot{\X}=\sum_{i=1}^n sg_i(\X)\,\A_i\, \X
\label{eq:ode2}
\endq
is an ODE in n-dimensional space constructed from n-phased gates {\rm [12]} and that each $\A_i$ is an $n \times n$ matrix of constants.
Let
\begq
\label{eq:ide1}
\T_h(\X)= \X+\sum_{i=1}^n sg_i(\X) (\exp(h \, \A_i)-\I) \X
\endq
Then
\vs6\noindent
{\rm (1)} Eq. \ref{eq:ide1} is an IDE \\
{\rm (2)} The difference between the solution of Eq. \ref{eq:ode2} and Eq. \ref{eq:ide1} has Lebesgue measure zero\\
{\rm (3)} The IDE of Eq. \ref{eq:ide1} can be made C$^\infty$ \\
{\rm (4)} The difference between the solution of Eq. \ref{eq:ode2} and the C$^\infty$ version of Eq. \ref{eq:ide1} over any finite interval can be made to have arbitrarily small Lebesgue measure
\end{theorem}
\pf The theorem follows from well-known fundamental results.
\rl
\begin{lemma}
\label{lm:can}
Let $\T$ be an arbitrary diffeomorphism on $\Rl^n$ and assume that $\T(\p)=\p$ and that $\det(\J(\T)) \neq 0$ for all $\X$. Let $\J(\T)_{\p}$ be the Jacobian derivative of $\T$ at $\p$. Then
\[\T_*(\X)=\l(\frac{\exp(h\, \J(\T)_{\p})-\I}{h}\r)(\X-\p)+\p\]
is a linear IDE having the fixed point $\p$ and having the same dynamics as $\T$ up to order $h$ in a small neighborhood of $\, \p$ as $h \ra 0$.
Also, using the same notation for convenience, let
\[\T_*(\X)=(\J(\T)_{\p})^h(\X-\p)+\p\]
Then $\T_*$ is an IDE and $\T$ and $\,\T_*$ have exactly the same dynamics for $h=1$
\end{lemma}
\pf Since $\det(\J(\T)) \neq 0$ there exists a $\C(\X)$ such that $\J(\T)=\exp(\C(\X))$ for all $\X$. The rest is a direct computation
\rl
\begin{lemma}
Let
\[\dot{\X}=\F(\X)\]
and assume that $\F(\p_1)=\F(\p_2)=0$. Let $\J(\F)(\p_i)$ be the Jacobians at these fixed points.
Then
\[\T_{h,i}=\exp(h\,\J(\F)(\p_i))\,(\X-\p_i)+\p_i\]
are IDEs with fixed points $\p_i$ and the dynamics of $\T_{h,i}$ are the same as $\X(t)$ at the fixed points.
\end{lemma}
\begin{proposition}
\label{pr:fixedp}
Assume $\R$ and $\S$ are IDEs with fixed points $\R_h(\p)=\p$ and $\S_h(\q)=\q$
and let
\[\T_h =f(\X)\, \R_h+(1-f(\X))\, \S_h\]
where $f(\p)=1,\; f(\q)=0$
then $\T$ is an IDE and $\T(\p)=\p$ and $\T(\q)=\q$.
\end{proposition}
\pf Direct computation verifies the fixed points. That $\T_h$ is an IDE follows from proposition \ref{pr:fus} \rl
\vs6\noi The following is a well known result:
\begin{theorem}
Given two points $\p \neq \q$ there exists a $C^\infty$ function for which $f(\p)=1$ and $f(\q)=0$.
\end{theorem}
\pf \cite{bi:xx}
\rl
%========================Further Results==============================
\section{\sml Operator IDEs}
The equation $\A\, \Phi\, (\A\, \exp(-h\A)+(\exp(-h\A)-\I)\, \B\, \Phi)^{-1}$ demonstrates the value of considering IDE operators defined by constant matrices separately from their role in defining IDEs.
This operator treatment can be greatly generalized to include linear operators on a Hilbert space. For example, if $\L$ is a bounded linear operator on a Hilbert space of continuous functions then
\[\exp(h\, \L)(f)\] is a sensible expression. Further
\[\T(f)=\exp(h\, \L)(f-g)+g\]
is also a sensible expression.
\vs6\noi These examples indirectly provide a generalization of the concept of an ODE in a Hilbert or Banach space because one may suppose that if an ODE could be defined in such infinite dimensional spaces, the IDE would be a local approximation to the solution of such an equation.
\begin{proposition}
Let $\T(\Phi)$ and $\S(\Phi)$ be matrix IDEs and $\Phi$ is an $n\times n$ matrix.
\vs6\noi Then $\S(\Phi)\cdot \T(\Phi)$ is an IDE.
\end{proposition}
\pf
\[(\S(\Phi)\cdot \T(\Phi))(\X)-\X =\S(\Phi)\cdot (\T(\Phi)(\X))-\T(\Phi)(\X)+\T(\Phi)(\X)-\X \]
\[\S(\Phi)\cdot (\T(\Phi)(\X))-\T(\Phi)(\X)+\T(\Phi)(\X)-\X \]
\[(\S(\Phi)-\I)\cdot (\T(\Phi)(\X))+(\T(\Phi)-\I)\,\X \]
\rl
\begin{proposition}
Assume that for all $\X$
\[\A(\X)\,\X=\B(\X)\X\]
and that
\[\A(\X)\B(\X)=\B(\X)\A(\X)\]
Then for all $\X$
\[\exp(h\, \A(\X))\,\X=\exp(h\,\B(\X))\X\]
\end{proposition}
\pf
It must be shown that
\[\A(\X)^2\, \X=\B( \X)^2 \, \X\]
\[\A(\X)^2\, \X=\A(\X)\,\A(\X)\, \X =\A(\X)\B( \X) \, \X=\B(\X)\A( \X) \, \X=\B( \X)^2 \, \X\]
\rl
\begin{proposition}
\[\dot{\X}=\A(\X)\, \X\]
Assume that for all $\X$
\[\A(\X)\,\X=\B(\X)\X\]
Then for all $\X$
\[\|\exp(h\, \A(\X))\,\X-\exp(h\,\B(\X))\X\| \leq h M\]
\end{proposition}
\pf
Since
\[\dot{\X}=\A(\X)\, \X\]
and
\[\A(\X)\,\X=\B(\X)\X\]
then
\[\dot{\X}=\B(\X)\, \X\]
so
\[\|\exp(h\, \A(\X_0))\X_0-\X(h)+\X(h)-\exp(h\,\B(\X_0))\X_0\|=\]
\[\|\exp(h\, \A(\X_0))\,\X_0-\X(h)\|+\|\X(h)-\exp(h\,\B(\X_0))\X_0\|\]
\rl
%========================FUSION=======================
\section{\sml Fusion Theory}
\begin{proposition}
\label{pr:fus1}
Assume that $\T_h$ and $\S_h$ are IDEs and that $\lambda \in \Cx$. Let
\[\R_h=\lambda \, \T_h+(1-\lambda)\, \S_h\]
then $\R_h$ is an IDE
\end{proposition}
\pf
\[\R_h(\X)-\X=\lambda \, (\T_h(\X)-\X)+(1-\lambda)\, (\S_h(\X)-\X)\]
\rl
\begin{proposition}
\label{pr:fus}
Let $\T_h$, $\S_h$ and $\R_h$ be IDEs.
Then
\[\R_h\,\T_h+(\I-\R_h)\,\S_h\] is an IDE.
\end{proposition}
\pf
\[\R_h\,\T_h+(\I-\R_h)\,\S_h-\I=\]
\[\R_h\,\T_h+(\I-\R_h)\,\S_h-\I+\R_h-\R_h=\]
\[\R_h\,(\T_h-\I)+(\I-\R_h)\,(\S_h-\I) \]
\rl
\begin{corollary}
Let $\lambda(\X)$ be a smooth function of a complex variable
and let
\[\R_h=\lambda(\X) \, \T_h(\X)+(1-\lambda(\X))\, \S_h(\X)\]
then $\R_h$ is an IDE
\end{corollary}
\pf
\[\R_h(\X) -\X=\lambda(\X) \, \T_h(\X)+(1-\lambda(\X))\, \S_h(\X)- \X=\]
\[\lambda(\X) \, \T_h(\X)+(1-\lambda(\X))\, \S_h(\X)-\X+\lambda(\X)\,\X-\lambda(\X)\,\X=\]
\[\lambda(\X) \, (\T_h(\X)-\X)+(1-\lambda(\X))\, (\S_h(\X)-\X)\]
\rl
\begin{corollary}
\label{cor:bolfus}
Let
\[
f\l(\begin{array}{l}
p\\
q\\
r
\end{array}\r)=\l(\begin{array}{l}
f_1(x,y,z)\\
f_2(x,y,z)\\
f_3(p,q,r)
\end{array}\r)
\]
then $f(r,p,q)\,\T_h+(1-f(r,p,q))\, \S_h$ is an IDE
\end{corollary}
\pf Direct application of previous results. \rl
\begin{proposition}
Assume that $\B$ is a Bernoulli automorphism on $\Cx^2$ and that $\P$ is an almost periodic function on $\Cx^2$ and that $\B$ is embedded in $\T$ and $\P$ is embedded in $\S$.
Let
\[\H_h=\lambda(\X) \, \T_h(\X)+(1-\lambda(\X))\, \S_h(\X)\]
then $\H_1$ is the fundamental map {\rm [11]}.
\end{proposition}
\pf
Direct computation.
\rl
\begin{corollary}
Let $0\leq \lambda \leq 1$ and let
\[\H_h(\X)=\lambda \, \T_h(\X)+(1-\lambda)\, \S_h(\X)\]
then $\H_h(\X)$ is an IDE.
\end{corollary}
The weighted sum of two IDEs is an IDE.
\begin{proposition}
Let $\T(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)$
Then there exists an ODE that generates $\T$
\end{proposition}
\pf The ODE is given by
\[\dot{\X}=-\A^{-1}\, \X-\A^{-1}\F(\X)\]
\rl
\begin{corollary}
Let $\H$ be embedded into an IDE. Then
there exist an ODE that generates that IDE.
\end{corollary}
\begin{proposition}
Let $\A=\A_1+\A_2$ be any decomposition of $\A$ into the sum of two matrices.
\vs6\noi Then $\T=\lambda \, \exp(h \,\A_1)\X+(1-\lambda) \exp(h \, \A_2)\X$
is an IDE
\end{proposition}
\begin{corollary}
Let $\A=\A_D+\A_C$ where $\A_D$ is the diagonal part of $\A$ and $\A_C=\A-\A_D$ (the generalized curl) .
\vs6\noi Then $\T=\lambda \, \exp(h \,\A_D)\X+(1-\lambda) \exp(h \, \A_C)\X$
is an IDE
\end{corollary}
\begin{corollary}
Let $\A=\D+\N$ be the Jordan decomposition of $\A$
\vs6\noi Then $\T=\lambda \, \exp(h \,\D)\X+(1-\lambda) \exp(h \, \N)\X$
is an IDE
\end{corollary}
\begin{lemma}
Let $\A= \D+\N$ be the Jordan decomposition of $\A$ and consider
\[\dot{\X}=\N\, \X +\D \, \X\]
Then
\[(2 \,\exp(h\, \N)-\I) \X\]
is an IDE
\end{lemma}
\begin{corollary}
If $\N^2=0$
then $(\I+h\, \N )\X$ is an IDE
\end{corollary}
\begin{corollary}
If $\N^k=0$
then
\[(\I+\sum_{j=1}^{k-1} \frac{(h\, \N)^j}{j!})\X\]
is an IDE
\end{corollary}
\begin{lemma}
If $|a|>1$ and $\T(\X)=\exp(h\, \A)\, \X$
is an IDE, then
$\S(\X)=a\, \exp(h\, \A)\, \X$ is not an IDE.
\end{lemma}
\pf
$\S^n =a^n \T_{n\, h}$
\rl
\vs6\noi In general, a scalar multiple of an IDE is not an IDE even if axiom {\bf A1} is relaxed.
\begin{proposition}
\label{pr:comp}
Assume that $\T$ and $\S$ are IDEs.
Then $\T\circ \S$ is an IDE.
\end{proposition}
\pf
\[\T(\S(\X))-\X=\T(\S(\X))-\S(\X)+\S(\X)-\X\]
\rl
\begin{corollary}
Let $\B$ be any $n\times n$ matrix and let $\T$ be an IDE.
Then
\[\S(\X)=\exp(h\,\B)\, \T(\X)\]
is an IDE
\end{corollary}
\pf
$\S$ is the composition of two IDEs.
\rl
\vs6\noi Multiplication of an IDE by $\exp(h\,\B)$ is the analog of scalar multiplication of vectors. It is not commutative.
%==============================Array==========================
\section{\sml The Array of IDEs}
\label{sc:array}
%=====================Canonical IDE=========================
\subsection{\sml The Canonical IDE}
The canonical IDE arises from an autonomous ODE and provides the starting point for the general theory.
\vs6\noi {\bf Definition: The Canonical IDE}
\vs6\noi The canonical IDE $\T_h(\X)$ is given by the expression
\[\T_h(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)\] where $\A$ is an $n \times n$ matrix of real numbers; and, $\F$ is a $C^1$ diffeomorphism on a region of $\Rl^n$ and $0 \leq h \leq 1$.
The definition of the canonical IDE also applies for matrices and diffeomorphisms on $\Cx^n$, n-dimensional complex space. Both will be treated in this paper when convenient.
\vs6\noi While the parameter $h$ will be generally considered to lie in the interval $[0,1]$, for applications it may be reasonable to use larger values \cite{bi:rb6}.
\vs6\noi {\bf Second Form of the canonical IDE}
An algebraic rearrangement of factors and terms gives a second form for a canonical IDE.
\[\T(\X)=\exp(h\, \A)\X+ (\I-\exp(h\, \A))\F(\X)\]
Both forms will be used as needed. Typically, the parameter $h$ will be omitted as a subscript of $\T$ when there is no confusion.
\vs6\noi It is necessary to establish sufficient conditions under which the canonical IDE is an IDE which satisfies the IDE axioms.
\begin{theorem}
\label{tm:std} Let
\[\T_h(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)\]
with $\|\A^n\|\leq A_M$, $0\leq h \leq M_h$ and $\F(\X)$ is $C^1$ with $\|\F(\X)\|\leq M_{\F}$, $\|\J(\F(\X))\|\leq M_{\J}$ and that $\|\X\| \leq M_\X$.
Then, the canonical IDE is a formal IDE
\end{theorem}
\pf It is necessary to know when the canonical IDE satisfies the axioms for a formal IDE.
{\bf A1} is clear. {\bf A2} follows from
\[ \|\exp(h\, \A)\X+ (\I-\exp(h\, \A))\F(\X)\|\leq \]
\[\|\exp(h\, \A)\X\|+ \|(\I-\exp(h\, \A))\|\|\F(\X)\| \leq\]
\[\|\exp(h\, \A)\|\|\X\|+ \|(\I-\exp(h\, \A))\|\|\F(\X)\| \leq\]
\[\exp(h)\, M_A \,M_{\X}+M_A\,|(\I-\exp(h))|M_{\F}\leq M\]
\vs6\noi {\bf A3} follows from
\[\exp(h\, \A)(\X-\F(\X))+\F(\X)-\X=\]
\[(\exp(h\, \A)-\I)(\X-\F(\X))\]
and so
\[|\exp(h)-1|\, M_A\|(\X-\F(\X))\| \leq h\, M\]
\vs6\noi {\bf A5} Follows from
\[\T_h(\T_h(\X))=\exp(h\, \A)(\T_h(\X))+(\I-\exp(h\, \A))\F(\T_h(\X))\]
\[=\exp(h\, \A)(\exp(h\, \A)(\X-\F(\X))+\F(\X))+(\I-\exp(h\, \A))\F(\T_h(\X))\]
\[=\exp(2\, h\, \A)(\X-\F(\X))+\exp(h\, \A)\F(\X))+(\I-\exp(h\, \A))\F(\T_h(\X))\]
\[=\exp(2\, h\, \A)(\X-\F(\X))+\F(\X)+\]
\[\exp(h\, \A)\F(\X))-\F(\X)+(\I-\exp(h\, \A))\F(\T_h(\X))\]
\[=\T_{2\,h }(\X)+(\exp(h\, \A)-\I)\F(\X)+(\I-\exp(h\, \A))\F(\T_h(\X))\]
\[=\T_{2\,h }(\X)+(\I-\exp(h\, \A))(\F(\T_h(\X))-\F(\X))\]
\[=\T_{2\,h }(\X)+(\I-\exp(h\, \A))\,\J\F(\xi)(\T_h(\X)-\X)\]
\[\|\T_h^2(\X)-\T_{2\,h }(\X)\|\approx \|(\I-\exp(h\, \A))\,\J\F(\xi)(\T_h(\X)-\X)\|\]
\[\|\T_h^2(\X)-\T_{2\,h }(\X)\| \approx h^2\,M\]
\rl
\vs6\noindent
The fundamental theorem of canonical IDEs is as follows:
\begin{theorem}
Given the initial value problem
\[\begin{array}{lcl}
&&\dot{\X}=\A \X +\G(\X)\\
&&\X(0,\X_0)=\X_0, [t, t+h]
\end{array}
\]
where $\det(\A)\neq 0$ Let
\[\T_h(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)\] where
\[\F(\X)=-\A^{-1}\G(\X)\]
and assume that $\|\X(t)\|\leq M$ for all $t$. Then, over any small interval of time $(t,t+h)$
\[\|\T_h(\X_0)-\X(h,\X_0)\| \leq M \, h\]
\end{theorem}
\pf
\[\X(h)=\exp(h\, \A)\X_0+ \exp(h\, \A)\int_0^h\exp(-s\, \A)\G(\X(s))ds\]
\[=\exp(h\, \A)\X_0+ \exp(h\, \A)\int_0^h\exp(-s\, \A)ds \cdot (-1)\G(\X(\xi))\]
with $0 \leq \xi \leq h$ by the mean value theorem. Then
\[\X(h)=\exp(h\, \A)\X_0+ \exp(h\, \A)\,\A^{-1}(\exp(-h\, \A)-\I) \, (-1)\G(\X(\xi))\]
\[=\exp(h\, \A)\X_0+ \A^{-1}(\I-\exp(h\, \A))\, (-1)\G(\X(\xi))\]
\[=\exp(h\, \A)(\X_0-\A^{-1}(-1)\G(\X(\xi)))+ \A^{-1} (-1)\G(\X(\xi))\]
\[=\exp(h\, \A)(\X_0-\F(\X(\xi)))+ \F(\X(\xi))\]
\[= \exp(h\, \A)(\X_0-\F(\X_0))+ \F(\X_0)+\epsilon(h)\]
Since
\[\T(\X_0)= \exp(h\, \A)(\X_0-\F(\X_0))+ \F(\X_0)\]
therefore
\[\|\X(h)-\T_h(\X_0)\| \leq M\, h\] \rl
\vs6\noindent
For the simplest case, $\F(\X)=0$ the theorem states that
\[\T_h(\X_0)=\exp(h\, \A)\X_0 =\X(h)\]
\begin{corollary}
Consider the initial value problem:
\[\begin{array}{lcl}
\dot{\X}(t)&=&\H(\X)=\A\X + (\H(\X)-\A\,\X)=\A \X+\F(\X)\\
\X(0)&=&\X_0\\
\|\H(\X)\|&\leq & M_{\H}
\end{array}\]
Let $\T(\X)=(\exp(h\, \A)(\X-\F(\X))+\F(\X)$
where $\F(\X)=\H(\X)-\A \, \X$.
Then $\T(\X)$ is an IDE.
\end{corollary}
\pf
Direct computation \rl
\vs6\noi
This corollary shows that there are many ways to derive an IDE for a given initial value problem. The specific choice must be guided by the application.
%===================================ELEMENTARY IDEs=====================================
\subsection{\sml The Elementary IDEs}
\[\T(\X)=\exp(h\, \A)\, \X\]
%========================================================================
\subsection{\sml Complex IDEs}
\[\T(\X)=\exp(h\, \A(\X))\, \X\]
%===================================ALGEBRA ===============================
\newpage
\section{\sml The Algebra of IDEs}
\label{sc:alg}
This section will present several results necessary to have a ``calculus'' of IDEs.
\vs6\noi {\bf Multiplication of IDEs}
Let $\S_h $ and $\T_h$ be IDEs. Then the product of these two IDE is defined as follows
\[(\S_h \cdot \T_h) (\X)\dff (\S_h\,\circ \Ix_\X \circ \T_h) (\X)\]
Note that IDE multiplication is not commutative and does not coincide with IDE composition.
\subsection{\sml IDE Algebra}
\begin{lemma}
Assume $\T,\, \T_1,\, \T_2, \, \T_3, \ldots, \T_n$ are IDEs.
Then
\[\T+\sum_i^n ( \T_i-\I)\] is an IDE.
\end{lemma}
\pf Direct computation.
\rl
\begin{lemma}
Let $0<\lambda_i<1$ be such that
\[\sum_i^n \lambda_i=1\]
and assume $\T_1,\, \T_2, \, \T_3, ... \T_n$ are IDEs.
Then \[\sum_i^n \lambda_i \T_i\] is an IDE.
\end{lemma}
\pf
\[\X=\sum_i^n \lambda_i \X\]
\rl
%=============================
\begin{lemma}
If $\T$ is an IDE then $\T^{-1}$ is an IDE.
\end{lemma}
\pf
\[\|\T^{-1}-\I\|= \| (\I-\T)\T^{-1}\|\leq M\, \|(\I-\T)\|\]
\rl
\begin{proposition}
Let $\mathcal{G}$ be the set of all IDEs on a bounded region of $\R^n$.
Then $\mathcal{G}$ is a noncommutative group with the group operation being composition.
\end{proposition}
\pf
Apply preceding results
\rl
\begin{proposition}
Let $\lambda \ra \A(\X,\lambda)$ be a one parameter family of mappings from $[0, \,1]$ into the set of all bounded differentiable matrix functions in $\R^n$. Assume that for all $\X, \, \lambda$ $\|\A(\X, \lambda)\|\leq M$.
Let $\T_\lambda=\exp(h \, \A(\X,\lambda))$ be a family of IDEs and let $\lambda_1< \, \lambda_2< \, \lambda_3< \cdots <\lambda_n$ be an increasing sequence in $[0, \, 1]$, then
\[\frac{1}{n}\sum_i^n \T_{\lambda_i}\]
is an IDE.
\vs6\noi Also, let $0<\omega_1< \, \omega_2< \, \omega_3< \cdots <\omega_n<1$ and that $\sum_i^n\omega_i=1$
then
\[\sum_i^n \omega_i \, \T_{\lambda_i}\]
is an IDE.
\end{proposition}
%==============================COMBINING IDEs===========================
%===================================Algebra of Linear ID=================================
\subsection{\sml Basic Algebraic Relationships for Linear IDEs with Constant Coefficients}
In addition to the general theory of IDEs, there is a need for algebraic relationships that facilitate proofs and simplifications of problems. In this section the subscript designating the parameter $h$ will be omitted to simplify the computations. An IDE is linear if the component of Axiom {\bf A3} solves a linear ODE.
\vs6\noi {\bf Definition}
\begq
\T^G(\X)=\exp(\A\cdot h)(\X-G(\X))+G(\X)
\endq
then $ \T^0(\X)=\exp(\A\cdot h)\X$, where $0(\X)=0$ for all $\X$ and $S_G(\X)=\X-G(\X)$.
\begin{theorem}{Sums}
\noi Let $G,F$ be mappings from $\R^n$ to $\R^n$ and $(G + F)(\X)=G(\X)+F(\X)$
then
\begq
\T^{G + F}=\T^G+\T^F- \T^0
\label{eq;alg01}
\endq
\end{theorem}
\vs6\noi Proof:
\begin{eqnarray}
\T^{G + F}&=&\exp(\A\cdot h)(\X-G- F)+(G + F)\\
& =& \exp(\A\cdot h)(\X-G)+G+F -\exp(\A\cdot h)(F)\\
&=& \T^G +F-\exp(\A\cdot h)(F)\\
&=& \T^G +F-\exp(\A\cdot h)(F)+\T^0-\T^0\\
&=& \T^G+\T^F -\T^0
\end{eqnarray}
This theorem is easily generalized:
\[\T^{\sum F_i}=\sum_i\T^{F_i}-(n-1)\cdot \T^0\]
This result allows the decomposition of a very complex sum into smaller components.
\begin{theorem}{Compositions}
\noi Let $G,F$ be mappings from $\R^n$ to $\R^n$ and $(G \circ F)(\X)=G(F(\X))$
then
\begq
\T^{G \circ F}=\T^G(F)+\T^F -F
\label{eq;alg02}
\endq
\end{theorem}
\vs6\noi Proof:
\begin{eqnarray}
\T^{G \circ F}&=&\T^0(S^{G\circ F})+G\circ F\\
& =& \T^0(S^F+S^G(F))+G\circ F\\
&=& \T^0(S^F)+\T^0(S^G(F))+G\circ F\\
&=& \T^0(S^F)+\T^0(S^G(F))+F-F+G\circ F\\
&=& \T^0(S^F)+F+\T^0(S^G(F))+G\circ F-F\\
&=&\T^G(F)+\T^F -F
\end{eqnarray}
\begin{theorem}{Nonlinearity}
\noi Let $G$ be a mapping from $\R^n$ to $\R^n$ with $\X,\Y \in \R^n$ and let $\Lambda(\X,\Y)=G(\X+\Y)-(G(\X)+G(\Y))$ (note that $\Lambda$ measures the degree to which $G$ is nonlinear)
then
\begq
\T^{G} (\X+\Y)-(\T^G (\X)+\T^G(\Y))= (\T^0-\I)\Lambda (\X,\Y)
\label{eq:alg03}
\endq
\end{theorem}
\vs6\noi Proof: Direct computation.
\begin{theorem}{Conjugation}
\noi Assume $\T^G(\A)(\X)=\exp(\A\cdot h)(\X-G(\X))+G(\X)$
then
\begq
\T^{G}(\J^{-1}\A \J)(\X)= \J^{-1}\T^{\J G}(\A)(\J \X)
\label{eq:alg05}
\endq
\end{theorem}
\vs6\noi Proof: Direct computation.
\begin{theorem}{Diagonalization}
\noi Assume $\T^G(\A)(\X)=\exp(\A\cdot h)(\X-G(\X))+G(\X)$ where $\A=\D+\N$ and $\D \cdot \N=\N \cdot \D$
then
\begq
\T^G(\D+\N)(\X)=\T^0(\N)(\T^G(\D)(\X))=\T^0(\D)(\T^G(\N)(\X))
\label{eq:alg06}
\endq
\end{theorem}
\vs6\noi Proof: Direct computation. This result and the former apply to expressing $\A$ in Jordan Normal Form.
%==========================================Outline of IDEs==================================================
%===============================================================================================
\subsection{\sml An Inventory of IDEs}
The following is a listing of the most common IDEs. While the form of an IDE can be algorithmically complex, there are two forms that are common. Let $\T_t(\X_0)$ be the solution of an ODE with initial condition $\T_0(\X_0)=\X_0$, then
\[\X\ra \T_h(\X-G(\X))+G(\X)\]
is an IDE for smooth $G$. Next,
\[\X \ra \T_h(\X)+H(h,\X)\]
is an IDE for smooth $H$.
\vs6\noi{\bf Linear Homogeneous IDE}
\begq
\T_h(\X)=\exp(\A \cdot h)\X
\endq
\noi{\bf Origin}
\begq
\dot\X=\A \cdot \X
\endq
Axiom {\bf A1} of Sec. \ref{sc:fd}, requires that the absolute values of the eigenvalues of $\A$ are less than or equal to 1. Clearly, $\T_0={\bf I}$.
\begq
\|\T_h(\X)-\X\|=\|\exp(\A \cdot h)\X-\X\|\leq \|\A\|\cdot h
\endq
\vs6\noi{\bf Linear Inhomogeneous}
\begq
\T_h(\X)=\exp(\A \cdot h)(\X-G(\X))+G(\X)
\endq
\vs6 \noi {\bf Origin}
\begq
\dot\X=\A \cdot (\X-H(\X))+H(\X)
\endq
Note that $H\neq G$ but is derived to assure that the IDE is morphologically equivalent to the solution of the ODE.
\vs6\noi{\bf Simple Nonlinear Homogeneous}
\begq
\T(\X)=\exp(f(\X)\A \cdot h)\X
\endq
where $f$ is a complex valued function of $\X$. If $f$ is a constant along integral curves, this IDE originates from
\begq
\dot\X=f(\X)\A \cdot \X
\endq
\noi{\bf Origin}
The twist equation of [9], Sec. 5.1 is an example.
\vs6\noi{\bf Simple Nonlinear Inhomogeneous}
\begq
\T(\X)=\exp(f(\X)\A \cdot h)(\X-G(\X))+G(\X)
\endq
If $f$ is a constant along integral curves, this IDE originates from
\begq
\dot\X=f(\X)\A \cdot (\X-H(\X))+H(\X)
\endq
\noi{\bf Origin}
The square wave Twist and Flip equation from [10], Sec. 3.2 is an example.
\vs6\noi{\bf Compound Nonlinear Homogeneous}
\begq
\T(\X)=\exp(\A(\X) \cdot h)\X
\endq
\vs6\noi Special case
\begq
\T(\X)=\exp(\A(f(\X)) \cdot h)\X
\endq
Where $f$ is constant along integral curves.
\vs6\noi{\bf Origin}
This IDE originates from [9], Sec. 5.2.
\begq
\dot\X=\A(f(\X)) \cdot \X
\endq
\vs6\noi{\bf Compound Nonlinear Inhomogeneous}
\begq
\T(\X)=\exp(\A(\X) \cdot h)(\X-G(\X))+G(\X)
\endq
\noi{\bf Origin}
An example is the morphological equivalent of the Chua double scroll, Fig. (\ref{fg:scroll}).
%==========================Transcendental Functions===================
\subsection{\sml IDEs from Elementary Functions}
\label{sc:ef}
\begin{lemma}
Assume that $\A$ is matrix function of $\X$. Then the following are IDEs:
\[\begin{array}{l}
\cosh(h\,\A)\\
\I+ \sinh(h\,\A)\\
\I+\tanh(h\, \A)\\
\I+\sin(h\, \A)\\
\end{array}\]
\end{lemma}
\pf direct computation\rl
%17
%==========================================================Solving IDEs===================
\chapter{Solving IDEs}
\label{ch:solve}
\begin{center}
\parbox{3.5in}{\em Solving IDEs is analogous to solving ODEs, but is far less difficult; IDEs can often be solved in closed form in terms of elementary functions.}
\end{center}
\vs6\noi Solving an IDE consists in evaluating the operator $\exp(h \A(\X))$ where $\A(\X)$ is a matrix valued function on $\Rl^n$ for some $n$. There is clearly no complete solution of this problem, however many cases of interest do have solutions. The two most common methods are (1) decomposition of the exponential into simple factors that are easy to evaluate; (2) numerically evaluating the exponential. Not every exponential of the form $\exp(h\, \A(\X))$ can be numerically evaluated efficiently, but as will be demonstrated in Sec.\ref{sc:class}, many familiar cases can be numerically evaluated. Any IDE for which the exponentials can be evaluated will be called {\em solvable}.
\vs6\noi In addition to the two direct methods mentioned above there is a third method called {\em fusion}, proposition \ref{pr:fus}, which constructs an IDE by fusing two solvable IDEs.
%-----------------------------------------decomposition--------------------------------------
\section{\sml Decomposition of IDEs into Elementary Factors}
\label{sc:dec}
It would be very convenient if IDEs could be decomposed into simple factors that are well understood. However, the following only holds for commuting matrices. I.e.,
\[\exp(h\,( \A+\B))=\exp(h\, \A)\exp(h\, \B) \iff \A\,\B=\B\,\A\]
In terms of the Lie bracket commutativity is expressed as
\[[\A,\B]=0\]
The Baker--Campbell--Hausdorff Theorem expresses this difficulty
\[\exp( \A+\B+\frac{1}{2}[\A,\B]+\cdots )=\exp( \A)\exp( \B)\]
If $\A,\, \B$ are replaced by $h\,\A,\; h\,\B$ this becomes
\[\exp(h\, \A+h\,\B+\frac{1}{2}[\A,\B]\,h^2+\cdots )=\exp(h\, \A)\exp(h\, \B)\]
So to the first order in $h$, locally,
\[\exp(h\, (\A+\B)) \approx \exp(h\, \A)\exp(h\, \B)\]
so long as $[\A,\B]$ is sufficiently small.
\begin{proposition}
Let $\A,\, \B \in \M_n$ and assume
\[\det(\I-(\exp(h\, \A)\exp(h\, \B))) \neq 0\]
Then there exists a diffeomorphism $\F$ on $\Rl^n$ such that
\[\exp(h\, (\A+\B))\X = \exp(h\, \A)\exp(h\, \B)\, (\X-\F(\X))+\F(\X)\]
\end{proposition}
\pf Formally solve for $\F$. \rl
\vs6\noi As an alternative to decomposition, it is possible to obtain very good global solutions to IDEs when the solution is an attractor. In particular, using numerical methods to calculate the exponential of a matrix $\A$.
\[\C\,(\X-\F(\X))+\F(\X)\]
where $\C$ is the numerical approximation to $\exp(h\,\A)$
will provide a global solution to the IDE. This result arises from the observation that for ODEs which have attractor solutions, the Euler One-step integrator will provide a good approximation to the solution of the relevant ODE.
\begin{lemma}
Assume that
\[\dot{\X}=\A(\X)\X\]
has a global solution provided by the Euler one-step integrator
\[\X_{n+1}=\X_n+h \, \dot{\X}\]
Then
\[\X_{n+1}=\exp(h \A(\X_n))\,\X_n\] up to order $h^2$ is a global solution as well.
\end{lemma}
\pf
\[\exp(h \A(\X))\, \X= (\I+h\,\A(\X) +o(h^2))\, \X=\X+h\,\dot{\X} +o(h^2)\] \rl
\vs6\noi This result will be used in Sec.(\ref{sc:class}) in conjunction with the numerical evaluation of the exponential of a matrix to derive the classical chaotic attractors, such as the Lorenz attractor, as IDEs.
%==============================Analytical Evaluation==================================
\section{\sml Analytical Evaluation of the Exponential Function}
\label{sc:ae}
Analytical evaluation of the exponential function $\exp(h\,\A)$ where $\A$ is a matrix requires that for some $n$, $\A^n=f(\A, \I)$. This can be realized in several useful cases and is the subject of this section. If $\A$ is a function of $\X$ then $\lambda$ may also be a function of $\X$.
\begin{lemma}
Let $\X=(a_i)$ and $\A=\Ix_\X$. Then $\exp(\A)=\Ix_{(\exp(a_i))}$
\end{lemma}
\pf Direct computation. \rl
\begin{example}
Let
\[\A=\l( \mtx \lambda_1 .0. 0. \lambda_2\par\r)\]
then
\[\exp(h\, \A)=\l( \mtx \exp(h\,\lambda_1) .0. 0. \exp(h\,\lambda_2)\par\r)\]
\end{example}
\begin{lemma}
\label{lm:cos}
\vs6\noi Let $\B^2=-\omega^2 \I$, and $\omega>0$ where
\[\B=\l( \mtx 0 .\omega. -\omega. 0\par\r) = \omega\,\l( \mtx 0 .1. -1. 0\par\r) \hspace{6pt} \]
then
\[\exp(h\, \B)=\l( \mtx \cos(h\, \omega) .\sin(h\,\omega). -\sin(h\,\omega). \cos(h\, \omega)\par\r) \]
\end{lemma}
\pf Direct computation using
\[\exp\l( \mtx \lambda. \omega.-\omega. -\lambda\par \r)=\exp(\A)\]
\[\exp(\A)=\I+\A+\frac{\A^2}{2!} +\frac{\A^3}{3!} +\frac{\A^4}{4!} +\cdots\]
\[\A^3=\gamma\,\A\;\;\; \A^4=\gamma^2\,\I\;\;\; \A^5=\gamma^2\,\A\;\;\;\A^6=\gamma^3\,\I\;\;\; \A^7=\gamma^3\,\A\]
\[\exp(\A)=\I+\A+\frac{\gamma\,\I}{2!} +\frac{\gamma\,\A}{3!} +\frac{\gamma^2\,\I}{4!} +\frac{\gamma^2\,\A}{5!}+\frac{\gamma^3\,\I}{6!}+\frac{\gamma^3\,\A}{7!}+\cdots\]
\rl
\begin{lemma}
\label{lm:cosh}
\vs6\noi Let $\B^2=\omega^2 \I$, and $\omega>0$ where
\[\B=\l( \mtx 0 .\omega. \omega. 0\par\r) = \omega\,\l( \mtx 0 .1. 1. 0\par\r)\hspace{6pt}\]
then
\[\exp(h\, \B)=\l( \mtx \cosh(h\, \omega) .\sinh(h\,\omega). \sinh(h\,\omega). \cosh(h\, \omega)\par\r)\]
\end{lemma}
\pf Direct computation. \rl
\begin{lemma}
Let \[\B=\l( \mtx 0 .1. -a^2. 0\par\r)\]
$\B^2=-a^2 \I$ then
\[\exp(h\, \B)=\l( \mtx \cos(h\, a) .\sin(h\,a)/a. -a\,\sin(h\,a). \cos(h\, a)\par\r)\]
\end{lemma}
\pf Direct computation \rl
\begin{lemma}
Let
\[\B=\l( \mtx 0 .1. a^2. 0\par\r)\hspace{6pt}\]
then $ \B^2=a^2 \I$ and
\[\exp(h\, \B)=\l( \mtx \cosh(h\, a) .\sinh(h\,a)/a. a\,\sinh(h\,a). \cosh(h\, a)\par\r)\]
\end{lemma}
\pf Direct computation. \rl
\begin{lemma}
Let $\A^n=\0$. Then
\[\exp(\A)= \sum_i^{n-1}\A^i/i!\]
\end{lemma}
\pf Direct computation. \rl
\begin{example}
Let
\[\B=\l( \mtx 0 .1. 0. 0\par\r)\hspace{6pt}\]
then $\B^2=\0$ and
\[\exp(h\, \B)=\l( \mtx 1 . h. 0. 1 \par \r)\]
\end{example}
\begin{lemma}
Let
\[\exp\l( \mtx \lambda. \omega.-\omega. -\lambda\par \r)=\exp(\lambda \, \D+\omega \,\B)\]
where
\[\D= \l(\mtx 1.0.0.-1\par\r)\;\;\;\mbox{and} \;\;\; \B=\l(\mtx 0.1.-1.0\par\r)\]
Then
\[\A^2=(\lambda^2-\omega^2)\I=\gamma\,\I\]
and if $\gamma=\eta^2$ then
\[\exp(\A)= \sum_{n=0}^\infty \frac{\eta^{2n}}{(2n)!}\,\I+\frac{1}{\eta}\,\sum_{n=0}^\infty \frac{\eta^{2n+1}}{(2n+1)!}\,\A\]
\[\exp(\A)=\cosh(\eta)\,\I+\frac{1}{\eta}\sinh(\eta)\,\A\]
\[\exp(\A)=\l(\mtx \cosh(\eta)+\frac{\lambda}{\eta}\sinh(\eta).\frac{\omega}{\eta}\sinh(\eta).-\frac{\omega}{\eta}\sinh(\eta).\cosh(\eta)-\frac{\lambda}{\eta}\sinh(\eta)\par\r)\]
and if $\gamma=-\eta^2$ then
\[\exp(\A)=\l(\mtx \cos(\eta)+\frac{\lambda}{\eta}\sin(\eta).\frac{\omega}{\eta}\sin(\eta).-\frac{\omega}{\eta}\sin(\eta).\cos(\eta)-\frac{\lambda}{\eta}\sin(\eta)\par\r)\]
\end{lemma}
\pf Use
\[\cosh(i z)=\cos(z)\;\;\; i\, \sinh(iz)=\sin(z)\]
and lemma \ref{lm:cos} and lemma \ref{lm:cosh}.
\rl
\vs6\noi In the nonlinear case, if $\lambda^2-\omega^2=f(x,y)$, then this exponential map may shift between a hyperbolic function and a circular function. It is a result of how basic functions are derived that there is no one-dimensional ``elementary function'' that can shift between circular functions and hyperbolic functions as the sign of $f$ changes from positive to negative.
\vs6\noi A second value of this example is that it serves to illustrate the limitations of using the formula
\[\exp(h \, (\A+\B))\approx \exp(h \,\A)\, \exp(h \, \B)\] even for small $h$, in the nonlinear case, in that it loses the transition between the circular and hyperbolic functions even though it is a good local approximation.
\[\exp\l( \mtx \lambda. 0.0. -\lambda\par \r)\exp\l( \mtx 0. \omega.-\omega. 0\par \r)=\]
\[\l( \mtx \exp(\lambda). 0.0. \exp(-\lambda)\par \r)\, \l( \mtx \cos(\omega). \sin(\omega).-\sin(\omega). \cos(\omega)\par \r)=\]
\[ \l( \mtx \exp(\lambda)\cos(\omega). \exp(\lambda)\sin(\omega).-\exp(-\lambda)\sin(\omega). \exp(-\lambda)\cos(\omega)\par \r)\]
In this form, either the circular or hyperbolic function survives (depending on whether $\omega$ is real or complex) but the transition between them does not for a nonlinear IDE. For a linear IDE, the problem is that
\[h^2[\A,\B]\] in the Baker--Campbell--Hausdorff formula may still be greater than $h$. In this case, the number $h^2 \,\lambda\, \omega$ may be large and so the approximation may fail globally.
\vs6\noi An approach to this problem is the IDE
\[\T(\X)=f(\X)\,\exp(h\, \A_1)+(1-f(\X))\,\exp(h\, \A_2)\]
where
\[\A_i=\l( \mtx \lambda. \omega.(-1)^i\,\omega. -\lambda\par \r)\]
The IDE $\T$ shifts between circular and hyperbolic as a function of $\X$.
\vs6\noi More complex recurrence relations will be used to derive an IDE for the R\"{o}ssler attractor in Sec.\ref{sc:rs} as in Lemma \ref{lm:det0} below:
\begin{lemma}
\label{lm:det0}
Assume $\A^2=a \, \A$ and that $\det(\A)=0$.
Then
\[\exp(h\,\A)=\I+ \l(\frac{\exp(a \,h)-1}{a}\r)\,\A \]
\end{lemma}
\pf
\[ \A^3=a^2\, \A\;\;\; \A^4=a^3\, \A\;\;\; \A^5=a^4\, \A \cdots\]
\[\exp(h\, \A)=\I+h\,\A+\frac{h^2\,a}{2!}\, \A+\frac{h^3\,a^2}{3!}\, \A \cdots=\]
\[\I+\frac{1}{a}\, \A\,\l(h\,a+\frac{h^2\,a^2}{2!}+\frac{h^3\,a^3}{3!}+ \cdots\r)=\I+\l(\frac{\exp(h\,a)-1}{a}\r)\, \A\]
\rl
\begin{lemma}
The results of lemma \ref{lm:det0} hold even when $\det(\A)\neq 0$.
\end{lemma}
\pf Direct computation.
\rl
\vs6\noi In general, many matrices have a recurrence relationship that can be used to evaluate the exponential function; however, most exponentials will require numerical evaluation.
\section{Decomposition of Three-dimensional Matrices}
Let $\A=(a_{i \, j})$.
\[\A= \]
%----------------------------------------------Numerical Evaluation ----------------------------------
\section{\sml Numerical Evaluation of the Exponential Function}
\label{sc:num}
When an analytical evaluation of the exponential function is not available, it may be still possible to solve the IDE by numerically evaluating the exponential function for a fixed value of $h$. In the following example $h=0.001$ is used in the numerical evaluation of $\exp(h\, \A)$.
\vs6\noi {\bf Example}
\[\T(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)\]
\[\F(\X)=\l(\begin{array}{c}
2 + f(x \cdot z) \cdot \lambda\\
(0.5 - f(x \cdot z) \cdot \lambda) / 0.398\\
(-0.5 + f(x \cdot z)\cdot \lambda ) / 0.398
\end{array}\r)\]
\[\A=\l(\begin{array}{rrr}
0.0 & -1.0 &-1.0 \\
1.0 &0.398& 0.0\\
9.52236& 0& -0.21
\end{array}\r)\]
\[\exp(h\,\A)=\]
\[\begin{array} {rcl}
a1(1, 1) = 0.999994739091538 & a1(1, 2) = -0.00100019727257 & a1(1, 3) = -0.000999893253783\\
a1(2, 1) = 0.001000197272570& a1(2, 2) = 1.0003975790802600& a1(2, 3) = -0.0000005000309\\
a1(3, 1) = 0.009521343524090& a1(3, 2) = -0.00000476147424& a1(3, 3) = 0.999785261539144
\end{array}\]
\[ f(u) = \frac{\exp(8 \cdot u) - 1}{\exp(8 \cdot u) + 1}=\tanh(4\, u)\]
\[\begin{array}{rcl}
X1& = &2 + f(x \cdot z) \cdot \lambda\\
Y1&=& (0.5 - f(x \cdot z) \cdot \lambda) / 0.398\\
Z1& =& (-0.5 + f(x \cdot z)\cdot \lambda ) / 0.398\\
\lambda &=& 1.7899
\end{array}\]
\vs6\noi Code starts here:
\vs6\noi
[{\bf Insert initialization data here}]
\[ x = 0: y = 0: z = 0\]
\vs6\noi
For i = 1 to N
\[\begin{array}{rcl}
X1& = &2 + f(x \cdot z) \cdot \lambda\\
Y1&=& (0.5 - f(x \cdot z) \cdot \lambda) / 0.398\\
Z1& =& (-0.5 + f(x \cdot z)\cdot \lambda ) / 0.398
\end{array}\]
\[\begin{array}{rcl}
u1& =& a1(1, 1) \cdot (x - X1) + a1(1, 2) \cdot (y - Y1) + a1(1, 3) \cdot (z - Z1) + X1\\
v1& =& a1(2, 1) \cdot (x - X1) + a1(2, 2) \cdot (y - Y1) + a1(2, 3) \cdot (z - Z1) + Y1\\
w1& =& a1(3, 1) \cdot (x - X1) + a1(3, 2) \cdot (y - Y1) + a1(3, 3) \cdot (z - Z1) + Z1
\end{array}\]
\[ x = u1 \; y = v1 \; z = w1\]
[{\bf Insert display data here}]
\vs6\noi
Next i
\begin{figure}[htbp]
\includegraphics[height=3.283in,width=3.533in,angle=0]{C:/Research/Book/Figures/eps/Rossler.eps}
\caption{{\bf Chaotic Linear Fusion IDE}}
\label{fg:Rossler1}
\end{figure}
%========================Combined methods===============================
\subsection{\sml Combining Analytical and Numerical Methods}
In some cases it is useful to decompose a matrix into a constant component and a variable component. For example
\[\A=\l(\mtxs 0. -1.-1. 1. 2. 0. sgn(x). 0. -sgn(x)\par\r)=\l(\mtxs 0. -1.-1. 1. 2. 0. 0. 0. 0\par\r)+\l(\mtxs 0. 0.0. 0. 0. 0. sgn(x). 0. -sgn(x)\par\r)\]
The constant matrix can be numerically evaluated and the second term can be analytically evaluated since it has a simple recurrence relation,
\[\A^2=\pm \A\]
which was treated in lemma \ref{lm:det0}.
%=========================Fusion========================
\subsection{\sml Fusion Methods}
Assuming that the IDE of interest can be partitioned into simpler IDEs, the solution may be obtained through fusion. For example, given two solvable IDEs, $\T,\;\;\S$ and a complex number $\lambda$ the theory asserts that
\[\lambda \T+(1-\lambda) \S\]
is also an IDE. More generally, certain conveniently chosen real valued function of a vector variable $f(\X)$ may be used for fusion:
\[f(\X)\, \T+(1-f(\X))\, \S\]
is an IDE.
\vs6\noi A very simple case of interest is illustrated by the fusion of two linear hyperbolic IDEs,
\[f(\X)\,\exp(h \A)+(1-f(\X))\,\exp(h\, \C)\]
where $f(\X)= \tanh(\beta g(\X))$.
For example,
\[\A=\l(\mtxs 2. 0. 0.0. 1. 0.0.0.-1\par \r) \hspace{6pt} \C=\l(\mtxs -2. 0. 0.0. 1. 0.0.0.1\par \r)\]
and $g(\X)=x_1$.
\vs6\noi The function $f$ has ``fused'' two separate linear IDEs, $ \exp(h \A), \; {\rm and}\; \exp(h\, \C)$, into a single IDE which is nonlinear due to the presence of the fusion function $f$, see Sec.(\ref{sc:vortex}) where fusion is used to create a vortex attractor.
\vs6\noi It is possible to use exponential fusion as well to construct an IDE:
\[\T(\X)=\exp(h \,(f(\X)\A+ (1-f(\X))\,\C))\,\X\]
\vs6\noi The following illustrates Boolean fusion per corollary \ref{cor:bolfus}. Boolean fusion is more complex than standard fusion in that it requires a Boolean function that is iterated to define the transition surface. This makes the system four-dimensional. The Boolean function is the mapping $r\ra \bar{p}\wedge(r \vee q)$ where $p, \;\;q$ are functions of $\X$.
\begin{figure}[htbp]
\includegraphics[height=2.18in,width=4.493in,angle=0]{C:/Research/Book/Figures/eps/Canonical3D.eps}
\caption{{\sml The Canonical IDE Analog of a Shift. In this Figure $x=0.1\; y=0.0\, z=h-1$}}
\label{fg:canonical3d}
\end{figure}
\begq
\label{cd:can3d}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for all Plates of Fig. \ref{fg:canonical3d} is as follows:}\\
&&\mbox{ Initial conditions must be chosen properly}\\
x&=& 0.0\\
y&=& 0.0\\
z&=& h-1.0\\
&& \mbox{For i = 1 to N}\\
\\
q &=& 0.5 \cdot (1 + {\rm sgn}(h-1 - z))\\
p &=& 0.5 \cdot (1 + \tanh(200\,(z +h)))\\
r &=& (1 - p) \cdot (r + q - r \cdot q)\\
\\
u_1 &=& \exp(-0.05\cdot h)((x - 0.5) \cdot \cos(4.0 \cdot h) + y \cdot \sin(4.0 \cdot h)) + 0.5\\
v_1 &=& \exp(-0.05\cdot h)(y \cdot \cos(4.0 \cdot h) - (x - 0.5) \cdot \sin(4.0 \cdot h))\\
w_1&=& \exp(0.2\cdot h)(z+1)-1\\
\\
u_2 &=& \exp(-0.05\cdot h)(x \cdot \cos(4.0 \cdot h) + y \cdot \sin(4.0 \cdot h))\\
v_2 &=& \exp(-0.05\cdot h)(y \cdot \cos(4.0 \cdot h) - x \cdot \sin(4.0 \cdot h))\\
w_2&=& \exp(0.2\cdot h)\, z\\
\\
x &=& r \cdot u_1 + (1-r) \cdot u_2\\
y &=& r \cdot v_1 + (1-r) \cdot v_2\\
z&=& r \cdot w_1 + (1-r) \cdot w_2\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
The Boolean function $r$ above is an algebraic version of the logical expressions found in lemma \ref{lm:ar} and is therefore a $C^\infty$ function. That this is an IDE follows from corollary \ref{cor:bolfus}. The IDEs are of the form
\[\T_h(\X)=\exp(h\, \A)\, \exp(h \, 4.0\, \B)(\X-\X_0)+\X_0 \mbox{ and } \S_h(\X)=\exp(h\, \A)\exp(h \, 4.0\, \B)\X\]
where
\[\A=\l(\mtxs \lambda_1.0.0.0.\lambda_1.0.0.0.\lambda_2\par \r)\hspace{12pt} \B=\l(\mtxs 0.1.0.-1.0.0.0.0.0\par \r) \]
where $\lambda_1=0.05$ and $\lambda_2=0.2$. There is nothing special about the two fixed points chosen, one of which is the origin. The Boolean transition surface is critical.
\begin{figure}[htbp]
\includegraphics[height=1.88in,width=4.023in,angle=0]{C:/Research/Book/Figures/eps/Canonical3D02.eps}
\caption{{\sml The Canonical IDE Analog of a Shift.}}
\label{fg:canonical3d02}
\end{figure}
\begq
\label{cd:can3d02}
\left.
\begin{array}{lcl}
&&\mbox{\bf The Code for the Dynamics for all Plates of Fig. \ref{fg:canonical3d02} is as follows:}\\
&&\mbox{ Initial conditions must be chosen properly}\\
x&=& 0.0\\
y&=& 0.0\\
z&=& h-1.0\\
&& \mbox{For i = 1 to N}\\
\\
q &=& 0.5 \cdot (1 + \tanh(20\cdot (h-1 - z)))\\
p &=& 0.5 \cdot (1 + \tanh(20\cdot (z +h)))\\
r &=& (1 - p) \cdot (r + q - r \cdot q)\\
\\
u_1 &=& \exp(-0.05\cdot h)((x - 0.5) \cdot \cos(4.0 \cdot h) + y \cdot \sin(4.0 \cdot h)) + 0.5\\
v_1 &=& \exp(-0.05\cdot h)(y \cdot \cos(4.0 \cdot h) - (x - 0.5) \cdot \sin(4.0 \cdot h))\\
w_1&=& \exp(0.2\cdot h)(z+1)-1\\
\\
u_2 &=& \exp(-0.05\cdot h)(x \cdot \cos(4.0 \cdot h) + y \cdot \sin(4.0 \cdot h))\\
v_2 &=& \exp(-0.05\cdot h)(y \cdot \cos(4.0 \cdot h) - x \cdot \sin(4.0 \cdot h))\\
w_2&=& z-h\\
\\
x &=& r \cdot u_1 + (1-r) \cdot u_2\\
y &=& r \cdot v_1 + (1-r) \cdot v_2\\
z&=& r \cdot w_1 + (1-r) \cdot w_2\\
&&\mbox{\bf Plot Points}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
%======================Continuous Transformation Groups========================================
\subsection{\sml IDEs and Continuous Transformation Groups}
Continuous transformation groups arise from ODEs in general \cite{bi:ei}. They have a direct relationship with PDEs. This section will only touch on the relationship between IDEs and continuous transformation groups from first order PDEs. Further development will be presented in a later paper.
\vs6\noi The following form of a vector field will be used:
\[\l(\vt \dot{x}. \dot{y} \par\r)=\l(\frac{\dot{r}}{r}{\bf I} +\dot{\theta} {\bf B}\r)\l(\vt x. y \par \r) \]
This form leads to the local IDE approximation,
\[\T_h(\X)=\exp(h \, \dot{r}/r)\,\exp(h \,\dot{\theta}\,\B)\, \X=\]
\[\exp(h \, \dot{r}/r)\,\l(\mtx \cos(h\, \dot{\theta}).\sin(h\, \dot{\theta}).-\sin(h\, \dot{\theta}).\cos(h\, \dot{\theta})\par \r)\, \l(\vt x.y \par \r)\]
Taking $\dot{r}=0$ and $\dot{\theta}=r$ produces the twist. Setting $\dot{r}=r\, (1-r)$ produces a limit cycle. Setting $\dot{r}=0$ and setting $\dot{\theta}=\sqrt{1-k^2\, \sin^2(\theta)}$ produces elliptic functions. IDEs having specific characteristics may be derived using this form of a vector field.
\vs6\noi {\bf Example} There are two assumptions. (1) The group
is measure preserving; (2) the Group preserves lines
through the origin. This gives the PDE for $\dot{r}$:
\[\frac{1}{r}\langle \X,\nabla \dot{r}\rangle+\frac{\dot{r}}{r}+\langle \B\,\X, \nabla \dot{\theta}\rangle=0\]
By a change of notation the PDE is put into standard form. Let
\[\l( \vt \dot{r}. \dot{\theta}\par \r)=\l(\vt g(x,y). f(\theta) \par \r)\]
Now the PDE becomes
\begin{eqnarray}
x\, p+ y\, q= -(z+r\,f'(\theta))
\end{eqnarray}
\vs6\noi Making further simplifying assumptions gives the form
\begin{eqnarray}
x\, p+ y\, q= -(c+1)z
\end{eqnarray}
By an application of standard methods for solving first-order partial
differential equations gives
\[z=y^{-(c+1)}\,F(x/y)\]
Since $y=r\,\sin(\theta), x=r\,\cos(\theta)$ choosing $f(\theta)=a+b\sin(\theta)$
\[r=r_0\l(\frac{f(\theta_0)}{f(\theta)}\r)^{1/c}\]
By choosing
$a>b$, the equation $\dot{\theta}=a+b\sin(\theta)$ is
solvable in closed form for $\sin(\theta)$.
\[\frac{b+a\sin(\theta)}{a+b\sin(\theta)}=\sin(k\,t+C_0)\]
where $k=\sqrt{a^2-b^2}$, and $C_0$ is a constant of integration
to be determined from the initial conditions. From this relation
I obtain $\sin(\theta), \cos(\theta)$. The general solution in rectangular
coordinates is given by:
\[\l(\vt x(t). y(t) \par\r)=r_0\,\l(\frac{f(\theta_0)}{f(\theta)}\r)^{1/c}\l(\vt \cos(\theta).\sin(\theta)\par \r) \]
where $c>0$. Note that the root factor is not a constant since
$f(\theta)$ is a function of time. The orbits cannot be
linear, and, by construction, the system is
divergence-free.
\vs6\noi To obtain the IDE, substitute $\theta(h)$ for $\theta(t)$ and substitute for $\theta_0, \;\;\;r_0$ their functions of $x,\;y$
\[\T_h(\X)= r(\X)\,\l(\frac{f(\theta(\X))}{f(\theta(h))}\r)^{1/c}\l(\vt \cos(\theta(h)).\sin(\theta(h))\par \r) \]
%======================partial differential equations================================
\subsection{\sml Partial Differential Equations}
An important class of partial differential equations are linear second order equations such as
\[\frac{\pr^2z}{\pr x^2}+\frac{\pr^2z}{\pr y^2}=0\]
This equation is solved by separation of variables by assuming a solution of the form
\[z(x,y)=f(x)g(y)\]
Substituting $z(x,y)$ in to the PDE and rearranging gives the equation
\[\frac{f''(x)}{f(x)}=-\frac{g''(y)}{g(y)}\]
and since each side of this equation is a function of a different independent variable it can be concluded that
\[\frac{f''(x)}{f(x)}=\lambda \;\mbox{ and }\; \frac{g''(y)}{g(y)}=-\lambda\]
This technique, known as separation of variables, has reduced the problem of solving a PDE to solving two familiar ODEs. Putting these two ODEs into IDE form gives
\[\dot{\X}=\A\,\X \; \mbox{ and }\;\dot{\Y}=\B\,\Y\]
These may be solved as IDEs
\[\T_h(\X)=\exp(-h\,\lambda \A)\,\X\; \mbox{ and }\; \S_h(\Y)=\exp(h\,\lambda \B)\,\Y\]
\vs6\noi A PDE usually comes with a boundary condition which makes it necessary to form an infinite sum of the form
\[ \sum_{i=0}^\infty a_i \, \exp(-h_1\,\lambda_i \A)\,\X \,\exp(h_2\,\lambda_i \B)\,\Y \]
In this form, the vectors $\X,\;\Y$ represent the initial conditions associated with the ODE. These must be absorbed into the boundary conditions represented by the eigenvalues and the coefficients $a_i$. This is expressed in the following equation
\[ \sum_{i=0}^\infty \exp(-h_1\,\lambda_i \A)\,\exp(h_2\,\lambda_i \B)\,\C_i \]
Typically, the form of the solution would be
\[z(x,t)=\sum_{i=0}^\infty \exp(-t\,\lambda_i \A)\,\exp(x\,\lambda_i \B)\,\C_i \]
where evaluation is straightforward. For the IDE, there must be a new convention for evaluation since iteration replaces evaluation of the function variables.
\[z(n\, h_1,n\, h_2)=\sum_{i=0}^\infty \exp(-n\, h_2\,\lambda_i \A)\,\exp(n\, h_1\,\lambda_i \B)\,\C_i \]
Generalizing
\[z_n=\sum_{i=0}^\infty \T^n_{\lambda_i \,h_1}\,\S^n_{\lambda_i \,h_2}\,\C_i \]
More generally, where the parameters are understood
\[z_n=\sum_{i=0}^\infty \T^n_i\,\S^n_i\,\C_i \]
and $\T,\; \S$ are IDEs derived from the separation of variables ODEs.
\vs6\noi Another variation is given by
\[\sum_{i=0}^\infty \T^n_i\,\S^n_i\,\C_i(\X) \]
where $\C_i(\X)$ is a vector function which satisfies an ODE. This is the origin of the Lorenz ODE.
%*********************************************************************************************************************************************************************
%18
%==========================Calculus==========================
\chapter{The Calculus of IDEs}
\label{ch:cal}
\begin{center}
\parbox{3.5in}{\em IDE theory parallels some areas of vector calculus. }
\end{center}
%===============================LIMITS=============================
\section{\sml Infinite Series and Limits}
\label{sc:lm}
\begin{proposition}
Assume that each $f_i$ is a diffeomorphism, that the series $\sum_i^\infty f_i(\X)$ converges absolutely with $\sum_i^\infty f_i(\X)=1$, and that $\T_1,\, \T_2, \, \T_3, \ldots, \T_\infty$ are IDEs.
Then \[\sum_i^\infty f_i(\X) \T_i\] is an IDE.
\end{proposition}
\pf Direct computation.
\rl
\begin{corollary}
Assume $\sum_i^\infty a_i=1$ in an absolutely convergent series.
Then \[\sum_i^\infty a_i\T_i\] is an IDE.
\end{corollary}
\begin{corollary}
\[\sum_i^n \frac{1}{n}=1\]
therefore
\[\frac{1}{n}\sum_i^n \T_i\] is an IDE.
\end{corollary}
\begin{proposition}
Assume
\[\dot{\X}=\sum_i^\infty \A_i(\X)\X\] is absolutely convergent and that $\sum_{i=1}^\infty \lambda_i=1$ with $0< \lambda_i$.
Let
\[\T_i(\X)=\exp(h \A_i(\X))\, \X\]
then
\[\sum_i^\infty \lambda_i \T_i\]
is an IDE
\end{proposition}
\begin{lemma}
Assume $\T_n$ is a uniformly bounded infinite sequence of IDEs and that
\[\T_n\ra \T\]
Then $\T$ is an IDE.
\end{lemma}
\pf
\[\|\T-\I\|=\|\T-\T_n+\T_n-\I\|\]
\rl
\begin{lemma}
Assume $\T_n$ is a uniformly bounded infinite sequence of IDEs with bound $M$ and that
\[\sum_i^\infty \, a_i \] is absolutely convergent.
Then
\[\sum_i^\infty \, a_i \, \T_i \] is absolutely convergent, but is not necessarily an IDE.
\end{lemma}
\pf
\[\sum_i^\infty \, a_i \, M\]is absolutely convergent.
\rl
\begin{lemma}
Assume $\T_n$ is a uniformly bounded infinite sequence of IDEs with bound $M$ and that
\[\sum_i^\infty \, a_i \] is absolutely convergent. Let
\[\sum_n a_n =S\]
Then
\[\frac{1}{S}\sum_i^\infty \, a_i \, \T_i \] is an IDE.
\end{lemma}
\pf
\[\frac{1}{S}\sum_i^\infty \, a_i =1\]
\rl
\begin{lemma}
Assume $\T_n$ is a uniformly bounded infinite sequence of IDEs with bound $M$ and that
\[\sum_i^\infty \, a_i \] is absolutely convergent.
Then
\[\T+\sum_i^\infty \, a_i \, (\T_i-\I) \] is an IDE.
\end{lemma}
\pf
Direct computation.
\rl
\vs6\noi {\bf Definition} An a.e. IDE is a mapping that is an IDE almost everywhere in the Lebesgue sense.
\begin{lemma}
Let $\T_i=\exp(h\, f(r_i)\, \B)$ and let $\Rl^n=\cup E_i$ where $E_i \cap E_j = \emptyset$ for $i \neq j$
\[\chi_{E_i}(\X)= 1 \;\;\mbox{for} \;\; \X \in E_i\]
and zero otherwise.
\[\sum_i^\infty \, \chi_{E_i}(\X)\, =1 \]
Then
\[\sum_i^\infty \, \chi_{E_i}(\X)\,\exp(h\, f(r_i) \B)\, \]
is an IDE a.e. and
\[\exp(h\, f(r)\, \B)\approx \sum_i^\infty \, \chi_{E_i}(\X)\,\exp(h\, f(r_i) \B)\]
\end{lemma}
\pf Follows from previous results.
\rl
%=======================================VECTOR CALCULUS========================
\section{\sml The Vector Calculus of IDEs: Divergence and the Generalized Curl}
As mentioned earlier the generalized curl can be viewed as composed of a ``bending'' component as in a node [10], page 93, and a ``rotation'' component as in a spiral or circle. This idea can be generalized to any number of dimensions by using the matrix $\A_C$. The rotation component in any number of dimensions is determined by the discriminant of the characteristic equation for the eigenvalues which determines when there exist a pair of imaginary eigenvalues, $\lambda=a\pm b \, i$. The discriminant plays an important role in the formation of complexity.
\vs6\noi {\bf The IDE Vector Field}
\vs6\noi The approximate vector field is given by
\begq
\T(\X)-\X = \exp(h\,\A)(\X-\F(x,z))+\F(x,z)-\X = (\exp(h\,\A)-\I)(\X-\F(x,z))\endq
Dividing by $h$ the vector field of this equation is
\begq \V(\X)=\A\,(\X-\F(x,z))=\A\,\X-\A\,\F(x,z)\endq
to the first order in $h$.
\vs6\noi {\bf The Jacobian of the Vector Field}
\vs6\noi The Jacobian of this vector field is given by
\begq \J(\V)=\J(\A\,\X)-\J(\A\,\F(x,z))\endq
The first term (the linear/folding part of the IDE) is
\begq\J(\A\,\X)=\A\endq
%==============================IDE are more General than solutions of ODEs============================
%=========================================================================
\section{\sml IDEs are more General than ODEs}
\label{sc:OD}
To illustrate the generality of IDEs impose the condition that the Diffeomorphism be measure preserving.
\vs6 For the conventional linear case $f(t)= \exp(\A\cdot t)\X_0$ the IDE is given by the mapping $\T_h(\X)= \exp(\A\cdot h)\X$, where $\X$ is a vector and $\A$ is an n by n matrix. If the solution of the ODE is bounded, then the IDE is a very good approximation for even very large $h$. For $h=1$ a finite difference equation is obtained.
\vs6 \noi Consider the following equation:
\begq
\l(\vt x.y \par \r)\ra \l(\vt x\cdot \cos(h)+y\cdot \sin(h)\cdot f(x,y). y\cdot \cos(h)-x\cdot\sin(h) \par \r)
\label{eq:IDE01}
\endq
$f=1$ gives the familiar Linear IDE that will approximate the solution to the corresponding ODE. To see how to obtain a much larger range of IDEs, require that $\det(J(\T))=1$. Then derive the first order PDE
\begq
\cos^2(h)+y\cos(h)\sin(h)f_x+\sin^2(h)(f+y f_y)=1
\endq
Changing notation to make the equations clear and correspond to conventional solution methods gives
\begq
\cos^2(h)+y\cos(h)\sin(h)\cdot z_x+\sin^2(h)(z+y z_y)=1
\endq
this gives
\footnotesize
\begin{eqnarray}
y\cos(h)\sin(h)\cdot z_x+\sin^2(h)\cdot z+\sin^2(h)\cdot y\cdot z_y&=&\sin^2(h)\\
y\cos(h)\sin(h)\cdot z_x+\sin^2(h)\cdot y \cdot z_y&=&\sin^2(h)-\sin^2(h)\cdot z\\
y\cos(h)\cdot z_x+\sin(h)\cdot y \cdot z_y&=&\sin(h)(1-z)
\end{eqnarray}
\normalsize
The solution of this PDE is derived from the relations
\begq
\frac{dx}{y\cos(h)}=\frac{dy}{y\sin(h)}=\frac{dz}{\sin(h)(1-z)}
\endq
from which $F(x\sin(h)-y\cos(h), (1-z)\cdot y)=0$, is obtained, where $F$ is any arbitrary function of two variables.
Choosing $F(x,y)= x\sin(h)-y\cos(h)+ (1-z)\cdot y=0$ and solving for $z$ gives
\[z=\frac{x\sin(h)+y\cdot (1-\cos(h))}{y}\]
Substituting this into Eq.( \ref{eq:IDE01}) gives
\begq
\label{eq:IDE02}
\l(\vt x.y \par \r)\ra \l(\vt (\cos(h)+\sin^2(h))\cdot x + \sin(h)(1-\cos(h))\cdot y . y\cdot \cos(h)-x\cdot\sin(h) \par \r)
\endq
In practice, $F$ is determined by ``boundary conditions''; here only a simple example is presented to illustrate the ideas. To see that this IDE does not come from the solution of an ODE note that Axiom {\bf A5} is violated:
\[\T_h^2(\X)\neq \T_{2\cdot h}(\X)\]
%=============================================Generalizing the Example==========================================
%=====================================================================================
\subsection{\sml Generalizations of the Example}
\label{sc:gen}
In this section two ideas are presented. (1) IDEs can arise from first order PDEs; (2) When using IDEs to solve ODEs, it is possible to incorporate first integrals of the ODE to simplify the PDEs.
%======================================PDEs====================================================
\vs6\noi {\bf An IDE, like the solution of a PDE, may contain one or more arbitrary functions}
Consider
\begq
\label{eq:IDE3}
\T\l(\vt x.y \par \r)= \l(\vt x\cdot \cos(h)+y\cdot \sin(h)\cdot f(x,y). y\cdot \cos(h)-x\cdot\sin(h)\cdot g(x,y) \par \r)
\endq
Requiring that the IDE satisfy Axiom {\bf A5} simplifies the expression because Axiom {\bf A5} implies that $f\cdot g=1$.
The requirement that $\det (\J(\T))=1 $ imposes conditions on $f$ which produce a PDE for $f$.
\footnotesize
\begin{eqnarray}
{\bf Det} (\J(\T))&=&(\cos(h)+y\cdot \sin(h)\cdot f_x)(\cos(h)-x\cdot \sin(h)\cdot g_y)+\nonumber \\
& & (\sin(h)\cdot g+x\cdot \sin(h)\cdot g_x)(\sin(h)\cdot f+y\cdot \sin(h)\cdot f_y)\\
&=& \cos^2(h)+\sin^2(h)\cdot f\cdot g+\sin^2(h)\cdot x\cdot y\cdot(f_x\cdot g_y-f_y\cdot g_x)+\nonumber\\
& & \sin(h) \cos(h)(y\cdot f_x-x\cdot g_y)+\sin^2(h)\cdot(y\cdot g\cdot f_y+x\cdot f\cdot g_x)\\
&=& 1
\label{eq:IDE7}
\end{eqnarray}
\normalsize
Note that \[g_y\cdot f_x-f_y\cdot g_x=0\]
\begin{eqnarray}
{\bf Det} (\J(\T)) &=&\nonumber \\
& &1+ \sin(h) \cos(h)(y\cdot f_x-x\cdot g_y)+\nonumber\\
& & +\sin^2(h)\cdot(y\cdot g\cdot f_y+x\cdot f\cdot g_x) = 1
\label{eq:IDE8}
\end{eqnarray}
\normalsize
or, simplifying further
\begq
\cos(h)\cdot (y\cdot f_x-x\cdot g_y) + \sin(h)\cdot (y\cdot g\cdot f_y+x\cdot f\cdot g_x) = 0
\label{fg:IDE8}
\endq
Since $g\cdot f=1$, one possible solution of this PDE gives the IDE
\begq
\T\l(\vt x.y \par \r)\ra \l(\vt x\cdot \cos(h)+y\cdot \sin(h)\cdot r. y\cdot \cos(h)-x\cdot\sin(h)/r \par \r)
\label{eq:IDE4}
\endq
Where $r^2=0.5(x^2+\sqrt{x^4+4y^2})$ as seen in \cite{bi:bc6}. Equation (\ref{eq:IDE4}) is a nonlinear autonomous IDE that arises from a nonlinear autonomous ODE as seen in \cite{bi:bc6}, sec. 5.2. By omitting the addition axiom {\bf A5}, a broader range of IDEs is obtained. In general, autonomous IDEs contain the initial conditions and $h$ with no time variable, and possibly an arbitrary function (to be illustrated later) that is determined by the conditions of the problem to be solved.
%=============================================Boundary Conditions======================================
\subsection{\sml Using a First Integral as a Boundary Condition on the PDEs}
Consider
\[\dot \X=F(\X)\hspace{12pt} \X(0)=\X_0\]
and assume that there is an invariant function $G(\X)=G(\X_0)$, or first integral, for $\dot \X$. Then this relationship can be used in Eq.(\ref{eq:IDE3}) to place constraints on the system of PDEs that arise from ${\bf Det}(\J(\T))=1$. In particular $G(\T(\X))=G(\X)$.
\vs6 An alternative form of the general solution, $F(x\sin(h)-y\cos(h), (1-z)\cdot y)=0$, of the PDE, from Sec. (\ref{sc:OD}) is $(1-z)\cdot y=g(x\sin(h)-y\cos(h))$. Solving for $z$ and changing notation and putting this into Eq.(\ref{eq:IDE7}) gives
\begq
\l(\vt x.y \par \r)\ra \l(\vt x\cdot \cos(h)+y\cdot \sin(h)\cdot (y+g(x\sin(h)-y\cos(h)))/y. y\cdot \cos(h)-x\cdot\sin(h) \par \r)
\label{fg:IDE07}
\endq
\begq
\l(\vt x.y \par \r)\ra \l(\vt x\cdot \cos(h)+y\cdot \sin(h)+\sin(h)\cdot g(y\cos(h)-x\sin(h)). y\cdot \cos(h)-x\cdot\sin(h) \par \r)
\label{eq:IDE08}
\endq
Note that sign changes have been made that are irrelevant since $g$ is an arbitrary function. In general, IDEs may contain an arbitrary function which, in this case, is eliminated from Eq.(\ref{eq:IDE8}) by imposing relevant boundary conditions on the problem. If the first integral $x^2+y^2=r^2$ of the simple IDE is used as the boundary condition, then $g=0$ and the simple IDE Eq.(\ref{eq:IDE09}) is recovered.
\begq
\l(\vt x.y \par \r)\ra \l(\vt x\cdot \cos(h)+y\cdot \sin(h). y\cdot \cos(h)-x\cdot\sin(h) \par \r)
\label{eq:IDE09}
\endq
A routine computation shows that for $h=0$, Eq.(\ref{eq:IDE8}) is the identity map and that it is measure preserving.
%===============================KRYLOFF-BOGOLIUBOFF=========================
\section{\sml The Method of Kryloff-Bogoliuboff}
Consider the equation
\[\ddot{x}+\omega^2\, x+\mu f(\dot{x},x)=0\]
or more generally
\[\dot{\X}=\B \X+G(\X)\]
where the small parameter $\mu$ is included in $G$.
Assume a solution of the form
\[x(t)=a(t)\cos(\omega t +\phi(t))\]
Rewrite this trial solution as
\[\X(t)=\exp(\omega t \B)\l(\vt a(t)\, \cos(\phi(t)).a(t)\sin(\phi(t))\par\r)=\exp(\omega t \B)\, \Y(t)\]
Differentiating gives
\[\dot{\X}=\B \X+\exp(\omega t \B)\dot{\Y}\]
The method requires that
\[\exp(\omega t \B)\dot{\Y}=G(\exp(\omega t \B)\Y)\]
or
\[\dot{\Y}=\exp(-\omega t \B)G(\exp(\omega t \B)\Y)\]
Putting this into IDE form gives
\[\dot{\Y}=\exp(-\omega t \B)\M(t,\Y)\Y\]
In place of averaging over a period as done by K-B, integrate over a small interval $h$, using the second mean value theorem
\[\Y=\Y_0+\int_0^h \exp(-\omega s \B)ds\, G(\exp(\omega h \B)\Y)\]
\[\Y=\X_0+\int_0^h \exp(-\omega s \B)ds\, G(\X)\]
\[\Y=\X_0+(-1/\omega)\B^{-1} (\exp(-\omega h \B)-\I)G(\X)\]
%\[\Y_{n+1}=\exp(h \exp(-\omega h \B)\M(h,\Y_n))\Y_n\]
%\[\T(\Y)=\exp(-\omega h \B)\M(h,\Y)\Y\]
using
\[\Y=\exp(-\omega h \B) \X\]
gives
\[\exp(-\omega h \B) \X=\X_0+(-1/\omega)\B^{-1} (\exp(-\omega h \B)-\I)G(\X_0)\]
\[ \X=\exp(\omega h \B)\X_0+(-1/\omega)\B^{-1} (\I-\exp(-\omega h \B))G(\X_0)\]
\[ \X=\exp(\omega h \B)\X_0+ (\I-\exp(-\omega h \B))(-1/\omega)\B^{-1}G(\X_0)\]
\[ \X=\exp(\omega h \B)(\X_0+(-1/\omega)\B^{-1}G(\X_0))+ (-1/\omega)\B^{-1}G(\X_0)\]
or
\[ \T(\X)=\exp(\omega h \B)(\X-(-1/\omega)\B^{-1}G(\X))+ (-1/\omega)\B^{-1}G(\X)\]
%=======================================EMBEDDING=======================
\section{\sml Embedding of a Diffeomorphism in an IDE}
\label{sc:emb}
\begin{definition}{\bf \sml Embedding}
\label{df:emb}
A diffeomorphism $\H$ is said to be embedded in an IDE $\T_h$ when $\T_1(\X)=\H(\X)$ for all $\X$.
\end{definition}
Using the relationship of the standard IDE to vector fields it is possible to link a large class of diffeomorphisms (for example the H\'{e}non map) to vector fields.
This relationship is presented in the following proposition.
\begin{proposition} {\bf Time One Map}
\label{pr:emb}
\vs6\noi
Let $\H$ be any arbitrary diffeomorphism on $\Rl^n$ and assume that $\A$ is a fixed $n \times n$ matrix such that
\[\det(\I-\exp(h\, \A))\neq 0\]
then there exists an IDE $\T(\X)=\exp(h\, \A)(\X-\F(\X))+\F(\X)$ such that for all $\X$ and for $h=1$
\[\exp(\A)(\X-\F(\X))+\F(\X)=\H(\X)\]
and therefore $\H=\T_1$ is a time one map for the IDE.
\end{proposition}
\pf Set $h=1$ in $\T(\X)$, and assume that
\[\exp( \A)(\X-\F(\X))+\F(\X)=\H(\X)\]
then
\begin{eqnarray}
&&\F(\X)-\exp(\A)\F(\X)=\H(\X)-\exp( \A)(\X)\\
&&(\I-\exp( \A))\F(\X)=\H(\X)-\exp( \A)(\X)\\
&&\F(\X)=(\I-\exp( \A))^{-1}(\H(\X)-\exp( \A)(\X))
\end{eqnarray}
and
\begin{eqnarray}
\T(\X)&=&\exp(h\, \A)(\X-(\I-\exp( \A))^{-1}(\H(\X)-\exp( \A)(\X)))+\\
&&(\I-\exp( \A))^{-1}(\H(\X)-\exp( \A)(\X))
\end{eqnarray}
\rl
\vs6\noi
This proposition says that there are many solutions for $\H$ which are dependent on the choice of $\A$.
\begin{example} {\bf Embedding Diffeomorphisms in IDE}
Choosing $\A=\I$ gives the result
\[\F(\X)= \frac{\H(\X)-e \X}{(1-e)}\]
and
\[\T(\X)=\exp(h)\l(\X-\frac{\H(\X)-e \X}{(1-e)}\r)+\frac{\H(\X)-e \X}{(1-e)}\]
\[\T(\X)=\exp(h)\X+(1-\exp(h))\l(\frac{\H(\X)-e \X}{(1-e)}\r)\]
\end{example}
\begin{example} {\bf Embedding Diffeomorphisms in IDE}
Choosing $\A=\B$
where
\[\B=\l(\mtx 0.1.-1.0 \par\r)\]
\[\F(\X)=(\I-\exp( \B))^{-1}(\H(\X)-\exp( \B)(\X))\]
\[\F(\X)=\l(\mtx 1-\cos(1).\sin(1).-\sin(1).1-\cos(1) \par\r)^{-1}\l(\H(\X)-\l(\mtx \cos(1).\sin(1).-\sin(1).\cos(1) \par\r) (\X)\r)\]
\small
\[\F(\X)=\frac{1}{2(1-\cos(1))}\l(\mtx 1-\cos(1).-\sin(1).\sin(1).1-\cos(1) \par\r)\l(\H(\X)-\l(\mtx \cos(1).\sin(1).-\sin(1).\cos(1) \par\r) (\X)\r)\]
\end{example}
Using the second IDE form
\[\T(\X)=\exp(h\, \A)\X+ (\I-\exp(h\, \A))(\I-\exp( \A))^{-1}(\H(\X)-\exp( \A)(\X))\]
If $\H$ has a linear term so that $\H(\X)=\exp(\C)\,\X+\H_0(\X)$ then by selecting $\A=\C$ the IDE is a combination of a linear term from $\H$ and a modulated nonlinear term of $\H$:
\[\T(\X)=\exp(h\, \C)\X+ (\I-\exp(h\, \C))(\I-\exp( \C))^{-1}(\H_0(\X))\]
For $h=0, \; \T(\X)=\X$ and for $h=1, \; \T(\X)= \H(\X)$. For the modified H\'{e}non map
\[\exp(\C)=\l(\mtx 0.1.b.0\par\r)\] and
\[\H_0(\X)=\l(\vt -a\,x^2. 0\par\r)\]
\vs6\noi Let $\H$ be an arbitrary diffeomorphism on a region of $\Cx^n$. An embedding of $\H$ in an IDE exists when there is an IDE $\T_h$ such that $\T_h(\X) =\H(\X)$ for $h=1$ and for all $\X$.
\begin{theorem}
Let $\H(\X)=\A\X+\H_0(\X)$ be any diffeomorphism on $\Cx^n$ where $\A$ is a nonzero $n \times n$ matrix. Also assume that there exists a possibly complex matrix $\C$ such that
\[\A=\exp(\C)\]
Then $\H$ can be embedded in a standard complex IDE, $\T_h(\X)$. Further, there exists a complex ODE which defines a complex vector field corresponding to the IDE. This relationship sets up a correspondence between complex vector fields and diffeomorphism satisfying the above hypothesis.
\end{theorem}
\pf Apply preceding results \rl
\vs6\noi This "correspondence" shows that many diffeomorphisms do not arise from vector fields on $\Rl^n$, but rather from vector fields on $\Cx^n$.
\begin{lemma}
\label{lm:ber}
Let
\[\H(Z)=(1-\lambda)\l( \vt w^2 \, z. w \, z \par\r)\]
be a diffeomorphism on $\Cx^2$. Assume there exist a $\C$ such that
\[\exp(\C)= \l(\mtx a.0.0.b\par\r)\]
For this $\C$
\[\exp(h\, \C)(\Z-\F(\Z))+\F(\Z)\]
where
\[\F(\Z)=(\I-\exp( \C))^{-1}(\H(\Z)-\exp( \C)(\Z))\]
is an embedding of the Bernoulli map in an IDE.
\end{lemma}
\pf
Follows from preceding results. \rl
\vs6\noi Let $\H=\lambda \B+(1-\lambda)\A$ where $\B$ is Bernoulli and $\A$ is almost periodic. This is the fundamental map of [11]. Thus, the fundamental map can be embedded in a complex IDE.
\vs6\noi
Refer to \cite{bi:rb6} Eq. 30 to motivate the following proposition.
\begin{proposition}
Assume $\C(h, \X)=\ln(\A(h,\X))$ exists for all $0<h\leq 1$.
This series converges uniformly and is
thus continuous, but is not differentiable term by term. If only a
finite number of terms are used, the function has all derivatives and is
periodic. However, if a very large number of terms is used, then
it begins to resemble white noise. Also, when using only a
finite number of terms, it must be the solution
of a homogeneous linear differential equation of very high order.
If an infinite number of terms are used, it must solve, in a formal
sense, a
second-order hyperbolic linear partial differential equation
having as a boundary condition a chaotic variable. All of these
features demonstrate that there may be a very fine line between
periodic and non-chaotic phenomena and chaotic or stochastic processes.
With a given sampling rate, it is always possible to use a
large enough number of terms to assure that the sampling rate
will fail to reveal that the equation is periodic and not
classical white
noise.
\vs6\noi
With a small modification, the periodicity can be abolished. For
example use:
\begq
g_t(x)=\sum_{n=0}^\infty a_n \sin(2.1^n \cdot 2\pi(x+t))
\endq
A further property of these processes is something analogous to
sensitive dependence on initial conditions. Ideally this property
should read: There exists a number
$\tau$ such that given a point $x_0$ and a
neighborhood of the point, ${\rm U}_{x_0}$, there is another point
$x \in U_{x_0}$ with $|g_t(x)-g_t(x_0)|>\tau$, for almost all $t$.
While this cannot be true for a
continuous function, it can be true in practical circumstances
when the sampling rate, for a fixed choice of the $a_n$,
cannot confirm or deny the continuity of the function.
Thus, relative to a sampling rate, it can happen that the function may as well
not be continuous. Conclude that relative to a given measuring
frequency it may be impossible to decide if a process is
stochastic and discontinuous or chaotic and continuous. Further,
given any measurement process, it is always possible to construct a finite dimensional,
infinitely differentiable chaotic process for which it is impossible
to distinguish said process from an infinite dimensional, totally
discontinuous stochastic process. For example, it is possible
to construct a twist-and-flip map where the integral curves are
diamond shaped, and only the amplitude is chaotic, thus allowing
it to mimic Brownian motion exactly with a superposition of
a finite number of twist-and-flip chaotic processes.
%=======================================================================
\ssc{Wide-sense Stationary Processes and Chaos}
Every measure-preserving mapping of a measure space defines a
stationary stochastic process.
Conversely, every stationary process can be formally
understood as arising from
measure-preserving dynamical systems. This is the subject of ergodic
theory. A close review of the proof of this fact in Doob[1953],
reveals that
this line of thought, while accurate, could benefit from a more
direct example of how chaotic and stationary processes overlap.
\vs6\noi
The most general wide-sense real-valued stationary process
can be put into the form
\[x(t)=\sum_{j=1}^k u_j \cos(2 \pi \lambda_j t) +v_j \sin(2 \pi
\lambda_j t)\]
where $u_j, v_j$ are mutually orthogonal real random variables,
or can be approximated arbitrarily close by a process of this
form, Doob[1953], where the $\lambda_j$ depend on the process.
\vs6\noi
Taking $u_j=a_j \sin(2 \pi 2^j x)$, $v_j=a_j \cos(2 \pi 2^j x)$, and
$\lambda_j=2^j$ gives the chaotic process
\[g(x,t)=\sum_{j=1}^k a_j\sin(2 \pi 2^j (x+ t))\]
discussed earlier. Choosing the sequence $a_n$, and the frequencies
$\lambda_j$ makes this process as chaotic as needed.
\vs6\noi
In general, construct the wide-sense stationary process
\[\sum_{j=-\infty}^{\infty} a_j f_j(x) \exp(2 \pi i \lambda_j t)\]
as a chaotic process by choosing the $f_j$ appropriately.
%===========================================================================
\ssc{Poisson Processes}
The basic construction of a Poisson process from chaotic
processes is illustrated by the following example:
\begq
f(x,t)=\sum_{n=0}^\infty \alpha_n\, f_n(x,t)
\endq
where
\[f_n(x,t)=0.5 \cdot (1+{\rm sgn}(t-g_n(x)))\]
and
\[g_n(x)=\sum_{k=1}^n \beta_k \, h({\rm T}^k(x))\]
T is a chaotic map of the unit interval, $\alpha_n,\beta_n$ are
constants chosen to fit the data, and $h$ is a properly chosen
function. For example, one choice for $h$ is $\exp(-\lambda
\cdot u)$
%=======================================================================
\ssc{Martingale and Markov Processes}
This section will use conventional terminology found in
leading textbooks on stochastic processes, particularly
Doob \cite{bi:jd}.
\vs6\noi
In order to define a Martingale the notion of conditional
expectation is needed. Given two measurable functions, $f,g$ on [0,1],
define the conditional expectation of $f$, given that $g(x)\in
[a,b]$ by the average value of $f$ over the interval
$g^{-1}[a,b]$, and write this as
\[{\bf E}(f| g(x)\in [a,b])=\frac{1}{\mu(g^{-1}([a,b]))}\int_{g^{-1}([a,b])} f(x)dx\]
where $\mu(\cdot)$ may be thought of as Lebesgue measure.
\vs6\noi
In ergodic theory, conditional expectation is defined with respect
to a partition of the domain of a measurable function as follows:
Let ${\cal P}=\{{\rm E}_i\}$ be a partition of [0,1]. Then
\[{\bf E}(f| {\cal P})= \sum_{i}\frac{1}{\mu({\rm E}_{i})} \cdot
\int_{{\rm E}_{i}} f(x)dx \cdot \chi_{{\rm E}_{i}}(x),\]
The conditional expectation of $f$ takes
on the average value of $f$ over each element of the partition,
${\rm E}_i$. Start with a partition of the range of $g$ and
take its inverse under $g$, let the granularity of the
partition increase indefinitely and apply this definition. Doing this
obtains the familiar definition of conditional expectation found in
elementary texts on probability theory.
\vs6\noi
Given the definition of conditional expectation, a stochastic
process $\xi_t$, is a martingale, if each random variable has a
finite mean value, and if $t_10$ combined with the flip as was done with
Eq.(12). Figure 1 shows the double scroll obtained from Eq.(13)
with the eigenvalue $\gamma=100$.
\vs6\noi The point of this analysis is to conclude that the source of chaos in
Eq.(11) and (13) can be understood by analyzing a two-dimensional
single scroll obtained by considering the limit of (13) as
the contracting eigenvalue, $\gamma \rightarrow \infty$. As
this happens, $ z \rightarrow 0.984$ giving a limiting
two-dimensional single scroll on which all complex
dynamics occur. The linear part of the two-dimensional single
scroll is given by:
\[\begin{array}{cccccr}
\hspace{21pt}\left ( \begin{array}{c}
\dot{x}(t) \\
\\
\dot{y}(t)
\end{array} \right )
& = &
\left [
\begin{array}{ccc}
0.0 & -9.876\\
& \\
1.0 & 0.334
\end{array} \right ]
\left ( \begin{array}{c}
x-0.4455\\
\\
y+0.054
\end{array} \right ) & \hspace{21pt} (14)
\end{array} \]
The nonlinear part is supplied by applying
the flip map when ${\rm sgn}(x-1.287y+0.984)<0.0$.
\vs6\noi Equation (14) is solved by
\[\begin{array}{lr}
\begin{array}{ccl}
x(t) & = &\exp(\alpha t/2)\,[(x_0-a)\cos(\omega t) + C_1
\sin(\omega t)]+a \\
y(t) & = &\exp(\alpha t/2)\,[(y_0-b)\cos(\omega t) + C_2
\sin(\omega t)]+b
\end{array} & (15)
\end{array}\]
where
\[\begin{array}{lcl}
C_1=-(0.5\alpha (x_0-a)+\beta (y_0-b))/\omega\\
C_2=-((x_0-a)+0.5(y_0-b))/\omega
\end{array}\]
and
$\alpha=0.334,\,\,\beta=9.876,\,\,\omega=\sqrt{\beta-(0.5\alpha)^2}$,
$a,\,b$ are as in Eq.(11).
%======================One-dimensional Maps==========================
\subsection{\sml One-dimensional Maps from the Single Scroll}
The single scroll maps the line
$y=(x-0.984)/1.287$ to its image under the flip map.
For
initial conditions of the form $0.3 \leq x \leq 0.85$
and $y=(x-0.984)/1.287$, a segment of this line is mapped into
itself.
There are two fixed points on this line segment:
$(0.54,-0.347)$ and $(0.6876,-0.2293)$. We
have now associated Eq.(9) with a one-dimensional map
of a segment of the line $y=(x-0.984)/1.287$ onto itself.
\vs6\noi Here is a review how this one-dimensional map works:
Begin with an initial condition on the line
$y=(x-0.984)/1.287$ with the value of the $x$ coordinate in the
closed interval [0.3, 0.85]. Use
Eq.(15) to produce a trajectory which expands outward
until it meets the line $y=(x+0.984)/1.287$. Then apply the
flip map, which takes this point on the line $y=(x+0.984)/1.287$
back to the line $y=(x-0.984)/1.287$ where the $x$ value will lie
in the closed interval [0.3, 0.85]. This flipped point will then be
used as the initial conditions for Eq.(15) to
generate a new trajectory. Hence,
this line segment is mapped onto itself. From this conclude
that the source of the complexity, or chaos, in Eqs.(9),(11), and
(13)
can be traced to a one-dimensional map.
\vspace{6pt}
Convert
Eq.(13) into a smooth equation by replacing
the "sgn" function by the sigmoid function, Eq.(7),
thus obtaining a
$\Ci$ vector field whose chaotic
properties are closely tied to a given one-dimensional map so
long as $\gamma$ is large. The
possibility that Eq.(13) could be reduced to a
simple one-dimensional map was suggested by Prof. Morris Hirsch.
%=========================Summary==================
\subsection{\sml Single Scrolls are General}
Following the derivation of the Chua single scroll it is possible to define single scrolls for a wide array of equations and to see that the single scroll is the basis for chaos in a wide array of chaotic three-dimensional autonomous ODEs. See Fig. \ref{fg:scroll}.
\begin{figure}[htbp]
\includegraphics[height=3.017in,width=3.037in,angle=0]{C:/Research/Book/Figures/eps/SingleScroll.eps}
\caption{{\bf Chaotic Single Scroll}}
\label{fg:scroll}
\end{figure}
\begq
\label{cd:scrl}
\left.
\begin{array}{lcl}
&&\mbox{\sml The code for Fig. \ref{fg:scroll} is as follows:}\\
N &= &3500009\\
h &=& 0.001\\
\alpha &=& 0.1 \\
x_0& = &0.5\\
y_0 &=& 0.01\\
\\
&& \mbox{For i = 1 To N}\\
u &=& \exp(\alpha \, h) \cdot ((x - x_0) \cdot \cos(h) + (y - y0) \cdot \sin(h)) + x_0\\
v &=& \exp(\alpha \, h) \cdot ((y - y_0) \cdot \cos(h) - (x - x0) \cdot \sin(h)) + y_0\\
\\
x &=& u\\
y &=& v\\
\\
&&\mbox{\bf Plot point} \\
&& \mbox{ If\; $\|(y - (x + b))\|$} <0.005\\
&&\mbox{Then\; $ x = -x$}\\
&&\mbox{Next i}
\end{array} \right \}
\endq
\vs6\noi The single scroll presented here is a hybrid of an IDE and a flip about the y-axis. The IDE is given by
\[\T_h(\X)=\exp(h\, \lambda)\exp(h\,\B)(\X-\X_0)+\X_0\]
and the flip is given by the {\em if, then} logic gate: if $\|y - (x + b)\| < 0.005$, then $x = -x$. The flip is an IDE with $h=1.0$.
\vs6\noi The sum of the two is not an IDE.
\vs6\noi
The form of this specific equation is
\[f(\X)\, \T_{0.001}+(1-f(\X))\F_{1.0}\]
\vs6\noi The logic gate may be easily replaced by a simple device
\[x0 = 0.5: y0 = 0.07\]
\[x = x0 + 0.01: y = x - b: z = -0.001\]
For i = 1 To N
\[\begin{array}{lll}
ux& = &\sgn(0.001 - (y - (x + b)))\\
u1& =& \exp(\alpha \cdot h) \cdot ((x - x_0) \cdot \cos(h) + (y - y_0) \cdot \sin(h)) + x_0\\
v1& =& \exp(\alpha \cdot h) \cdot ((y - y_0) \cdot \cos(h) - (x - x_0) \cdot \sin(h)) + y_0\\
w1& = &z\\
x &=& ux \cdot u1: y = v1: z = w1
\end{array}\]
{\bf Plot point}
Next i
\vs6\noi This equation can be used to generate a first return or {\em Poincar\'{e}} map, see Fig. \ref{fg:singlescrollFR}.
\begin{figure}[htbp]
\includegraphics[height=2.627in,width=2.587in,angle=0]{C:/Research/Book/Figures/eps/SingleScrollFR.eps}
\caption{{\sml Chaotic Single Scroll First Return Map. In this figure the horizontal axis is time and the vertical axis is the x-coordinate value that corresponds to the associated time. }}
\label{fg:singlescrollFR}
\end{figure}
\vs6\noi The illustration makes the distinction between chaos and "pseudo-randomness" as illustrated by a Bernoulli map such as
\[\l(\vt x.y\par\r)\ra \l(\mtx 2.1.1.1\par\r)\l(\vt x.y\par\r)\mod(1)\]
see Fig.(\ref{fg:ber}).
\begin{figure}[htbp]
\includegraphics[height=2.54in,width=2.57in,angle=0]{C:/Research/Book/Figures/eps/Bernoulli.eps}
\caption{{\bf Idealization of a random process: A Bernoulli Automorphism}}
\label{fg:ber}
\end{figure}
\vs6\noi The chaotic first return map has obvious areas of density that are not present in the Bernoulli map.
\vs6\noi The next objective is to replace the flip by an IDE that provides the needed 180 degree rotation.
%======================Lifting the Single scroll=========================
\subsection{\sml The Three-dimensional Single Scroll}
\label{sc:single}
The two-dimensional single scroll can be lifted to a three dimensional scroll by various means. The condition of $x \ra -x$ when $\|y - (x + b)\| < 0.005$ can be replaced by a three-dimensional rotation across the y-axis. See Fig. \ref{fg:ss3D}.
\vs6\noi Fig. \ref{fg:ss3D} is generated by logic gates.
\begin{figure}[htbp]
\includegraphics[height=3.08in,width=2.967in,angle=0]{C:/Research/Book/Figures/eps/singlescroll3d.eps}
\caption{{\sml Three-dimensional Single Scroll Using Logic Gates}}
\label{fg:ss3D}
\end{figure}
\vs6\noi Using logic gates is convenient but has the disadvantage that it cannot be differentiated. Thus, it is preferable to replace logic gates by algebraic equivalents using lemma \ref{lm:ar}. Typically, it is useful to begin fusion using the $\sgn$ function. See Fig. \ref{fg:sssgn}.
\begin{figure}[htbp]
\includegraphics[height=2.823in,width=2.99in,angle=0]{C:/Research/Book/Figures/eps/singlescroll3dsgn.eps}
\caption{{\sml Three-dimensional Single Scroll Using $\sgn$ function without Logic Gates}}
\label{fg:sssgn}
\end{figure}
\begq
\label{cd:sssgn}
\left.
\begin{array}{lcl}
&&\mbox{The code for Fig. \ref{fg:sssgn} is as follows:}\\
&& \mbox{For i = 1 To N}\\
u_1& =& \exp(\alpha\, h) \cdot ((x - x_0) \cdot \cos(h) + (y - y_0) \cdot \sin(h)) + x_0\\
v_1 &=& \exp(\alpha\, h) \cdot ((y - y_0) \cdot \cos(h) - (x - x_0) \cdot \sin(h)) + y_0\\
w_1& = &z\\
\\
u_2& =& x \cdot \cos(h) - z \cdot \sin(h)\\
v_2& =& y\\
w_2&=& z \cdot \cos(h) + x \cdot \sin(h)\\
\\
xa &=& 0.5 \cdot (1 -\sgn(y - (x + b)))\\
xb &=& 1 - xa\\
\\
za &=& 0.5 \cdot (1 - \sgn(z))\\
zb &=& 1 - za\\
\\
x &=& xa \cdot zb \cdot u_1 + (1 - xa \cdot zb) \cdot u_2\\
y &=& xa \cdot zb \cdot v_1 + (1 - xa \cdot zb) \cdot v_2\\
z &=& xa \cdot zb \cdot w_1 + (1 - xa \cdot zb) \cdot w_2\\
\\
&& \mbox{\bf Plot point}\\
&& \mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi The form of this equation is
\[f(\X)\cdot g(\X) \T_h+(1-f(\X)\cdot g(\X))\,\S_h\]
Where $\T$ is the scroll part and $\S$ is the flip. Its form assures that it is an IDE.
\vs6\noi The last stage of construction, after the $\sgn$ function, is to replace the $\sgn$ with the hyperbolic tangent, see Figs. \ref{fg:ss3Dexp} and \ref{fg:ss3Dexp01}
\begin{figure}[htbp]
\includegraphics[height=3.53in,width=2.92in,angle=0]{C:/Research/Book/Figures/eps/singlescroll3dexp01.eps}
\caption{{\sml Three-dimensional Single Scroll Using Tanh with $\beta=6.0$}}
\label{fg:ss3Dexp}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=2.877in,width=2.427in,angle=0]{C:/Research/Book/Figures/eps/singlescroll3dexp02.eps}
\caption{{\sml Three-dimensional Single Scroll Using Tanh with $\beta=2.0$}}
\label{fg:ss3Dexp01}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=3.083in,width=3.627in,angle=0]{C:/Research/Book/Figures/eps/SingleScrollFlip.eps}
\caption{\sml A Single Scroll with a 180 Degree Rotation or Flip}
\label{fg:singles}
\end{figure}
\vs6\noi Figure \ref{fg:singles} is produced by combining an IDE ($\exp(\alpha \,h)\exp(h \B)$) with $h=0.001$ with a 180 degree rotation, $-\I$. It is a hybrid of an IDE and a discrete map.
\begq
\label{cd:sng}
\left.
\begin{array}{lcl}
&& \mbox{The code for Fig. \ref{fg:singles} is as follows:}\\
&&\mbox{For i = 1 To N}\\
ux& =& \sgn(|(y - (a\, x + b))| - 0.005)\\
u& =& \exp(\alpha \, h)\cdot ((x - x_0) \cdot \cos(h) + (y - y_0) \cdot \sin(h)) + x_0\\
v& =& \exp(\alpha \, h)\cdot ((y - y_0) \cdot \cos(h) - (x - x_0) \cdot \sin(h)) + y_0\\
x& =& ux \cdot u\\
y &=& ux \cdot v\\
&& \mbox{\bf Plot Point}\\
&&\mbox{Next i}
\end{array}\right \}
\endq
\vs6\noi In order to compute a one-dimensional map note that the scroll maps the line $y = a\, x + b$ onto itself. The code used to produce both Fig. \ref{fg:Oned01} and \ref{fg:Oned02} is as follows:
\begq
\label{cd:oned01}
\left.
\begin{array}{lcl}
&&\mbox{\bf The code for Fig. \ref{fg:Oned01} is as follows:}\\
&& \mbox{For i = 1 to 200000}\\
X1& =& 0.16 + 1.1\, (j / M)\\
Y1& = &X1 - b\\
x & =& X1\\
y &= &Y1\\
ux& =& \sgn(|(y - (a\, x + b))| - 0.005)\\
{\rm If} \; ux & = &-1 \;\; {\rm Then\; GoTo\; line1}\\
u& =& \exp(\alpha \, h)\cdot ((x - x_0) \cdot \cos(h) + (y - y_0) \cdot \sin(h)) + x_0\\
v& =& \exp(\alpha \, h)\cdot ((y - y_0) \cdot \cos(h) - (x - x_0) \cdot \sin(h)) + y_0\\
x& =& ux \cdot u\\
y &=& ux \cdot v\\
\\
&&\mbox{\bf Plot Point}\\
&& \mbox{Next i}\\
&& \mbox{Next j}
\end{array}\right \}
\endq
\vs6\noi What is clear from these one-dimensional maps is that the presence of a transverse homoclinic point is not sufficient to completely characterize complex dynamics. Both one-dimensional maps are chaotic, but Fig. \ref{fg:Oned02} is far more complex than Fig. \ref{fg:Oned01}.
\vs6\noi Fig. \ref{fg:Oned02} has a subinterval more closely resembling a shift on 10 or more symbols, whereas Fig. \ref{fg:Oned01} has no obvious relationship to a shift. What can be inferred is that very simple dynamical systems can be globally characterized by the presence of a transverse homoclinic point, but that other systems are better characterized by the structure of their associated one-dimensional maps when these can be found. Possibly, the complexity of biological and social systems is better characterized by one-dimensional maps.
\begin{figure}[htbp]
\includegraphics[height=2.52in,width=2.913in,angle=0]{C:/Research/Book/Figures/eps/OneDMap01.eps}
\caption{\sml One-dimensional map with transition function with $a = 1.0$}
\label{fg:Oned01}
\end{figure}
\vs6\noi In Figs. \ref{fg:Oned01} and \ref{fg:Oned02}, the single scroll is unchanged. The only change is in the slope of the transition function. This illustrates that the complexity of the single scroll arises from the transition function.
\begin{figure}[htbp]
\includegraphics[height=2.187in,width=2.913in,angle=0]{C:/Research/Book/Figures/eps/OneDMap02.eps}
\caption{\sml One-dimensional map with transition function $a= 2.0$}
\label{fg:Oned02}
\end{figure}
\vs6\noi Since three-dimensional diffeomorphisms such as Chua, Lorenz and R\"{o}ssler can be built up from single scrolls, the complexity of these diffeomorphisms lies in the underlying one-dimensional maps embedded within. From the analysis of one-dimensional maps in Sec.~\ref{sc:OneD}, it is clear that one-dimensional maps with a hyperbolic fixed point are the limit set of the unstable manifold of a two-dimensional diffeomorphism. In some cases the one-dimensional map is a shift, Figs.~\ref{fg:Shift2Symbols} and \ref{fg:Shift2SymbolsA}; but in most cases it is not. However, within the one-dimensional maps there are many parts of a shift.
\[f(x) = -1 / (\exp(\beta \cdot (1 - x)) + 1)\]
For i = 1 To N
\[\begin{array}{lcl}
u& = &2 \cdot x + f(b \cdot y + 2 \cdot x)\\
v& =& b \cdot y + 2 \cdot x\\
x& =& u\\
y &=& v
\end{array}\]
{\bf Plot Point}
Next i
\begin{figure}[htbp]
\includegraphics[height=2.197in,width=3.363in,angle=0]{C:/Research/Book/Figures/eps/Shift2Symbols.eps}
\caption{\sml The Unstable Manifold produced by lifting of a One-sided Shift on two symbols to a Two-dimensional map with parameters $\beta=15.0,\;\;b=0.15$}
\label{fg:Shift2Symbols}
\end{figure}
\begin{figure}[htbp]
\includegraphics[height=1.91in,width=3.42in,angle=0]{C:/Research/Book/Figures/eps/Shift2SymbolsA.eps}
\caption{\sml The Unstable Manifold Produced by Lifting a One-sided Shift on two symbols to a Two-dimensional map with parameters $\beta=150.0,\;\;\;b=0.000001$}
\label{fg:Shift2SymbolsA}
\end{figure}
\vs6\noi The determinant of the Jacobian of the lifted map is $b$. For $0<b<1$ the map is dissipative. Complex eigenvalues occur when $Q^3+R^2>0$, in which case the degree of rotation, or frequency, is $\sqrt{3}\,(S-T)/2$.
\vs6\noi Rearranging the condition $Q^3+R^2>0$ to get $Q\, (Q/R)^2+1>0$ provides a sufficient condition for rotation, i.e., $Q>0$. In terms of the coefficients of the characteristic polynomial
\begq Q>0 \Rightarrow a_2>(\div^2)/3=3\,(\div/3)^2
\label{eq:ccond}
\endq
\vs6\noi Using the Chua equation as a reference point, the following is a baseline linear system around which to organize.
\begin{equation}
\left.
\begin{array}{ccccccccc}
a(1,1) & =& \lambda_1 & a(1,2) & =& \omega & a(1,3) & =& 0\\
a(2,1) & =& -\omega & a(2,2) & =& \lambda_2 &a(2,3) & =& 0 \\
a(3,1) & =& 0 & a(3,2) & =& 0 & a(3,3) & =& \lambda_3
\end{array}
\label{eq:bls}\right\}
\end{equation}
\vs6\noi Using Eq. (\ref{eq:bls}) the coefficients of the characteristic polynomial are
\begin{equation}
\left. \begin{array}{cclcl}
a_1&=&\lambda_1+\lambda_2+\lambda_3& =&\div (\A)\\
a_2&=&\lambda_1 \, \lambda_2 + \lambda_3 \, \lambda_2 + \lambda_1 \, \lambda_3 +\omega^2& =& \codiv(\A)+\omega^2\\
a_3&=&\lambda_3(\lambda_1\, \lambda_2+\omega^2)&=&\det (\A)
\end{array}
\label{eq:cpoly}\right\}
\end{equation}
\vs6\noi From Eq. (\ref{eq:cpoly}) a sufficient condition for rotation is given by
\begin{equation}
\left.
\begin{array}{ccl}
3\, a_2-a_1^2&=&3\,(\lambda_1 \, \lambda_2 + \lambda_3 \, \lambda_2 + \lambda_1 \, \lambda_3) +3\,\omega^2-(\lambda_1+\lambda_2+\lambda_3)^2 \\
& {\rm or}& \\
3\,\omega^2&>& (\lambda_1+\lambda_2+\lambda_3)^2-3\,(\lambda_1 \, \lambda_2 + \lambda_3 \, \lambda_2 + \lambda_1 \, \lambda_3)
\end{array}
\label{eq:div}\right\}
\end{equation}
Rewriting the last line in more intuitive terms gives the equation relating rotation and divergence:
\begq
\mbox{rot}^2> (\div^2-3\,\codiv)/3
\label{eq:cir}
\endq
One result that can be obtained from this equation is that a system may have nonzero curl without rotation simply by reducing the magnitude of the parameter $\omega$. Nodes are examples (see \cite{bi:hs}, page 93).
\vs6\noi In order for stretching and folding, in this example, to produce complexity there must be at least nonlinear divergence providing stretching, combined with linear folding that provides rotation rather than just curl. Also, the systems of interest are bounded, so linear folding such as a spiral source (which will become unbounded) must be accompanied by nonlinear stretching or folding.
\vs6\noindent
The detailed example examined in later sections of this book has a linear spiral source in the $x$--$y$ plane and a contracting component inward along the $z$-axis. The two combined dynamics have a hyperbolic fixed point at the origin.
%=================================EXAMPLE ONE===========================
\section{\sml Example Analysis for $\omega \cdot h$ Small}
Using the ideas of the previous section leads to the following IDE:
\footnotesize
\begin{equation}
\label{eq:bline}
\begin{aligned}
\left. \begin{array}{ccl}
x &\ra &\exp(\lambda_1\, h)((x-f(x,z)) \, \cos(\omega \,h) + (y+f(x,z)) \, \sin(\omega \,h))+f(x,z)\\
y& \ra& \exp(\lambda_2\, h)((y+f(x,z)) \, \cos(\omega \,h) - (x -f(x,z))\, \sin(\omega \,h))-f(x,z)\\
z &\ra& \exp(\lambda_3\, h)( z- f(x,z))+f(x,z)
\end{array} \right\}
\end{aligned}
\end{equation}
\normalsize
\vs6\noi System \ref{eq:bline} can be put into the following form,
\begq \X\ra \exp(h \,\D)\exp(h \, \omega\, \B)(\X-\F(x,z))+\F(x,z)\endq
where
\begq \B=\l(\mtxs 0.1.0.-1.0.0.0.0.0\par\r)\endq
For h small the following first-order approximation is valid.
\begq \X\ra \exp(h(\D+\omega\B))(\X-\F(x,z))+\F(x,z)\endq
Setting $\A=\D+\omega\B$ the IDE can be expressed in standard form
\begq \T(\X)= \exp(h\,\A)(\X-\F(x,z))+\F(x,z)\endq
\vs6\noi To obtain a simple example choose $\F$ as follows:
\begq \F(x,z)= f(x,z)\l( \vts 1.-1.1\par \r)\endq
The Jacobian of $\F$ is
\begq \J(\F)=\A\l(\mtxs f_x.0.f_z. -f_x.0.-f_z.f_x.0.f_z\par\r)\endq
where
\begq
\A=\l(\mtxs \lambda_1. \omega. 0. -\omega. \lambda_2. 0. 0.0.\lambda_3\par\r)\endq
This gives $\J(\F)$ as
\begq
\J(\F)= \l(\mtxs (\lambda_1\, -\omega)f_x. 0. (\lambda_1\, -\omega)f_z. -(\omega+\lambda_2)f_x.0. -(\omega+\lambda_2)f_z. \lambda_3 \, f_x. 0. \lambda_3\, f_z \par \r)\endq
\vs6\noi Subtract the nonlinear term from the linear term to get
\footnotesize
\begq
\left.
\begin{aligned}
\begin{array}{lcc}
\A-\J(\F)&=&\l(\mtxs \lambda_1. \omega. 0. -\omega. \lambda_2. 0. 0.0.\lambda_3\par\r)-\l(\mtxs (\lambda_1\, -\omega)f_x. 0. (\lambda_1\, -\omega)f_z. -(\omega+\lambda_2)f_x.0. -(\omega+\lambda_2)f_z. \lambda_3 \, f_x. 0. \lambda_3\, f_z \par \r)\\
&{\rm therefore} &\\
\J(\V)&=& \l(\mtxs \lambda_1-(\lambda_1\, -\om**