Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Download

GAP 4.8.9 installation with standard packages -- copy to your CoCalc project to get it

563553 views
1
\documentclass[12pt,a4paper]{scrartcl}
2
\usepackage{amsmath}
3
\usepackage{amsfonts}
4
\usepackage{amsthm}
5
\usepackage{amssymb}
6
\usepackage[latin1]{inputenc}
7
\usepackage{mathptmx}
8
% \usepackage{} % empty \usepackage{} is a LaTeX error; commented out
9
\usepackage{array}
10
\usepackage{color}
11
\usepackage{hyperref}
12
\usepackage{url}
13
\usepackage{graphicx}
14
\usepackage{multicol}
15
\usepackage{dsfont}
16
17
\usepackage{booktabs}
18
19
%\usepackage[ignoreunlbld,nomsgs]{refcheck}
20
21
\usepackage{pgf, tikz}
22
\usepackage{color}
23
24
% !TeX spellcheck = en_US
25
26
%\textwidth=15cm \textheight=22cm \topmargin=0.5cm
27
%\oddsidemargin=0.5cm \evensidemargin=0.5cm
28
29
\usepackage[T1]{fontenc}
30
31
\usepackage[scaled=0.8]{beramono}
32
33
\usepackage{fancyvrb} \RecustomVerbatimEnvironment{Verbatim}{Verbatim}
34
{xleftmargin=15pt, frame=single, fontsize=\small}
35
36
37
\newcounter{listi}
38
\newcommand{\stdli}{ \topsep0ex \partopsep0ex % .5ex plus.25ex minus.125ex%
39
\parsep.2ex plus.1ex minus.1ex \itemsep0ex% .5ex plus.25ex minus.125ex%
40
\leftmargin2.5em \labelwidth2em \labelsep.5em \rightmargin0em}% \samepage }
41
\newenvironment{arab}{\begin{list}{\textup{(\arabic{listi})}}%
42
{\usecounter{listi}\stdli}}{\end{list}}
43
\newenvironment{rome}{\begin{list}{\textup{(\roman{listi})}}%
44
{\usecounter{listi}\stdli}}{\end{list}}
45
\newenvironment{latin}{\begin{list}{\textup{(\alph{listi})}}%
46
{\usecounter{listi}\stdli}}{\end{list}}
47
\renewenvironment{itemize}{\begin{list}{{$\bullet$}}{\stdli}}{\end{list}}
48
\newenvironment{myverb}{\begin{small}}{\end{small}\pagebreak[2]} %%%%% \vspace{-0.8\baselineskip}
49
50
51
\newtheorem{theorem}{Theorem}
52
\newtheorem{lemma}[theorem]{Lemma}
53
\newtheorem{corollary}[theorem]{Corollary}
54
\newtheorem{proposition}[theorem]{Proposition}
55
56
57
\theoremstyle{definition}
58
\newtheorem{remark}[theorem]{Remark}
59
\newtheorem{definition}[theorem]{Definition}
60
61
\let\phi=\varphi
62
63
\def\CC{{\mathbb C}}
64
\def\ZZ{{\mathbb Z}}
65
\def\QQ{{\mathbb Q}}
66
\def\RR{{\mathbb R}}
67
\def\EE{{\mathbb E}}
68
\def\AA{{\mathbb A}}
69
\def\PP{{\mathbb P}}
70
\def\NN{{\mathbb N}}
71
72
\def\cL{{\mathcal L}}
73
74
\def\Ker{\operatorname{Ker}}
75
\def\Im{\operatorname{Im}}
76
\DeclareMathOperator{\gp}{gp}
77
\DeclareMathOperator{\rank}{rank}
78
\DeclareMathOperator{\conv}{conv}
79
\DeclareMathOperator{\aff}{aff}
80
\DeclareMathOperator{\cone}{cone}
81
\DeclareMathOperator{\rec}{rec}
82
\DeclareMathOperator{\mrank}{mrank}
83
\DeclareMathOperator{\Hilb}{Hilb}
84
\DeclareMathOperator{\vol}{vol}
85
\DeclareMathOperator{\Cl}{Cl}
86
\DeclareMathOperator{\para}{par}
87
88
\DeclareMathOperator{\totdeg}{totdeg}
89
90
91
\def\cG{{\mathcal G}}
92
\def\cR{{\mathcal R}}
93
94
\let\hat=\widehat
95
\let\tilde=\widetilde
96
\let\Bar=\overline
97
98
\let\iso=\cong
99
100
\let\epsilon=\varepsilon
101
\def\discuss#1{\marginparsep=1em\marginparwidth=60pt
102
\marginpar{\tt \footnotesize \raggedright #1}}
103
104
\definecolor{darkgray}{gray}{0.00}
105
106
\addtokomafont{section}{\color{darkgray}}
107
108
\setkomafont{sectionentry}{\large}
109
110
\addtokomafont{subsection}{\color{darkgray}}
111
112
\addtokomafont{subsubsection}{\normalsize}
113
114
\parindent=0pt \parskip=4pt
115
116
\setcounter{tocdepth}{3}
117
118
%\def\Normaliz#1+{\textsf{Normaliz}}
119
%\def\jNormaliz#1+{\textsf{jNormaliz}}
120
%\def\NmzIntegrate#1+{\textsf{NmzIntegrate}}
121
122
\def\itemtt[#1]{\item[\textbf{\ttt{#1}}]}
123
124
\def\ttt{\texttt}
125
126
\def\version{3.4.0}
127
\def\NmzDir{normaliz-\version}
128
129
130
\begin{document}
131
\vspace*{2cm}
132
133
\centerline{\Large\textbf{Normaliz \version}} \vspace*{0.5cm}
134
135
\centerline{\today}
136
137
\vspace*{1cm}
138
139
\begin{center}Winfried Bruns, Tim R\"omer, Richard Sieg and Christof
140
S\"oger\\[14pt]
141
Normaliz 2 team member: Bogdan Ichim\\[14pt]
142
\url{http://normaliz.uos.de}\qquad\qquad\qquad
143
\url{https://github.com/Normaliz}\\[14pt]
144
\url{mailto:normaliz@uos.de}
145
\end{center}
146
147
148
149
\tableofcontents
150
151
\newpage
152
153
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% INTRODUCTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
154
\section{Introduction}\label{facil}
155
156
\subsection{The objectives of Normaliz}
157
158
The program Normaliz is a tool for computing
159
the Hilbert bases and enumerative data of rational cones and, more generally, sets of lattice points in rational polyhedra. The mathematical background and the terminology of this manual are explained in Appendix A. For a thorough treatment of the mathematics involved we refer the reader to
160
\cite{BG}. The terminology follows \cite{BG}. For
161
algorithms of Normaliz see \cite{BI}, \cite{BHIKS},
162
\cite{BIS}, \cite{BK02}, \cite{BSS}, and \cite{BS}. Some new developments are briefly explained in this manual.
163
164
Both polyhedra and lattices can be given by
165
\begin{arab}
166
\item systems of generators and/or
167
\item constraints.
168
\end{arab}
169
Since version 3.1, cones need not be pointed and polyhedra need not have vertices, but are allowed to contain a positive-dimensional affine subspace.
170
171
In addition to generators and constraints, affine monoids can be defined by lattice ideals, in other words, by binomial equations.
172
173
In order to describe a rational polyhedron by \emph{generators}, one specifies a finite set of vertices $x_1,\dots,x_n\in\QQ^d$ and a set $y_1,\dots,y_m\in\ZZ^d$ generating a rational cone $C$. The polyhedron defined by these generators is
174
$$
175
P=\conv(x_1,\dots,x_n)+C,\qquad C=\RR_+y_1+\dots+\RR_+y_m.
176
$$
177
An affine lattice defined by generators is a subset of $\ZZ^d$ given as
178
$$
179
L=w+L_0,\qquad L_0=\ZZ z_1+\dots+\ZZ z_r, \qquad w,z_1,\dots,z_r\in \ZZ^d.
180
$$
181
\emph{Constraints} defining a polyhedron are affine-linear inequalities with integral coefficients, and the constraints for an affine lattice are affine-linear diophantine equations and congruences. The conversion between generators and constraints is an important task of Normaliz.
182
183
184
The first main goal of Normaliz is to compute a system of generators for
185
$$
186
P\cap L.
187
$$
188
The minimal system of generators of the monoid $M=C\cap L_0$ is the Hilbert basis $\Hilb(M)$ of $M$. The homogeneous case, in which $P=C$ and $L=L_0$, is undoubtedly the most important one, and in this case $\Hilb(M)$ is the system of generators to be computed. In the general case the system of generators consists of $\Hilb(M)$ and finitely many points $u_1,\dots,u_s\in P\cap L$ such that
189
$$
190
P\cap L=\bigcup_{j=1}^s u_j+M.
191
$$
192
193
The second main goal are enumerative data that depend on a grading
194
of the ambient lattice. Normaliz computes the Hilbert series and
195
the Hilbert quasipolynomial of the monoid or set of lattice points in a polyhedron. In combinatorial terminology: Normaliz computes Ehrhart series and quasipolynomials of rational polyhedra. Normaliz also computes weighted
196
Ehrhart series and Lebesgue integrals of polynomials over
197
rational polytopes.
198
199
Normaliz now has a variant called QNormaliz. Its basic number class are elements of a subfield of $\RR$ instead of integers. This extends the scope of Normaliz to certain nonrational polyhedra, but limits its applicability to convex hull computations and triangulations since finite generation of the lattice points in such polyhedra is no longer given.
200
201
The computation goals of Normaliz can be set by the user. In particular, they can be restricted to subtasks, such as the lattice points in a polytope or the leading coefficient of the Hilbert (quasi)polynomial.
202
203
Performance data of Normaliz can be found in \cite{BIS}.
204
205
\emph{Acknowledgement.}\enspace In 2013--2016 the development of Normaliz has been supported by the DFG SPP 1489 ``Algorithmische und ex\-pe\-rimentelle Methoden in Algebra, Geometrie und Zahlentheorie''.
206
207
\subsection{Platforms, implementation and access from other systems}
208
209
Executables for Normaliz are provided for Mac OS, Linux and MS Windows. If the executables prepared cannot be run on your system, then you can compile Normaliz yourself (see Section \ref{Compile}). The statically linked Linux binaries provided by us can be run in the Linux subsystem that is available in MS Windows 10.
210
211
Normaliz is written in C++, and should be compilable on every system that has a GCC compatible compiler. It uses the standard packages Boost and GMP (see Section \ref{Compile}). The parallelization is based on OpenMP.
212
213
Normaliz consists of two parts: the front end ``normaliz'' for input and output and the C++ library ``libnormaliz'' that does the computations.
214
215
Normaliz can be accessed from the interactive general purpose system \textsc{Python} via the interface \textsc{PyNormaliz} written by Sebastian Gutsche with contributions by Justin Shenk and Richard Sieg.
216
217
Normaliz can also be accessed from the following systems:
218
\begin{itemize}
219
\item \textsc{Singular} via the library \ttt{normaliz.lib},
220
\item \textsc{Macaulay 2} via the package \ttt{Normaliz.m2},
221
\item \textsc{CoCoA} via an external library and libnormaliz,
222
\item \textsc{GAP} via the GAP package \textsc{NormalizInterface} \cite{GAP-NmzInterface} which uses libnormaliz,
223
\item \textsc{polymake} (thanks to the \textsc{polymake}
224
team),
225
\item \textsc{SageMath} via PyNormaliz (in preparation, thanks to Matthias K\"oppe).
226
\end{itemize}
227
228
The Singular and Macaulay 2 interfaces are contained in the
229
Normaliz distribution. At present, their functionality is limited to Normaliz 2.10. Nevertheless they profit from the newer versions.
230
231
Furthermore, Normaliz is used by B. Burton's system
232
\textsc{Regina} and in \textsc{SecDec} by S. Borowka et al.
233
234
Normaliz does not have its own interactive shell. For interactive use we recommend access via PyNormaliz, GAP, or SageMath (in preparation).
235
236
\subsection{Major changes relative to version 3.1.1}
237
238
In 3.2.0:
239
240
\begin{arab}
241
242
\item installation via \ttt{autotools} (written by Matthias K\"oppe),
243
244
% \item the Python interface PyNormaliz,
245
246
\item automatic choice of algorithmic variants (can be switched off),
247
248
\item sparse format for vectors and matrices,
249
250
\item constraints in symbolic form,
251
252
\item Hilbert series with denominator corresponding to a homogeneous system of parameters,
253
254
\item choice of an output directory,
255
256
\item improvements of \verb|libnormaliz|: extension of enumeration \verb|ConeProperty|, constructors based on \verb|Matrix<Integer>|, additional functions for retrieval of results,
257
258
\item a better implementation of \verb|Approximate| and its use in the inhomogeneous case,
259
260
\item option \verb|Symmetrize| that produces symmetrized input for nmzIntegrate and runs nmzIntegrate on this input,
261
262
\item QNormaliz, a version of Normaliz using coordinates in an extension of $\QQ$ (restricted to convex hull computations and triangulation).
263
264
\end{arab}
265
266
In 3.3.0:
267
268
\begin{arab}
269
\item inclusion of NmzIntegrate in Normaliz as a part of libnormaliz,
270
\item fractions in input files,
271
\item controlled interruption of Normaliz.
272
\end{arab}
273
274
In 3.4.0:
275
276
\begin{arab}
277
\item limit of parallelization via libnormaliz,
278
\item floating point numbers in input,
279
\item project-and-lift algorithm for lattice points in polytopes, also with a floating point variant,
280
\item significantly improved subdivision, equivalent replacement for Scip,
281
\item fast Gorenstein test,
282
\item restriction of number of significant coefficients of quasipolynomial,
283
\item definition of semi-open parallelepipeds in input and output of their lattice points.
284
\end{arab}
285
286
287
288
See the file \verb|CHANGELOG| in the basic package for more information on the history of Normaliz.
289
290
291
\subsection{Future extensions}
292
293
\begin{arab}
294
\item Computation and exploitation of automorphism groups,
295
\item multigraded Hilbert series,
296
\item access from further systems,
297
\item Gr\"obner and Graver bases.
298
\end{arab}
299
300
\subsection{Download and installation}
301
302
Download
303
\begin{itemize}
304
\item the zip file with the Normaliz source, documentation,
305
examples and further platform independent components, and
306
307
\item the zip file containing the executable for your system
308
\end{itemize}
309
from the Normaliz website\medskip
310
311
\centerline{\url{http://normaliz.uos.de}}\medskip
312
313
and unzip both in the same directory of your choice. In it, a
314
directory \ttt{\NmzDir} (called Normaliz directory in the
315
following) is created with several subdirectories.
316
317
See Section \ref{Distr} for more details on the distribution and consult Section \ref{Compile} if you want to compile Normaliz yourself.
318
%\newpage
319
320
\section{Normaliz by examples}\label{Examples}
321
322
\subsection{Terminology}\label{Term}
323
324
For the precise interpretation of parts of the Normaliz output some terminology is necessary, but this section can be skipped at first reading, and the user can come back to it when it becomes necessary. We will give less formal descriptions along the way.
325
326
As pointed out in the introduction, Normaliz ``computes'' intersections $P\cap L$ where $P$ is a rational polyhedron in $\RR^d$ and $L$ is an affine sublattice of $\ZZ^d$. It proceeds as follows:
327
328
\begin{arab}
329
\item If the input is inhomogeneous, then it is homogenized by introducing a homogenizing coordinate: the polyhedron $P$ is replaced by the cone $C(P)$: it is the closure of $\RR_+(P\times \{1\})$ in $\RR^{d+1}$. Similarly $L$ is replaced by $\widetilde L=\ZZ(L\times \{1\})$. In the homogeneous case in which $P$ is a cone and $L$ is a subgroup of $\ZZ^d$, we set $C(P)=P$ and $\widetilde L=L$.
330
331
\item The computations take place in the \emph{efficient lattice}
332
$$
333
\EE=\widetilde{L}\cap \RR C(P).
334
$$
335
where $\RR C(P)$ is the linear subspace generated by $C(P)$. The internal coordinates are chosen with respect to a basis of $\EE$. The \emph{efficient cone} is
336
$$
337
\CC=\RR_+(C(P)\cap \EE).
338
$$
339
340
\item Inhomogeneous computations are truncated using the dehomogenization (defined implicitly or explicitly).
341
342
\item The final step is the conversion to the original coordinates. Note that we must use the coordinates of $\RR^{d+1}$ if homogenization has been necessary, simply because some output vectors may be non-integral.
343
\end{arab}
344
345
Normaliz computes inequalities, equations and congruences defining $\EE$ and $\CC$. The output contains only those constraints that are really needed. They must always be used jointly: the equations and congruences define $\EE$, and the equations and inequalities define $\CC$. Altogether they define the monoid $M=\CC\cap\EE$. In the homogeneous case this is the monoid to be computed. In the inhomogeneous case we must intersect $M$ with the dehomogenizing hyperplane to obtain $P\cap L$.
346
347
In this section, only pointed cones (and polyhedra with vertices) will be discussed. Nonpointed cones will be addressed in Section \ref{Nonpointed}.
348
349
350
\subsection{Practical preparations}
351
352
You may find it comfortable to run Normaliz via the GUI jNormaliz \cite{AI}. In the Normaliz directory open jNormaliz by clicking
353
\ttt{jNormaliz.jar} in the appropriate way. (We assume that
354
Java is installed on your machine.)
355
\begin{figure}[bht]
356
\centering
357
\includegraphics[width = 80 mm, bb=0 0 690 560]{jNormaliz.jpg}\\%width = 80 mm, bb=0 0 689 430
358
\caption{jNormaliz}\label{new}
359
\end{figure}
360
In the jNormaliz file dialogue choose one of the input files
361
in the subdirectory \ttt{example}, say \ttt{small.in}, and
362
press \ttt{Run}. In the console window you can watch Normaliz
363
at work. Finally inspect the output window for the results.
364
365
The menus and dialogues of jNormaliz are self explanatory, but you
366
can also consult the documentation \cite{AI} via the help menu.
367
368
\emph{Remark}\enspace The jNormaliz drop down menus do presently not cover all options of Normaliz. But since all computation goals and algorithmic variants can be set in the input file, there is no real restriction in using jNormaliz. The only option not reachable by jNormaliz is the output directory (see Section \ref{outcontrol}).
369
370
Moreover, one can, and often will, run Normaliz from the
371
command line. This is fully explained in Section \ref{options}. At this point it is enough to call Normaliz by typing
372
\begin{Verbatim}
373
normaliz -c <project>
374
\end{Verbatim}
375
where \ttt{<project>} denotes the project to be computed. Normaliz will load the file \ttt{<project>.in}. The option \verb|-c| makes Normaliz write a progress report on the terminal. Normaliz writes its results to \verb|<project>.out|.
376
377
Note that you may have to prefix \ttt{normaliz} by a path name, and \ttt{<project>} must contain a path to the input file if it is not in the current directory. Suppose the Normaliz directory is the current directory and we are using a Linux or Mac system. Then
378
\begin{Verbatim}
379
./normaliz -c example/small
380
\end{Verbatim}
381
will run \ttt{small.in} from the directory \ttt{example}. On Windows we must change this to
382
\begin{Verbatim}
383
.\normaliz -c example\small
384
\end{Verbatim}
385
386
The commands given above will run Normaliz with at most $8$ parallel threads. For the very small examples in this tutorial you may want to add \ttt{-x=1} to suppress parallelization. For large examples, you can increase the number of parallel threads by \verb|-x=<N>| where \verb|<N>| is the number of threads that you want to suggest. See Section \ref{exec}.
387
388
As long as you don't specify a computation goal on the command line or in the input file, Normaliz will use the \emph{default computation goals}:
389
\begin{verbatim}
390
HilbertBasis
391
HilbertSeries
392
ClassGroup
393
\end{verbatim}
394
The computation of the Hilbert series requires the explicit or implicit definition of a grading. Normaliz does only complain that a computation goal cannot be reached if the goal has been set explicitly.
395
For example, if you say \verb|HilbertSeries| and there is no grading, an exception will be thrown and Normaliz terminates, but an output file with the already computed data will be written.
396
397
Normaliz will always print the results that are obtained on the way to the computation goals and do not require extra effort.
398
399
Appendix \ref{Console} helps you to read the console output that you have demanded by the option \verb|-c|.
400
\subsection{A cone in dimension $2$}\label{cone_ex}
401
402
We want to investigate the cone $C=\RR_+(2,1)+\RR_+(1,3)\subset \RR^2$:
403
\begin{center}
404
\begin{tikzpicture}[scale=0.7]
405
\filldraw[yellow] (0,0) -- (1.833,5.5) -- (4.5,5.5) -- (4.5,2.25) -- cycle;
406
\draw (0,0) -- (1.833,5.5);
407
\draw (0,0) -- (4.5,2.25) node at (-0.3,-0.3){\small $0$};
408
\foreach \x in {0,...,4}
409
\foreach \y in {0,...,5}
410
{
411
\filldraw[fill=black] (\x,\y) circle (1.5pt);
412
}
413
\draw[red,thick] (1,1) circle (4pt);
414
\draw[red,thick] (1,2) circle (4pt);
415
\draw[red,thick] (1,3) circle (4pt);
416
\draw[red,thick] (2,1) circle (4pt);
417
\end{tikzpicture}
418
\end{center}
419
420
This cone is defined in the input file \verb|2cone.in|:
421
\begin{Verbatim}
422
amb_space 2
423
cone 2
424
1 3
425
2 1
426
\end{Verbatim}
427
The input tells Normaliz that the ambient space is $\RR^2$, and then a cone with 2 generators is defined, namely the cone $C$ from above.
428
429
The figure indicates the Hilbert basis, and this is our first computation goal.
430
431
If you prefer to consider the columns of a matrix as input vectors (or have a matrix in this format from another system) you can use the input
432
\begin{Verbatim}
433
amb_space 2
434
cone transpose 2
435
1 2
436
3 1
437
\end{Verbatim}
438
Note that the number \verb|2| following \verb|transpose| is now the number of \emph{columns}. Later on we will also show the use of formatted matrices.
439
440
\subsubsection{The Hilbert basis}
441
In order to compute the Hilbert basis, we run Normaliz from jNormaliz or by
442
\begin{Verbatim}
443
./normaliz -c example/2cone
444
\end{Verbatim}
445
and inspect the output file:
446
\begin{Verbatim}
447
4 Hilbert basis elements
448
2 extreme rays
449
2 support hyperplanes
450
\end{Verbatim}
451
Self explanatory so far.
452
\begin{Verbatim}
453
embedding dimension = 2
454
rank = 2 (maximal)
455
external index = 1
456
internal index = 5
457
original monoid is not integrally closed
458
\end{Verbatim}
459
The embedding dimension is the dimension of the space in which the computation is done. The rank is the rank of the lattice $\EE$ (notation as in Section \ref{Term}). In fact, in our example $\EE=\ZZ^2$, and therefore has rank $2$.
460
461
For subgroups $G\subset U\subset \ZZ^d$ we denote the order of the torsion subgroup of $U/G$ by the \emph{index} of $G$ in $U$. The \emph{external index} is the index of the lattice $\EE$ in $\ZZ^d$. In our case $\EE=\ZZ^d$, and therefore the external index is $1$. Note: the external index is $1$ exactly when $\EE$ is a direct summand of $\ZZ^d$.
462
463
For this example and many others the \emph{original monoid} is well defined: the generators of the cone used as input are contained in $\EE$. (This need not be the case if $\EE$ is a proper sublattice of $\ZZ^d$, and we let the original monoid be undefined in inhomogeneous computations.) Let $G$ be the subgroup generated by the original monoid. The \emph{internal index} is the index of $G$ in $\EE$.
464
465
The original monoid is \emph{integrally closed} if and only if it contains the Hilbert basis, and this is evidently false for our example. We go on.
466
467
\begin{Verbatim}
468
size of triangulation = 1
469
resulting sum of |det|s = 5
470
\end{Verbatim}
471
The primal algorithm of Normaliz relies on a (partial) triangulation. In our case the triangulation consists of a single simplicial cone, and (the absolute value of) its determinant is~$5$.
472
\begin{Verbatim}
473
No implicit grading found
474
\end{Verbatim}
475
If you do not define a grading explicitly, Normaliz tries to find one itself: the grading is defined if and only if there is a linear form $\gamma$ on $\EE$ under which all extreme rays of the efficient cone $\CC$ have value $1$, and if so, $\gamma$ is the implicit grading. Such does not exist in our case.
476
477
The last information before we come to the vector lists:
478
\begin{Verbatim}
479
rank of class group = 0
480
finite cyclic summands:
481
5: 1
482
\end{Verbatim}
483
The class group of the monoid $M$ has rank $0$, in other words, it is finite. It has one finite cyclic summand of order $5$.
484
485
This is the first instance of a multiset of integers displayed as a sequence of pairs
486
\begin{quote}
487
\verb|<n>: <m>|
488
\end{quote}
489
Such an entry says: the multiset contains the number \verb|<n>| with multiplicity \verb|<m>.|
490
491
492
Now we look at the vector lists (typeset in two columns to save space):
493
\begin{Verbatim}
494
4 Hilbert basis elements: 2 extreme rays:
495
1 1 1 3
496
1 2 2 1
497
1 3
498
2 1 2 support hyperplanes:
499
-1 2
500
3 -1
501
\end{Verbatim}
502
503
The support hyperplanes are given by the linear forms (or inner normal vectors):
504
\begin{align*}
505
-x_1+2x_2&\ge 0,\\
506
3x_1-x_2&\ge 0.
507
\end{align*}
508
509
If the order is not fixed for some reason, Normaliz sorts vector lists as follows: (1) by degree if a grading exists and the application makes sense, (2) lexicographically.
510
511
512
\subsubsection{The cone by inequalities}\label{ineq_ex}
513
514
Instead of by generators, we can define the cone by the inequalities just computed (\verb|2cone_ineq.in|). We use this example to show the input of a formatted matrix:
515
\begin{Verbatim}
516
amb_space auto
517
inequalities
518
[[-1 2] [3 -1]]
519
\end{Verbatim}
520
A matrix of input type \verb|inequalities| contains \emph{homogeneous} inequalities. Normaliz can determine the dimension of the ambient space from the formatted matrix. Therefore we can declare the ambient space as being ``auto determined'' (but \verb|amb_space 2| is not forbidden).
521
522
We get the same result as with \verb|2cone.in| except that the data depending on the original monoid cannot be computed: the internal index and the information on the original monoid are missing since there is no original monoid.
523
524
\subsubsection{The interior}\label{strict_ex}
525
Now we want to compute the lattice points in the interior of our cone. If the cone $C$ is given by the inequalities $\lambda_i(x)\ge0$ (within $\aff(C)$), then the interior is given by the inequalities $\lambda_i(x)>0$. Since we are interested in lattice points, we work with the inequalities $\lambda_i(x)\ge 1$.
526
527
528
The input file \verb|2cone_int.in| says
529
\begin{Verbatim}
530
amb_space 2
531
strict_inequalities 2
532
-1 2
533
3 -1
534
\end{Verbatim}
535
The strict inequalities encode the conditions
536
\begin{align*}
537
-x_1+2x_2&\ge 1,\\
538
3x_1-x_2&\ge 1.
539
\end{align*}
540
This is our first example of inhomogeneous input.
541
\begin{center}
542
\begin{tikzpicture}[scale=0.7]
543
\filldraw[yellow] (0,0) -- (1.833,5.5) -- (4.5,5.5) -- (4.5,2.25) -- cycle;
544
\filldraw[lightgray] (0.6,0.8) -- (2.166,5.5) --(4.5,5.5) -- (4.5,2.75) -- cycle;
545
\draw (0,0) -- (1.833,5.5);
546
\draw (0,0) -- (4.5,2.25) node at (-0.3,-0.3){\small $0$};
547
\draw (0,-1) -- (2.166,5.5);
548
\draw (0,0.5) -- (4.5,2.75);
549
\foreach \x in {0,...,4}
550
\foreach \y in {0,...,5}
551
{
552
\filldraw[fill=black] (\x,\y) circle (1.5pt);
553
}
554
\draw[red,thick] (1,1) circle (4pt);
555
\draw[red,thick] (1,2) circle (4pt);
556
\draw[blue,thick] (0.6,0.8) circle (4pt);
557
\end{tikzpicture}
558
\end{center}
559
Alternatively we could use the following two equivalent input files, in a more intuitive notation:
560
\begin{Verbatim}
561
amb_space 2
562
constraints 2
563
-1 2 > 0
564
3 -1 > 0
565
\end{Verbatim}
566
\begin{Verbatim}
567
amb_space 2
568
constraints 2
569
-1 2 >= 1
570
3 -1 >= 1
571
\end{Verbatim}
572
There is an even more intuitive way to type the input file using symbolic constraints that we will introduce in Section \ref{cong_ex}.
573
574
Normaliz homogenizes inhomogeneous computations by introducing an auxiliary homogenizing coordinate $x_{d+1}$. The polyhedron is obtained by intersecting the homogenized cone with the hyperplane $x_{d+1}=1$. The recession cone is the intersection with the hyperplane $x_{d+1}=0$. The recession monoid is the monoid of lattice points in the recession cone, and the set of lattice points in the polyhedron is represented by its system of module generators over the recession monoid.
575
576
Note that the homogenizing coordinate serves as the denominator for rational vectors. In our example the recession cone is our old friend that we have already computed, and therefore we need not comment on it.
577
578
\begin{Verbatim}
579
2 module generators
580
4 Hilbert basis elements of recession monoid
581
1 vertices of polyhedron
582
2 extreme rays of recession cone
583
3 support hyperplanes of polyhedron (homogenized)
584
585
embedding dimension = 3
586
affine dimension of the polyhedron = 2 (maximal)
587
rank of recession monoid = 2
588
\end{Verbatim}
589
The only surprise may be the embedding dimension: Normaliz always takes the dimension of the space in which the computation is done. It is the number of components of the output vectors. Because of the homogenization it has increased by $1$.
590
\begin{Verbatim}
591
size of triangulation = 1
592
resulting sum of |det|s = 25
593
\end{Verbatim}
594
In this case the homogenized cone has stayed simplicial, but the determinant has changed.
595
\begin{Verbatim}
596
dehomogenization:
597
0 0 1
598
\end{Verbatim}
599
The dehomogenization is the linear form $\delta$ on the homogenized space that defines the hyperplanes from which we get the polyhedron and the recession cone by the equations $\delta(x)=1$ and $\delta(x)=0$, respectively. It is listed since one can also work with a user defined dehomogenization.
600
\begin{Verbatim}
601
module rank = 1
602
\end{Verbatim}
603
This is the rank of the module of lattice points in the polyhedron over the recession monoid. In our case the module is an ideal, and so the rank is $1$.
604
605
The output of inhomogeneous computations is always given in homogenized form. The last coordinate is the value of the dehomogenization on the listed vectors, $1$ on the module generators, $0$ on the vectors in the recession monoid:
606
\begin{Verbatim}
607
2 module generators: 4 Hilbert basis elements of recession monoid:
608
1 1 1 1 1 0
609
1 2 1 1 2 0
610
1 3 0
611
2 1 0
612
\end{Verbatim}
613
The module generators are $(1,1)$ and $(1,2)$.
614
\begin{Verbatim}
615
1 vertices of polyhedron:
616
3 4 5
617
\end{Verbatim}
618
Indeed, the polyhedron has a single vertex, namely $(3/5,4/5)$.
619
\begin{Verbatim}
620
2 extreme rays of recession cone: 3 support hyperplanes of polyhedron (homogenized):
621
1 3 0 -1 2 -1
622
2 1 0 0 0 1
623
3 -1 -1
624
\end{Verbatim}
625
Two support hyperplanes are exactly those that we have used to define the polyhedron -- and it has only $2$. But Normaliz always outputs the support hyperplanes that are needed for the cone that one obtains by homogenizing the polyhedron, as indicated by ``homogenized''. The homogenizing variable is always $\ge 0$. In this case the support hyperplane $(0,0,1)$ is essential for the description of the cone. Note that it need not always appear.
626
627
628
\subsection{A lattice polytope}\label{lattice_polytope}\label{polytope_ex}
629
630
\begin{minipage}[b]{0.5\textwidth}
631
The file \ttt{polytope.in} contains
632
633
\begin{Verbatim}
634
amb_space 4
635
polytope 4
636
0 0 0
637
2 0 0
638
0 3 0
639
0 0 5
640
\end{Verbatim}
641
\end{minipage}
642
\hspace{1cm}
643
\begin{minipage}[t]{0.4\textwidth}
644
\tikzset{facet style/.style={opacity=1.0,very thick,line join=round}}
645
\begin{tikzpicture}[x = {(-0.5cm,-0.5cm)},
646
y = {(0.9659cm,-0.25882cm)},
647
z = {(0cm,1cm)},
648
scale = 0.8]
649
\draw [->,dashed] (-0.5, 0, 0) -- (3.0,0,0);
650
651
\draw [->,dashed] (0, -0.5, 0) -- (0,6.0,0);
652
653
\draw [->,dashed] (0, 0, -0.5) -- (0,0,3.5);
654
655
\draw[thin] (0,0,0) -- (2,0,0) -- (0,5,0) -- cycle;
656
\draw[thin] (0,0,0) -- (2,0,0) -- (0,0,3) -- cycle;
657
\draw[thin] (0,0,0) -- (0,5,0) -- (0,0,3) -- cycle;
658
\draw[thick] (2,0,0) -- (0,5,0) -- (0,0,3) -- cycle;
659
660
\filldraw[fill=white] (0,0,0) circle (2pt);
661
\filldraw[fill=white] (2,0,0) circle (2pt);
662
\filldraw[fill=white] (0,5,0) circle (2pt);
663
\filldraw[fill=white] (0,0,3) circle (2pt);
664
\end{tikzpicture}
665
\end{minipage}
666
667
This is a good place to mention that Normaliz also accepts matrices (and vectors) in sparse format:
668
669
\begin{Verbatim}
670
amb_space 4
671
polytope 4
672
;
673
1:2;
674
2:3;
675
3:5;
676
\end{Verbatim}
677
Each input row, concluded by \verb|;|, lists the indices and the corresponding nonzero values in that row of the matrix.
678
679
The Ehrhart monoid of the integral polytope with the $4$
680
vertices
681
$$
682
(0,0,0)\,,\quad (2,0,0)\,,\quad (0,3,0)\quad\text{ and }\quad (0,0,5)
683
$$
684
in $\RR^3$ is to be computed. The generators of the Ehrhart monoid are obtained by attaching a further coordinate $1$ to the vertices, and this explains \verb|amb_space 4|. In fact, the input type \verb|polytope| is nothing but a convenient (perhaps superfluous) version of
685
\begin{Verbatim}
686
amb_space 4
687
cone 4
688
0 0 0 1
689
2 0 0 1
690
0 3 0 1
691
0 0 5 1
692
\end{Verbatim}
693
694
Running \ttt{normaliz} produces the file \ttt{polytope.out}:
695
\begin{Verbatim}
696
19 Hilbert basis elements
697
18 Hilbert basis elements of degree 1
698
4 extreme rays
699
4 support hyperplanes
700
701
embedding dimension = 4
702
rank = 4 (maximal)
703
external index = 1
704
internal index = 30
705
original monoid is not integrally closed
706
\end{Verbatim}
707
Perhaps a surprise: the lattice points of the polytope do not yield all Hilbert basis elements.
708
\begin{Verbatim}
709
size of triangulation = 1
710
resulting sum of |det|s = 30
711
\end{Verbatim}
712
Nothing really new so far. But now Normaliz finds a grading given by the last coordinate. See \ref{grading} below for general information on gradings.
713
\begin{Verbatim}
714
grading:
715
0 0 0 1
716
717
degrees of extreme rays:
718
1: 4
719
\end{Verbatim}
720
Again we encounter the notation \verb|<n>: <m>|: we have $4$ extreme rays, all of degree $1$.
721
\begin{Verbatim}
722
Hilbert basis elements are not of degree 1
723
\end{Verbatim}
724
Perhaps a surprise: the polytope is not integrally closed as defined in \cite{BG}. Now we see the enumerative data defined by the grading:
725
\begin{Verbatim}
726
multiplicity = 30
727
728
Hilbert series:
729
1 14 15
730
denominator with 4 factors:
731
1: 4
732
733
degree of Hilbert Series as rational function = -2
734
735
Hilbert polynomial:
736
1 4 8 5
737
with common denominator = 1
738
\end{Verbatim}
739
The polytope has $\ZZ^3$-normalized volume $30$ as indicated by the multiplicity. The Hilbert (or Ehrhart) function counts the lattice points in $kP$, $k\in\ZZ_+$. The corresponding generating function is a
740
rational function $H(t)$. For our polytope it is
741
$$
742
\frac{1+14t+15t^2}{(1-t)^4}.
743
$$
744
The denominator is given in multiset notation: \verb|1: 4| says that the factor $(1-t^1)$ occurs with multiplicity $4$.
745
746
The Ehrhart polynomial (again we use a more general term in
747
the output file) of the polytope is
748
$$
749
p(k)=1+4k+8k^2+5k^3\,.
750
$$
751
In our case it has integral coefficients, a rare exception. Therefore one usually needs a denominator.
752
753
Everything that follows has already been explained.
754
\begin{Verbatim}
755
756
rank of class group = 0
757
finite cyclic summands:
758
30: 1
759
760
***********************************************************************
761
762
18 Hilbert basis elements of degree 1:
763
0 0 0 1
764
...
765
2 0 0 1
766
767
1 further Hilbert basis elements of higher degree:
768
1 2 4 2
769
770
4 extreme rays: 4 support hyperplanes:
771
0 0 0 1 -15 -10 -6 30
772
0 0 5 1 0 0 1 0
773
0 3 0 1 0 1 0 0
774
2 0 0 1 1 0 0 0
775
\end{Verbatim}
776
777
The support hyperplanes give us a description of the polytope
778
by inequalities: it is the solution of the system of the $4$
779
inequalities
780
$$ x_3\ge0\,,\quad
781
x_2\ge0\,,\quad x_1\ge0\quad\text{ and }\quad
782
15x_1+10x_2+6x_3\le30\,.
783
$$
784
785
\subsubsection{Only the lattice points}\label{only_lattpoints}
786
787
Suppose we want to compute only the lattice points in our polytope. In the language of graded monoids these are the degree $1$ elements, and so we add \verb|Deg1Elements| to our input file (\verb|polytope_deg1.in|):
788
\begin{Verbatim}
789
amb_space 4
790
polytope 4
791
0 0 0
792
2 0 0
793
0 3 0
794
0 0 5
795
Deg1Elements
796
/* This is our first explicit computation goal*/
797
\end{Verbatim}
798
We have used this opportunity to include a comment in the input file. The computation of lattice points in a polytope will be taken up again in Sections \ref{project_example} and \ref{LattPoints}.
799
800
We lose all information on the Hilbert series, and from the Hilbert basis we only retain the degree $1$ elements.
801
802
\subsection{A rational polytope}\label{rational}
803
804
The type \verb|polytope| can (now) be used for rational polytopes as well.
805
\begin{center}
806
\begin{tikzpicture}[scale=3.0]
807
\filldraw[yellow] (0.5,0.5) -- (-0.333,-0.333) -- (0.25,-0.5) -- cycle;
808
\draw (0.5,0.5) -- (-0.333,-0.333) -- (0.25,-0.5) -- cycle;
809
\draw[->] (0,-0.6) --(0,0.6);
810
\draw[-> ] (-0.6,0) -- (0.6,0);
811
\filldraw[fill=white] (0.5,0.5) circle (0.5pt);
812
\filldraw[fill=white] (-0.333,-0.333) circle (0.5pt);
813
\filldraw[fill=white] (0.25,-0.5) circle (0.5pt);
814
\end{tikzpicture}
815
\end{center}
816
817
We want to investigate the Ehrhart series of the triangle $P$
818
with vertices
819
$$
820
(1/2,1/2),\ (-1/3,-1/3),\ (1/4,-1/2).
821
$$
822
For this example the procedure above yields the input file \ttt{rational.in}:
823
824
\begin{Verbatim}
825
amb_space 3
826
polytope 3
827
1/2 1/2
828
-1/3 -1/3
829
1/4 -1/2
830
HilbertSeries
831
\end{Verbatim}
832
This is the first time that we used the shortcut \verb|unit_vector <n>| which represents the $n$th unit vector $e_n\in\RR^d$ and is only allowed for input types which require a single vector.
833
834
From the output file we only list the data of the Ehrhart series.
835
\begin{Verbatim}
836
multiplicity = 5/8
837
838
Hilbert series:
839
1 0 0 3 2 -1 2 2 1 1 1 1 2
840
denominator with 3 factors:
841
1: 1 2: 1 12: 1
842
843
degree of Hilbert Series as rational function = -3
844
845
Hilbert series with cyclotomic denominator:
846
-1 -1 -1 -3 -4 -3 -2
847
cyclotomic denominator:
848
1: 3 2: 2 3: 1 4: 1
849
850
Hilbert quasi-polynomial of period 12:
851
0: 48 28 15 7: 23 22 15
852
1: 11 22 15 8: 16 28 15
853
2: -20 28 15 9: 27 22 15
854
3: 39 22 15 10: -4 28 15
855
4: 32 28 15 11: 7 22 15
856
5: -5 22 15 with common denominator = 48
857
6: 12 28 15
858
\end{Verbatim}
859
The multiplicity is a rational number. Since in dimension $2$
860
the normalized area (of full-dimensional polytopes) is twice
861
the Euclidean area, we see that $P$ has Euclidean area $5/16$.
862
863
Unlike in the case of a lattice polytope, there is no canonical choice of the denominator of the Ehrhart series. Normaliz gives it in $2$ forms. In the first form the numerator polynomial is
864
$$
865
1+3t^3+2t^4-t^5+2t^6+2t^7+t^8+t^9+t^{10}+t^{11}+2t^{12}
866
$$
867
and the denominator is
868
$$
869
(1-t)(1-t^2)(1-t^{12}).
870
$$
871
As a rational function, $H(t)$ has degree $-3$. This implies
872
that $3P$ is the smallest integral multiple of $P$ that
873
contains a lattice point in its interior.
874
875
Normaliz gives also a representation as a quotient of coprime
876
polynomials with the denominator factored into cyclotomic
877
polynomials. In this case
878
we have
879
$$
880
H(t)=-\frac{1+t+t^2+3t^3+4t^4+3t^5+2t^6}{\zeta_1^3\zeta_2^2\zeta_3\zeta_4}
881
$$
882
where $\zeta_i$ is the $i$-th cyclotomic polynomial
883
($\zeta_1=t-1$, $\zeta_2=t+1$, $\zeta_3=t^2+t+1$,
884
$\zeta_4=t^2+1$).
885
886
Normaliz transforms the representation with cyclotomic
887
denominator into one with denominator of type
888
$(1-t^{e_1})\cdots(1-t^{e_r})$, $r=\textup{rank}$, by choosing
889
$e_r$ as the least common multiple of all the orders of the
890
cyclotomic polynomials appearing, $e_{r-1}$ as the lcm of those
891
orders that have multiplicity $\ge 2$ etc.
892
893
There are other ways to form a suitable denominator with $3$
894
factors $1-t^e$, for example
895
$g(t)=(1-t^2)(1-t^3)(1-t^4)=-\zeta_1^3\zeta_2^2\zeta_3\zeta_4$.
896
Of course, $g(t)$ is the optimal choice in this case. However,
897
$P$ is a simplex, and in general such optimal choice may not
898
exist. We will explain the reason for our standardization
899
below.
900
901
Let $p(k)$ be the number of lattice points in $kP$. Then $p(k)$
902
is a quasipolynomial:
903
$$
904
p(k)=p_0(k)+p_1(k)k+\dots+p_{r-1}(k)k^{r-1},
905
$$
906
where the coefficients depend on $k$, but only to the extent
907
that they are periodic of a certain period $\pi\in\NN$. In our
908
case $\pi=12$ (the lcm of the orders of the cyclotomic
909
polynomials).
910
911
The table giving the quasipolynomial is to be read as follows:
912
The first column denotes the residue class $j$ modulo the
913
period and the corresponding line lists the coefficients
914
$p_i(j)$ in ascending order of $i$, multiplied by the common
915
denominator. So
916
$$
917
p(k)=1+\frac{7}{12}k+\frac{5}{16}k^2, \qquad k\equiv 0\pod{12},
918
$$
919
etc. The leading coefficient is the same for all residue
920
classes and equals the Euclidean volume.
921
922
Our choice of denominator for the Hilbert series is motivated
923
by the following fact: $e_i$ is the common period of the
924
coefficients $p_{r-i},\dots,p_{r-1}$. The user should prove
925
this fact or at least verify it by several examples.
926
927
Especially in the case of a simplex the representation of the Hilbert series shown so far may not be the expected one. In fact, there is a representation in which the exponents of $t$ in the denominator are the degrees of the integral extreme generators. So one would expect the denominator to be $(1-t^2)(1-t^3)(1-t^4)$ in our case. The generalization to the nonsimplicial case uses the degrees of a homogeneous system of parameters (see \cite[p. 200]{BG}). Normaliz can compute such a denominator if the computation goal \verb|HSOP| is set (\verb|rationalHSOP.in|):
928
\begin{Verbatim}
929
Hilbert series (HSOP):
930
1 1 1 3 4 3 2
931
denominator with 3 factors:
932
2: 1 3: 1 4: 1
933
\end{Verbatim}
934
935
Note that the degrees of the elements in a homogeneous system of parameters are by no means unique and that there is no optimal choice in general. To find a suitable sequence of degrees Normaliz must compute the face lattice of the cone to some extent. Therefore be careful not to ask for \verb|HSOP| if the cone has many support hyperplanes.
936
937
\emph{Warning}: It is tempting, but not a good idea to define the polytope by the input type \verb|vertices|. It would make Normaliz compute the lattice points in the polytope, but not in the cone over the polytope, and it is the latter that we need to determine the Ehrhart series.
938
939
\subsubsection{The rational polytope by inequalities}\label{rat_ineq}
940
941
We extract the support hyperplanes of our polytope from the output file and use them as input (\verb|poly_ineq.in|):
942
\begin{Verbatim}
943
amb_space 3
944
inequalities 3
945
-8 2 3
946
1 -1 0
947
2 7 3
948
grading
949
unit_vector 3
950
HilbertSeries
951
\end{Verbatim}
952
At this point we have to help Normaliz because it has no way to guess that we want to investigate the polytope defined by the inequalities and the choice $x_3=1$. This is achieved by the specification of the grading that maps every vector to its third coordinate.
953
954
These data tell us that the polytope, as a subset of $\RR^2$, is defined by the inequalities
955
\begin{align*}
956
-8x_1+2x_2+3&\ge0,\\
957
x_1-x_2+0&\ge0,\\
958
2x_1+7x_2+3&\ge 0.
959
\end{align*}
960
These inequalities are inhomogeneous, but we are using the homogeneous input type \verb|inequalities| which amounts to introducing the grading variable $x_3$ as explained above.
961
962
Why don't we define it by the ``natural'' inhomogeneous inequalities using \verb|inhom_inequalities|? We could do it, but then only the polytope itself would be the object of computation, and we would have no access to the Ehrhart series. We could just compute the lattice points in the polytope. (Try it.)
963
964
The inequalities as written above look somewhat artificial. It is certainly more natural to write them in the form
965
\begin{align*}
966
8x_1-2x_2&\le 3,\\
967
x_1-x_2&\ge0,\\
968
2x_1+7x_2&\ge -3,
969
\end{align*}
970
and for the direct transformation into Normaliz input we have introduced the type \verb|constraints|. But Normaliz would then interpret the input as inhomogeneous and we run into the same problem as with \verb|inhom_inequalities|. The way out: we tell Normaliz that we want a homogeneous computation (\verb|poly_hom_const.in|):
971
\begin{Verbatim}
972
amb_space 3
973
hom_constraints 3
974
8 -2 <= 3
975
1 -1 >= 0
976
2 7 >= -3
977
grading
978
unit_vector 3
979
HilbertSeries
980
\end{Verbatim}
981
982
983
\subsection{Magic squares}\label{eq_ex}
984
985
Suppose that you are interested in the following type of ``square''
986
\begin{center}
987
\begin{tabular}{|l|l|l|}
988
\hline
989
$x_1$ & $x_2$ & $x_3$\\
990
\hline
991
$x_4$ & $x_5$ & $x_6$\\
992
\hline
993
$x_7$ & $x_8$ & $x_9$\\
994
\hline
995
\end{tabular}
996
\end{center}
997
and the problem is to find nonnegative values for $x_1,\dots,x_9$
998
such that the 3 numbers in all rows, all columns, and both
999
diagonals sum to the same constant $\mathcal{M}$. Sometimes
1000
such squares are called \emph{magic} and $\mathcal M$ is the
1001
\emph{magic constant}. This leads to a linear system of
1002
equations
1003
\begin{align*}
1004
& x_1+x_2+x_3=x_4+x_5+x_6;\\
1005
& x_1+x_2+x_3=x_7+x_8+x_9;\\
1006
& x_1+x_2+x_3=x_1+x_4+x_7;\\
1007
& x_1+x_2+x_3=x_2+x_5+x_8;\\
1008
& x_1+x_2+x_3=x_3+x_6+x_9;\\
1009
& x_1+x_2+x_3=x_1+x_5+x_9;\\
1010
& x_1+x_2+x_3=x_3+x_5+x_7.
1011
\end{align*}
1012
This system is encoded in the file
1013
\ttt{3x3magic.in}:
1014
\begin{Verbatim}
1015
amb_space 9
1016
equations 7
1017
1 1 1 -1 -1 -1 0 0 0
1018
1 1 1 0 0 0 -1 -1 -1
1019
0 1 1 -1 0 0 -1 0 0
1020
1 0 1 0 -1 0 0 -1 0
1021
1 1 0 0 0 -1 0 0 -1
1022
0 1 1 0 -1 0 0 0 -1
1023
1 1 0 0 -1 0 -1 0 0
1024
grading
1025
sparse 1:1 2:1 3:1;
1026
\end{Verbatim}
1027
1028
The input type \verb|equations| represents \emph{homogeneous} equations. The first equation reads
1029
$$
1030
x_1+x_2+x_3-x_4-x_5-x_6=0,
1031
$$
1032
and the other equations are to be interpreted analogously. The magic constant is a natural choice for the grading. It is given in sparse form, equivalent to the dense form
1033
\begin{Verbatim}
1034
grading
1035
1 1 1 0 0 0 0 0 0
1036
\end{Verbatim}
1037
1038
It seems that we have forgotten to define the cone. This may indeed be the case, but doesn't matter: if there is no input type that defines a cone, Normaliz chooses the positive orthant, and this is exactly what we want in this case.
1039
1040
The output file contains the following:
1041
1042
\begin{Verbatim}
1043
5 Hilbert basis elements
1044
5 Hilbert basis elements of degree 1
1045
4 extreme rays
1046
4 support hyperplanes
1047
1048
embedding dimension = 9
1049
rank = 3
1050
external index = 1
1051
1052
size of triangulation = 2
1053
resulting sum of |det|s = 4
1054
1055
grading:
1056
1 1 1 0 0 0 0 0 0
1057
with denominator = 3
1058
\end{Verbatim}
1059
The input degree is the magic constant. However, as the
1060
denominator $3$ shows, the magic constant is always divisible
1061
by $3$, and therefore the effective degree is $\mathcal M/3$.
1062
This degree is used for the multiplicity and the Hilbert
1063
series.
1064
\begin{Verbatim}
1065
degrees of extreme rays:
1066
1: 4
1067
1068
Hilbert basis elements are of degree 1
1069
\end{Verbatim}
1070
This was not to be expected (and is no longer true for $4\times 4$ squares).
1071
\begin{Verbatim}
1072
multiplicity = 4
1073
1074
Hilbert series:
1075
1 2 1
1076
denominator with 3 factors:
1077
1: 3
1078
1079
degree of Hilbert Series as rational function = -1
1080
1081
Hilbert polynomial:
1082
1 2 2
1083
with common denominator = 1
1084
\end{Verbatim}
1085
The Hilbert series is
1086
$$
1087
\frac{1+2t+t^2}{(1-t)^3}.
1088
$$
1089
The Hilbert polynomial is
1090
$$
1091
P(k)=1+2k+2k^2,
1092
$$
1093
and after substituting $\mathcal M/3$ for $k$ we obtain the
1094
number of magic squares of magic constant $\mathcal M$, provided $3$ divides $\mathcal M$. (If $3\nmid \mathcal M$, there is no magic square of magic constant $\mathcal M$.)
1095
\begin{Verbatim}
1096
rank of class group = 1
1097
finite cyclic summands:
1098
2: 2
1099
\end{Verbatim}
1100
So the class group is $\ZZ\oplus (\ZZ/2\ZZ)^2$.
1101
\begin{Verbatim}
1102
5 Hilbert basis elements of degree 1:
1103
0 2 1 2 1 0 1 0 2
1104
1 0 2 2 1 0 0 2 1
1105
1 1 1 1 1 1 1 1 1
1106
1 2 0 0 1 2 2 0 1
1107
2 0 1 0 1 2 1 2 0
1108
1109
0 further Hilbert basis elements of higher degree:
1110
\end{Verbatim}
1111
The $5$ elements of the Hilbert basis represent the magic
1112
squares
1113
\begin{center}
1114
\begin{tabular}{|l|l|l|}
1115
\hline
1116
2 & 0 & 1\\
1117
\hline
1118
0 & 1 & 2\\
1119
\hline
1120
1 & 2 & 0\\
1121
\hline
1122
\end{tabular}
1123
\hspace{10mm}
1124
\begin{tabular}{|l|l|l|}
1125
\hline
1126
1 & 0 & 2\\
1127
\hline
1128
2 & 1 & 0\\
1129
\hline
1130
0 & 2 & 1\\
1131
\hline
1132
\end{tabular}
1133
\hspace{10mm}
1134
\begin{tabular}{|l|l|l|}
1135
\hline
1136
1 & 1 & 1\\
1137
\hline
1138
1 & 1 & 1\\
1139
\hline
1140
1 & 1 & 1\\
1141
\hline
1142
\end{tabular}
1143
\hspace{10mm}
1144
\begin{tabular}{|l|l|l|}
1145
\hline
1146
1 & 2 & 0\\
1147
\hline
1148
0 & 1 & 2\\
1149
\hline
1150
2 & 0 & 1\\
1151
\hline
1152
\end{tabular}
1153
\hspace{10mm}
1154
\begin{tabular}{|l|l|l|}
1155
\hline
1156
0 & 2 & 1\\
1157
\hline
1158
2 & 1 & 0\\
1159
\hline
1160
1 & 0 & 2\\
1161
\hline
1162
\end{tabular}
1163
\end{center}
1164
All other solutions are linear combinations of these squares
1165
with nonnegative integer coefficients.
1166
One of these $5$ squares is clearly in the interior:
1167
\begin{Verbatim}
1168
4 extreme rays: 4 support hyperplanes:
1169
0 2 1 2 1 0 1 0 2 -2 -1 0 0 4 0 0 0 0
1170
1 0 2 2 1 0 0 2 1 0 -1 0 0 2 0 0 0 0
1171
1 2 0 0 1 2 2 0 1 0 1 0 0 0 0 0 0 0
1172
2 0 1 0 1 2 1 2 0 2 1 0 0 -2 0 0 0 0
1173
\end{Verbatim}
1174
These $4$ support hyperplanes cut out the cone generated by the magic squares from the linear subspace they generate. Only one is reproduced as a sign inequality. This is due to the fact that the linear subspace has submaximal dimension and there is no unique lifting of linear forms to the full space.
1175
\begin{Verbatim}
1176
6 equations: 3 basis elements of lattice:
1177
1 0 0 0 0 1 -2 -1 1 1 0 -1 -2 0 2 1 0 -1
1178
0 1 0 0 0 1 -2 0 0 0 1 -1 -1 0 1 1 -1 0
1179
0 0 1 0 0 1 -1 -1 0 0 0 3 4 1 -2 -1 2 2
1180
0 0 0 1 0 -1 2 0 -2
1181
0 0 0 0 1 -1 1 0 -1
1182
0 0 0 0 0 3 -4 -1 2
1183
\end{Verbatim}
1184
So one of our equations has turned out to be superfluous (why?). Note that also the equations are not reproduced exactly. Finally, Normaliz lists a basis of the efficient lattice $\EE$ generated by the magic squares.
1185
1186
\subsubsection{With even corners}\label{magiceven}\label{cong_ex}
1187
1188
We change our definition of magic square by requiring that the
1189
entries in the $4$ corners are all even. Then we have to
1190
augment the input file by the following (\ttt{3x3magiceven.in}):
1191
\begin{Verbatim}
1192
congruences 4 sparse
1193
1:1 10:2;
1194
3:1 10:2;
1195
7:1 10:2;
1196
9:1 10:2;
1197
\end{Verbatim}
1198
This sparse form is equivalent to the dense form
1199
\begin{Verbatim}
1200
congruences 4
1201
1 0 0 0 0 0 0 0 0 2
1202
0 0 1 0 0 0 0 0 0 2
1203
0 0 0 0 0 0 1 0 0 2
1204
0 0 0 0 0 0 0 0 1 2
1205
\end{Verbatim}
1206
The first $9$ entries in each row represent the coefficients of the coordinates in the homogeneous congruences, and the last is the modulus:
1207
$$
1208
x_1\equiv 0\mod 2
1209
$$
1210
is the first congruence etc.
1211
1212
We could also define these congruences as symbolic constraints:
1213
\begin{Verbatim}
1214
constraints 4 symbolic
1215
x[1] ~ 0(2);
1216
x[3] ~ 0(2);
1217
x[7] ~ 0(2);
1218
x[9] ~ 0(2);
1219
\end{Verbatim}
1220
1221
The output changes accordingly:
1222
1223
1224
\begin{Verbatim}
1225
9 Hilbert basis elements
1226
0 Hilbert basis elements of degree 1
1227
4 extreme rays
1228
4 support hyperplanes
1229
1230
embedding dimension = 9
1231
rank = 3
1232
external index = 4
1233
1234
size of triangulation = 2
1235
resulting sum of |det|s = 8
1236
1237
grading:
1238
1 1 1 0 0 0 0 0 0
1239
with denominator = 3
1240
1241
degrees of extreme rays:
1242
2: 4
1243
1244
multiplicity = 1
1245
1246
Hilbert series:
1247
1 -1 3 1
1248
denominator with 3 factors:
1249
1: 1 2: 2
1250
1251
degree of Hilbert Series as rational function = -2
1252
1253
Hilbert series with cyclotomic denominator:
1254
-1 1 -3 -1
1255
cyclotomic denominator:
1256
1: 3 2: 2
1257
1258
Hilbert quasi-polynomial of period 2:
1259
0: 2 2 1
1260
1: -1 0 1
1261
with common denominator = 2
1262
\end{Verbatim}
1263
After the extensive discussion in Section \ref{rational} it should be easy for you to write down the Hilbert series and the Hilbert quasipolynomial. (But keep in mind that the grading has a denominator.)
1264
\begin{Verbatim}
1265
rank of class group = 1
1266
finite cyclic summands:
1267
4: 2
1268
1269
***********************************************************************
1270
1271
0 Hilbert basis elements of degree 1:
1272
1273
9 further Hilbert basis elements of higher degree:
1274
...
1275
1276
4 extreme rays:
1277
0 4 2 4 2 0 2 0 4
1278
2 0 4 4 2 0 0 4 2
1279
2 4 0 0 2 4 4 0 2
1280
4 0 2 0 2 4 2 4 0
1281
\end{Verbatim}
1282
We have listed the extreme rays since they have changed after the introduction of the congruences, although the cone has not changed. The reason is that Normaliz always chooses the extreme rays from the efficient lattice $\EE$.
1283
\begin{Verbatim}
1284
1285
4 support hyperplanes:
1286
...
1287
1288
6 equations:
1289
... 3 basis elements of lattice:
1290
2 0 -2 -4 0 4 2 0 -2
1291
2 congruences: 0 1 2 3 1 -1 0 1 2
1292
1 0 0 0 0 0 0 0 0 2 0 0 6 8 2 -4 -2 4 4
1293
0 1 0 0 1 0 0 0 0 2
1294
\end{Verbatim}
1295
The rank of the lattice has of course not changed, but after the introduction of the congruences the basis has changed.
1296
1297
\subsubsection{The lattice as input}\label{latt_ex}
1298
1299
It is possible to define the lattice by generators. We demonstrate this for the magic squares with even corners. The lattice has just been computed (\verb|3x3magiceven_lat.in|):
1300
1301
\begin{Verbatim}
1302
amb_space 9
1303
lattice 3
1304
2 0 -2 -4 0 4 2 0 -2
1305
0 1 2 3 1 -1 0 1 2
1306
0 0 6 8 2 -4 -2 4 4
1307
grading
1308
1 1 1 0 0 0 0 0 0
1309
\end{Verbatim}
1310
It produces the same output as the version starting from equations and congruences.
1311
1312
\verb|lattice| has a variant that takes the saturation of the sublattice generated by the input vectors (\verb|3x3magic_sat.in|):
1313
1314
\begin{Verbatim}
1315
amb_space 9
1316
saturation 3
1317
2 0 -2 -4 0 4 2 0 -2
1318
0 1 2 3 1 -1 0 1 2
1319
0 0 6 8 2 -4 -2 4 4
1320
grading
1321
1 1 1 0 0 0 0 0 0
1322
\end{Verbatim}
1323
Clearly, we remove the congruences by this choice and arrive at the output of \verb|3x3magic.in|.
1324
1325
\subsection{Decomposition in a numerical semigroup}\label{inhom_eq_ex}
1326
1327
Let $S=\langle 6,10,15\rangle$, the numerical semigroup generated by $6,10,15$. How can $97$ be written as a sum in the generators?\medskip
1328
1329
In other words: we want to find all nonnegative integral solutions to the equation
1330
$$
1331
6x_1+10x_2+15x_3=97
1332
$$
1333
1334
1335
\begin{minipage}[b]{0.5\textwidth}
1336
Input (\verb|NumSemi.in|):
1337
\begin{Verbatim}
1338
amb_space 3
1339
constraints 1 symbolic
1340
6x[1] + 10x[2] + 15x[3] = 97;
1341
\end{Verbatim}
1342
\end{minipage}
1343
\hspace{1cm}
1344
\begin{minipage}[t]{0.4\textwidth}
1345
\tikzset{facet style/.style={opacity=1.0,very thick,line join=round}}
1346
\begin{tikzpicture}[x = {(0.8cm,-0.5cm)},
1347
y = {(0.9659cm,0.20cm)},
1348
z = {(0cm,1cm)},
1349
scale = 0.5]
1350
\draw [->,dashed] (-0.5, 0, 0) -- (3.0,0,0);
1351
1352
\draw [->,dashed] (0, -0.5, 0) -- (0,6.0,0);
1353
1354
\draw [->,dashed] (0, 0, -0.5) -- (0,0,3.5);
1355
1356
%\draw[thin] (0,0,0) -- (2,0,0) -- (0,5,0) -- cycle;
1357
%\draw[thin] (0,0,0) -- (2,0,0) -- (0,0,3) -- cycle;
1358
%\draw[thin] (0,0,0) -- (0,5,0) -- (0,0,3) -- cycle;
1359
\filldraw[color=yellow] (2,0,0) -- (0,5,0) -- (0,0,3) -- cycle;
1360
\draw[thick] (2,0,0) -- (0,5,0) -- (0,0,3) -- cycle;
1361
1362
\filldraw[fill=white] (0,0,0) circle (2pt);
1363
\filldraw[fill=white] (2,0,0) circle (2pt);
1364
\filldraw[fill=white] (0,5,0) circle (2pt);
1365
\filldraw[fill=white] (0,0,3) circle (2pt);
1366
\end{tikzpicture}
1367
\end{minipage}
1368
1369
\medskip
1370
The equation cuts out a triangle from the positive orthant.
1371
1372
The set of solutions is a module over the monoid $M$ of solutions of the homogeneous equation $6x_1+10x_2+15x_3=0$. So $M=0$ in this case.
1373
\begin{Verbatim}
1374
6 module generators:
1375
2 1 5 1
1376
2 4 3 1
1377
2 7 1 1
1378
7 1 3 1
1379
7 4 1 1
1380
12 1 1 1
1381
1382
0 Hilbert basis elements of recession monoid:
1383
\end{Verbatim}
1384
The last line is as expected, and the $6$ module generators are the goal of the computation.
1385
1386
Normaliz is smart enough to recognize that it must compute the lattice points in a polygon, and does exactly this. You can recognize it in the console output: Normaliz \version\ has used the project-and-lift algorithm. We will discuss it further in Section \ref{project_example} and Section \ref{project}.
1387
1388
For those who like to play: add the option \verb|--NoProjection| to the command line. Then the terminal output will change; Normaliz computes the lattice points as a truncated Hilbert basis via a triangulation (only one simplicial cone in this case).
1389
1390
\subsection{A job for the dual algorithm}\label{job_dual}
1391
1392
We increase the size of the magic squares to $5\times 5$. Normaliz can do the same computation as for $3\times 3$ squares, but this will take some minutes. Suppose we are only interested in the Hilbert basis, we should use the dual algorithm for this example. (The dual algorithm goes back to Pottier \cite{Po}.) The input file is \ttt{5x5dual.in}:
1393
1394
\begin{Verbatim}
1395
amb_space 25
1396
equations 11
1397
1 1 1 1 1 -1 -1 -1 -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1398
...
1399
1 1 1 1 0 0 0 0 -1 0 0 0 -1 0 0 0 -1 0 0 0 -1 0 0 0 0
1400
grading
1401
1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1402
HilbertBasis
1403
\end{Verbatim}
1404
The input file does not say anything about the dual algorithm mentioned in the section title. With this input it is chosen automatically. See Section \ref{div_labor} for a discussion of when this happens. But you can insist on the dual algorithm by adding \verb|DualMode| to the input (or \verb|-d| to the command line). Or, if you want to compare it to the primal algorithm add \verb|PrimalMode| (or \verb|-P| to the command line).
1405
1406
1407
The Hilbert basis contains $4828$ elements, too many to be listed here.
1408
1409
With the file \verb|5x5.in| you can compute the Hilbert basis and the Hilbert series, and the latter with HSOP:
1410
\begin{Verbatim}
1411
Hilbert series (HSOP):
1412
1 15 356 4692 36324 198467 ... 198467 36324 4692 356 15 1
1413
denominator with 15 factors:
1414
1: 5 2: 3 6: 2 12: 1 60: 2 420: 1 1260: 1
1415
1416
degree of Hilbert Series as rational function = -5
1417
1418
The numerator of the Hilbert Series is symmetric.
1419
\end{Verbatim}
1420
In view of the length of the numerator of the Hilbert series it may be difficult to observe the symmetry. So Normaliz does it for you. The symmetry shows that the monoid is Gorenstein, but if you are only interested in the Gorenstein property, there is a much faster way to check it (see Section \ref{Gorenstein}).
1421
1422
The size $6\times 6$ is out of reach for the Hilbert series, but the Hilbert basis can be computed (in the automatically chosen dual mode). It takes some hours.
1423
1424
\subsection{A dull polyhedron}\label{inhom_ineq_ex}
1425
1426
We want to compute the polyhedron defined by the inequalities
1427
\begin{align*}
1428
\xi_2 &\ge -1/2,\\
1429
\xi_2 &\le 3/2,\\
1430
\xi_2 &\le \xi_1+3/2.
1431
\end{align*}
1432
They are contained in the input file \verb|InhomIneq.in|:
1433
\begin{Verbatim}
1434
amb_space 2
1435
constraints 3
1436
0 1 >= -1/2
1437
0 1 <= 3/2
1438
-1 1 <= 3/2
1439
grading
1440
unit_vector 1
1441
\end{Verbatim}
1442
The grading says that we want to count points by the first coordinate.
1443
\begin{center}
1444
\begin{tikzpicture}[scale=0.7]
1445
1446
\filldraw[yellow] (5,-0.5) -- (-2,-0.5) -- (0,1.5) -- (5,1.5) -- cycle;
1447
1448
\foreach \x in {-2,...,5}
1449
\foreach \y in {-1,...,2}
1450
{
1451
\filldraw[fill=black] (\x,\y) circle (1.5pt);
1452
}
1453
\draw[->] (-2.5,0) -- (5.5,0);
1454
\draw[->] (0,-1.5) -- (0,2.5);
1455
\draw[thick] (5,-0.5) -- (-2,-0.5) -- (0,1.5) -- (5,1.5);
1456
\end{tikzpicture}
1457
\end{center}
1458
1459
It yields the output
1460
\begin{Verbatim}
1461
2 module generators
1462
1 Hilbert basis elements of recession monoid
1463
2 vertices of polyhedron
1464
1 extreme rays of recession cone
1465
3 support hyperplanes of polyhedron (homogenized)
1466
1467
embedding dimension = 3
1468
affine dimension of the polyhedron = 2 (maximal)
1469
rank of recession monoid = 1
1470
1471
size of triangulation = 1
1472
resulting sum of |det|s = 8
1473
1474
dehomogenization:
1475
0 0 1
1476
1477
grading:
1478
1 0 0
1479
\end{Verbatim}
1480
The interpretation of the grading requires some care in the inhomogeneous case. We have extended the input grading vector by an entry $0$ to match the embedding dimension. For the computation of the degrees of \emph{lattice points} in the ambient space you can either use only the first $2$ coordinates or take the full scalar product of the point in homogenized coordinates and the extended grading vector.
1481
1482
\begin{Verbatim}
1483
module rank = 2
1484
multiplicity = 2
1485
\end{Verbatim}
1486
The module rank is $2$ in this case since we have two ``layers'' in the solution module that are parallel to the recession monoid. This is of course also reflected in the Hilbert series.
1487
\begin{Verbatim}
1488
Hilbert series:
1489
1 1
1490
denominator with 1 factors:
1491
1: 1
1492
1493
shift = -1
1494
\end{Verbatim}
1495
We haven't seen a shift yet. It is always printed (necessarily) if the Hilbert series does not start in degree $0$. In our case it starts in degree $-1$ as indicated by the shift $-1$. We thus get the Hilbert series
1496
$$
1497
t^{-1}\,\frac{t+t}{1-t}=\frac{t^{-1}+1}{1-t}.
1498
$$
1499
1500
Note: We used the opposite convention for the shift in Normaliz 2.
1501
1502
Note that the Hilbert (quasi)polynomial is always computed for the unshifted monoid defined by the input data. (This was different in previous versions of Normaliz.)
1503
\begin{Verbatim}
1504
degree of Hilbert Series as rational function = -1
1505
1506
Hilbert polynomial:
1507
2
1508
with common denominator = 1
1509
1510
***********************************************************************
1511
1512
2 module generators:
1513
-1 0 1
1514
0 1 1
1515
1516
1 Hilbert basis elements of recession monoid:
1517
1 0 0
1518
1519
2 vertices of polyhedron:
1520
-4 -1 2
1521
0 3 2
1522
1523
1 extreme rays of recession cone:
1524
1 0 0
1525
1526
3 support hyperplanes of polyhedron (homogenized):
1527
0 -2 3
1528
0 2 1
1529
2 -2 3
1530
\end{Verbatim}
1531
1532
The dual algorithm that was used in Section \ref{job_dual} can also be applied to inhomogeneous computations. We would of course lose the Hilbert series. In certain cases it may be preferable to suppress the computation of the vertices of the polyhedron if you are only interested in the integer points; see Section \ref{InhomDual}.
1533
1534
\subsubsection{Defining it by generators}\label{polyh_ex}
1535
1536
If the polyhedron is given by its vertices and the recession cone, we can define it by these data (\verb|InhomIneq_gen.in|):
1537
\begin{Verbatim}
1538
amb_space 2
1539
vertices 2
1540
-4 -1 2
1541
0 3 2
1542
cone 1
1543
1 0
1544
grading
1545
unit_vector 1
1546
\end{Verbatim}
1547
The output is identical to the version starting from the inequalities.
1548
1549
\subsection{The Condorcet paradox}\label{Condorcet}
1550
1551
In this section we assume that you use an executable of Normaliz that was built with CoCoALib (see Section \ref{Compile}), for example an executable for Linux or Mac OS from the Normaliz web site. If not, then simply disregard any remark on symmetrization. Everything runs very quickly also without it.
1552
1553
In social choice elections each of the $k$ voters picks a linear preference order of the $n$ candidates. There are $n!$ such orders. The election result is the vector $(x_1,\dots,x_N)$, $N=n!$, in which $x_i$ is the number of voters that have chosen the $i$-th preference order in, say, lexicographic enumeration of these orders. In the following we assume the \emph{impartial anonymous culture} according to which every preference order has the same basic weight of $1/n!$.
1554
\medskip
1555
1556
We say that candidate $A$ \emph{beats} candidate $B$ if the majority of the voters prefers $A$ to $B$. As the Marquis de \emph{Condorcet} (and others) observed, ``beats'' is not transitive, and an election may exhibit the \emph{Condorcet paradox}: there is no Condorcet winner. (See \cite{BS} and the references given there for more information.)
1557
1558
We want to find the probability for $k\to\infty$ that there is a Condorcet winner for $n=4$ candidates. The event that $A$ is the Condorcet winner can be expressed by linear inequalities on the election outcome (a point in $24$-space). The wanted probability is the lattice normalized volume of the polytope cut out by the inequalities at $k=1$. The file \verb|Condorcet.in|:
1559
1560
\begin{Verbatim}
1561
amb_space 24
1562
inequalities 3
1563
1 1 1 1 1 1 -1 -1 -1 -1 -1 -1 1 1 -1 -1 1 -1 1 1 -1 -1 1 -1
1564
1 1 1 1 1 1 1 1 -1 -1 1 -1 -1 -1 -1 -1 -1 -1 1 1 1 -1 -1 -1
1565
1 1 1 1 1 1 1 1 1 -1 -1 -1 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1
1566
nonnegative
1567
total_degree
1568
Multiplicity
1569
\end{Verbatim}
1570
The first inequality expresses that $A$ beats $B$, the second and the third say that $A$ beats $C$ and $D$. (So far we do not exclude ties, and they need not be excluded for probabilities as $k\to\infty$.)
1571
1572
In addition to these inequalities we must restrict all variables to nonnegative values, and this is achieved by adding the attribute \verb|nonnegative|. The grading is set by \verb|total_degree|. It replaces the grading vector with $24$ entries $1$. Finally \verb|Multiplicity| sets the computation goal.
1573
1574
From the output file we only mention the quantity we are out for:
1575
\begin{Verbatim}
1576
multiplicity = 1717/8192
1577
\end{Verbatim}
1578
Since there are $4$ candidates, the probability for the existence of a Condorcet winner is $1717/2048$.
1579
1580
We can refine the information on the Condorcet paradox by computing the Hilbert series. Either we delete \verb|Multiplicity| from the input file or, better, we add \verb|--HilbertSeries| (or simply \verb|-q|) on the command line. The result:
1581
\begin{Verbatim}
1582
Hilbert series:
1583
1 5 133 363 4581 8655 69821 100915 ... 12346 890 481 15 6
1584
denominator with 24 factors:
1585
1: 1 2: 14 4: 9
1586
1587
degree of Hilbert Series as rational function = -25
1588
\end{Verbatim}
1589
1590
Normaliz automatically uses symmetrization for this example, but it is too small to demonstrate the power of symmetrization. For others, symmetrization can convert days into seconds.
1591
1592
Since symmetrization has been used, you will also find a file \verb|Condorcet.symm.out| in your directory. It contains the data computed for the symmetrization. You need not care at this point. We continue the discussion of symmetrization in Section \ref{symmetrize}.
1593
1594
\subsubsection{Excluding ties}\label{excluded_ex}
1595
1596
Now we are more ambitious and want to compute the Hilbert series for the Condorcet paradox, or more precisely, the number of election outcomes having $A$ as the Condorcet winner depending on the number $k$ of voters. Moreover, as it is customary in social choice theory, we want to exclude ties. The input file changes to \verb|CondorcetSemi.in|:
1597
\begin{Verbatim}
1598
amb_space 24
1599
excluded_faces 3
1600
1 1 1 1 1 1 -1 -1 -1 -1 -1 -1 1 1 -1 -1 1 -1 1 1 -1 -1 1 -1
1601
1 1 1 1 1 1 1 1 -1 -1 1 -1 -1 -1 -1 -1 -1 -1 1 1 1 -1 -1 -1
1602
1 1 1 1 1 1 1 1 1 -1 -1 -1 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1
1603
nonnegative
1604
total_degree
1605
HilbertSeries
1606
\end{Verbatim}
1607
We could omit \verb|HilbertSeries|, and the computation would include the Hilbert basis. The type \verb|excluded_faces| only affects the Hilbert series. In every other respect it is equivalent to \verb|inequalities|.
1608
1609
From the file \verb|CondorcetSemi.out| we only display the Hilbert series:
1610
\begin{Verbatim}
1611
Hilbert series:
1612
6 15 481 890 12346 ... 100915 69821 8655 4581 363 133 5 1
1613
denominator with 24 factors:
1614
1: 1 2: 14 4: 9
1615
1616
shift = 1
1617
1618
degree of Hilbert Series as rational function = -24
1619
\end{Verbatim}
1620
Surprisingly, this looks like the Hilbert series in the previous section read backwards, roughly speaking. This is true, and one can explain it as we will see below.
1621
1622
It is justified to ask why we don't use \verb|strict_inequalities| instead of \verb|excluded_faces|. It does of course give the same Hilbert series. However, Normaliz cannot (yet) apply symmetrization in inhomogeneous computations. Moreover, the algorithmic approach is different, and according to our experience \verb|excluded_faces| is more efficient, independently of symmetrization.
1623
1624
\subsubsection{At least one vote for every preference order}\label{strict_signs_ex}
1625
1626
Suppose we are only interested in elections in which every preference order is chosen by at least one voter. This can be modeled as follows (\verb|Condorcet_one.in|):
1627
\begin{Verbatim}
1628
amb_space 24
1629
inequalities 3
1630
1 1 1 1 1 1 -1 -1 -1 -1 -1 -1 1 1 -1 -1 1 -1 1 1 -1 -1 1 -1
1631
1 1 1 1 1 1 1 1 -1 -1 1 -1 -1 -1 -1 -1 -1 -1 1 1 1 -1 -1 -1
1632
1 1 1 1 1 1 1 1 1 -1 -1 -1 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1
1633
strict_signs
1634
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1635
total_degree
1636
HilbertSeries
1637
\end{Verbatim}
1638
The entry $1$ at position $i$ of the vector \verb|strict_signs| imposes the inequality $x_i\ge1$. A $-1$ would impose the inequality $x_i\le -1$, and the entry $0$ imposes no condition on the $i$-th coordinate.
1639
1640
\begin{Verbatim}
1641
Hilbert series:
1642
1 5 133 363 4581 8655 69821 100915 ... 12346 890 481 15 6
1643
denominator with 24 factors:
1644
1: 1 2: 14 4: 9
1645
1646
shift = 24
1647
1648
degree of Hilbert Series as rational function = -1
1649
\end{Verbatim}
1650
Again we encounter (almost) the Hilbert series of the Condorcet paradox (without side conditions). It is time to explain this coincidence. Let $C$ be the Condorcet cone defined by the nonstrict inequalities, $M$ the monoid of lattice points in it, $I_1\subset M$ the ideal of lattice points avoiding the $3$ facets defined by ties, $I_2$ the ideal of lattice points with strictly positive coordinates, and finally $I_3$ the ideal of lattice points in the interior of $C$. Moreover, let $\mathds{1}\in\ZZ^{24}$ be the vector with all entries $1$.
1651
1652
Since $\mathds{1}$ lies in the three facets defining the ties, it follows that $I_2=M+\mathds{1}$. This explains why we obtain the Hilbert series of $I_2$ by multiplying the Hilbert series of $M$ by $t^{24}$, as just observed. Generalized Ehrhart reciprocity (see \cite[Theorem 6.70]{BG}) then explains the Hilbert series of $I_1$ that we observed in the previous section. Finally, the Hilbert series of $I_3$ that we don't have displayed is obtained from that of $M$ by ``ordinary'' Ehrhart reciprocity. But we can also obtain $I_1$ from $I_3$: $I_1=I_3-\mathds{1}$, and generalized reciprocity follows from ordinary reciprocity in this very special case.
1653
1654
The essential point in these arguments (apart from reciprocity) is that $\mathds{1}$ lies in all support hyperplanes of $C$ except the coordinate hyperplanes.
1655
1656
You can easily compute the Hilbert series of $I_3$ by making all inequalities strict.
1657
1658
As the terminal output shows, symmetrization has not been applied for the reason mentioned above: \verb|strict_signs| is an inhomogeneous input type. It would of course be possible to encode the strict signs as \verb|excluded_faces|. Then the sparse format of matrices is very handy:
1659
\begin{Verbatim}
1660
excluded_faces 24
1661
1:1;
1662
1:2;
1663
...
1664
1:24;
1665
\end{Verbatim}
1666
This is a shorthand for the unit matrix.
1667
1668
\subsection{Testing normality}\label{normalization_ex}
1669
1670
We want to test the monoid $A_{4\times 4\times 3}$ defined by $4\times4\times3$ contingency tables for normality (see \cite{BHIKS} for the background). The input file is \ttt{A443.in}:
1671
\begin{Verbatim}
1672
amb_space 40
1673
cone_and_lattice 48
1674
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0
1675
...
1676
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1
1677
HilbertBasis
1678
\end{Verbatim}
1679
Why \verb|cone_and_lattice|? Well, we want to find out whether the monoid is normal, i.e., whether $M=C(M)\cap\gp(M)$. If $M$ is even integrally closed in $\ZZ^{40}$, then it is certainly integrally closed in the evidently smaller lattice $\gp(M)$, but the converse does not hold in general, and therefore we work with the lattice generated by the monoid generators.
1680
1681
It turns out that the monoid is indeed normal:
1682
\begin{Verbatim}
1683
original monoid is integrally closed
1684
\end{Verbatim}
1685
Actually the output file reveals that $M$ is even integrally closed in $\ZZ^{40}$: the external index is $1$, and therefore $\gp(M)$ is integrally closed in $\ZZ^{40}$.
1686
1687
The output file also shows that there is a grading on $\ZZ^{40}$ under which all our generators have degree $1$. We could have seen this ourselves: Every generator has exactly one entry $1$ in the first $16$ coordinates. (This is clear from the construction of $M$.)
1688
1689
A noteworthy detail from the output file:
1690
\begin{Verbatim}
1691
size of partial triangulation = 48
1692
\end{Verbatim}
1693
It shows that Normaliz uses only a partial triangulation in Hilbert basis computations; see \cite{BHIKS}.
1694
1695
It is no problem to compute the Hilbert series as well if you are interested in it. Simply add \verb|-q| to the command line or remove \verb|HilbertBasis| from the input file. Then a full triangulation is needed (size $2,654,272$).
1696
1697
Similar examples are \verb|A543|, \verb|A553| and \verb|A643|. The latter is not normal, as we will see below. Even on a standard PC or laptop, the Hilbert basis computation does not take very long because Normaliz uses only a partial triangulation. The Hilbert series can still be determined, but the computation time will grow considerably since it requires a full triangulation. See \cite{BIS} for timings.
1698
1699
\subsubsection{Computing just a witness}\label{IsIC}
1700
1701
If the Hilbert basis is large and there are many support hyperplanes, memory can become an issue for Normaliz, as well as computation time. Often one is only interested in deciding whether the given monoid is integrally closed (or normal). In the negative case it is enough to find a single element that is not in the original monoid -- a witness disproving integral closedness. As soon as such a witness is found, Normaliz stops the Hilbert basis computation (but will continue to compute other data if they are asked for). We look at the example \verb|A643.in| (for which the full Hilbert basis is not really a problem):
1702
1703
1704
\begin{Verbatim}
1705
amb_space 54
1706
cone_and_lattice 72
1707
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 ...
1708
...
1709
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 ...
1710
IsIntegrallyClosed
1711
\end{Verbatim}
1712
Don't add \verb|HilbertBasis| because it will overrule \verb|IsIntegrallyClosed|!
1713
1714
The output:
1715
\begin{Verbatim}
1716
72 extreme rays
1717
153858 support hyperplanes
1718
1719
embedding dimension = 54
1720
rank = 42
1721
external index = 1
1722
internal index = 1
1723
original monoid is not integrally closed
1724
witness for not being integrally closed:
1725
0 0 1 0 1 1 1 1 0 0 1 0 0 1 0 1 0 1 1 0 1 1 0 0 1 1 1 0 0 1 1 0 0 1 1 ...
1726
1727
grading:
1728
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 ...
1729
1730
degrees of extreme rays:
1731
1: 72
1732
1733
***********************************************************************
1734
1735
72 extreme rays:
1736
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 ...
1737
...
1738
\end{Verbatim}
1739
1740
If you repeat such a computation, you may very well get a different witness if several parallel threads find witnesses. Only one of them is delivered.
1741
1742
\subsection{Convex hull computation/vertex enumeration}
1743
1744
Normaliz computes convex hulls as should be very clear by now, and the only purpose of this section is to emphasize that Normaliz can be restricted to this task by setting an explicit computation goal. By convex hull computation we mean the determination of the support hyperplanes of a polyhedron that is given by generators (or vertices). The converse operation is vertex enumeration. Both amount to the dualization of a cone, and can therefore be done by the same algorithm.
1745
1746
As an example we take the input file \verb|cyclicpolytope30-15.in|, the cyclic polytope of dimension 15 with 30 vertices (suggested by D. Avis and Ch. Jordan):
1747
\begin{Verbatim}
1748
/* cyclic polytope of dimension 15 with 30 vertices */
1749
amb_space 16
1750
polytope 30
1751
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1752
2 4 8 16 32 64 128 256 512 1024 2048 4096 8192 16384 32768
1753
...
1754
30 900 27000 810000 ... 478296900000000000000 14348907000000000000000
1755
SupportHyperplanes
1756
\end{Verbatim}
1757
Already the entries of the vertices show that the computation cannot be done in 64 bit arithmetic. But you need not be worried. Just start Normaliz as usual. It will simply switch to infinite precision by itself, as shown by the terminal output (use the option \verb|-c| or \verb|--Verbose|).
1758
\begin{Verbatim}
1759
\.....|
1760
Normaliz 3.2.0 \....|
1761
\...|
1762
(C) The Normaliz Team, University of Osnabrueck \..|
1763
January 2017 \.|
1764
\|
1765
************************************************************
1766
Compute: SupportHyperplanes
1767
Could not convert 15181127029874798299.
1768
Arithmetic Overflow detected, try a bigger integer type!
1769
Restarting with a bigger type.
1770
************************************************************
1771
starting primal algorithm (only support hyperplanes) ...
1772
Generators sorted lexicographically
1773
Start simplex 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
1774
gen=17, 72 hyp
1775
gen=18, 240 hyp
1776
gen=19, 660 hyp
1777
gen=20, 1584 hyp
1778
gen=21, 3432 hyp
1779
gen=22, 6864 hyp
1780
gen=23, 12870 hyp
1781
gen=24, 22880 hyp
1782
gen=25, 38896 hyp
1783
gen=26, 63648 hyp
1784
gen=27, 100776 hyp
1785
gen=28, 155040 hyp
1786
gen=29, 232560 hyp
1787
gen=30, 341088 hyp
1788
Pointed since graded
1789
Select extreme rays via comparison ... done.
1790
------------------------------------------------------------
1791
transforming data... done.
1792
\end{Verbatim}
1793
Have a look at the output file if you are not afraid of $341088$ linear forms.
1794
1795
If you have looked closely at the terminal output above, you should have stumbled on the lines
1796
\begin{Verbatim}
1797
Could not convert 15181127029874798299.
1798
Arithmetic Overflow detected, try a bigger integer type!
1799
\end{Verbatim}
1800
They show that Normaliz has tried the computation in 64 bit integers, but encountered a number that is too large for this precision. It has automatically switched to infinite precision. (See Section \ref{Integer} for more information on integer types.)
1801
1802
\subsection{Lattice points in a polytope}\label{project_example}
1803
1804
The computation of lattice points in a polytope can be viewed as a truncated Hilbert basis computation, as we have seen in preceding examples. But Normaliz can be restricted to their computation, with homogeneous as well as with inhomogeneous input. Let us look at \verb|ChF_8_1024.in|:
1805
1806
\begin{Verbatim}
1807
amb_space 8
1808
constraints 16
1809
0.10976576 0.2153132834 ... 0.04282847494 >= -1/2
1810
...
1811
0.10976576 -0.2153132834 ... -0.04282847494 >= -1/2
1812
0.10976576 0.2153132834 ... 0.04282847494 <= 1/2
1813
0.10976576 -0.2153132834 ...-0.04282847494 <= 1/2
1814
ProjectionFloat
1815
\end{Verbatim}
1816
1817
This example comes from numerical analysis; see Ch. Kacwin, J. Oettershagen and T. Ullrich (\url{arXiv:1606.00492}, Math. Monatshefte, to appear). This explains the decimal fractions in the input. Normaliz converts them immediately into ordinary fractions of type numerator/denominator, and then makes the input integral as usual.
1818
1819
In the output file you can see to what integer vectors Normaliz has converted the inequalities of the input file:
1820
\begin{Verbatim}
1821
16 support hyperplanes of polyhedron (homogenized):
1822
5488288000 10765664170 ... 2141423747 25000000000
1823
...
1824
-5488288000 10765664170 ... 2141423747 25000000000
1825
\end{Verbatim}
1826
1827
The option \verb|ProjectionFloat| indicates that we want to compute only the lattice points in the polytope defined by the inequalities and that we want to use the floating point variant of the project-and-lift algorithm; \verb|Projection| would make Normaliz use its ordinary arithmetic in this algorithm. For our example the difference in time is not really significant, but when you try \verb|ChF_16_1048576.in|, it becomes very noticeable. Let us have a look at the relevant part of the terminal output:
1828
\begin{Verbatim}
1829
Computing lattice points by project-and-lift
1830
Projection
1831
embdim 9 inequalities 16
1832
embdim 8 inequalities 56
1833
embdim 7 inequalities 112
1834
embdim 6 inequalities 140
1835
embdim 5 inequalities 112
1836
embdim 4 inequalities 56
1837
embdim 3 inequalities 16
1838
embdim 2 inequalities 2
1839
Lifting
1840
embdim 2 Deg1Elements 5
1841
embdim 3 Deg1Elements 21
1842
embdim 4 Deg1Elements 73
1843
embdim 5 Deg1Elements 199
1844
embdim 6 Deg1Elements 397
1845
embdim 7 Deg1Elements 629
1846
embdim 8 Deg1Elements 907
1847
embdim 9 Deg1Elements 1067
1848
Project-and-lift complete
1849
\end{Verbatim}
1850
1851
We start with embedding dimension $9$ since we need a homogenizing coordinate in inhomogeneous computations. Then the polytope is successively projected onto a coordinate hyperplane until we reach a line segment given by $2$ inequalities. In the second part Normaliz lifts the lattice points back through all projections. The following figure illustrates the procedure for a polygon that is projected to a line segment.
1852
\begin{center}
1853
\begin{tikzpicture}[scale=0.7]
1854
1855
\filldraw[yellow] (-1.5,-0.7) -- (1,2) -- (4.3,3) -- (3,0.3) -- cycle;
1856
\draw (-1.5,-0.7) -- (1,2) -- (4.3,3) -- (3,0.3) -- cycle;
1857
1858
\foreach \x in {-1,...,4}
1859
{
1860
\draw[green] (\x,-1.5) -- (\x,3.5);
1861
}
1862
\draw[->] (-2.5,0) -- (5.5,0);
1863
\draw[->] (0,-1.5) -- (0,3.5);
1864
%\draw (-1.5,-1.5) -- (-1.5,3.5);
1865
\draw[color=red,thick] (-1.5,0) -- (4.3,0);
1866
1867
\foreach \x in {-2,...,5}
1868
\foreach \y in {-1,...,3}
1869
{
1870
\filldraw[fill=black] (\x,\y) circle (1.5pt);
1871
}
1872
\draw (-1.5,-0.7) -- (1,2) -- (4.3,3) -- (3,0.3) -- cycle;
1873
%\draw[thick] (5,0) -- (-1,-0) -- (0,1) -- (5,1);
1874
\end{tikzpicture}
1875
\end{center}
1876
The green lines show the fibers over the lattice points in the (red) line segment. Note that not every lattice point in the projection must be liftable to a lattice point in the next higher dimension.
1877
1878
In the output file \verb|ChF_8_1024.out| we see
1879
\begin{Verbatim}
1880
1067 module generators:
1881
-4 0 0 0 0 0 0 0 1
1882
-3 0 0 0 -1 0 0 0 1
1883
-3 0 0 0 0 0 0 0 1
1884
...
1885
3 0 0 0 0 0 0 0 1
1886
3 0 0 0 1 0 0 0 1
1887
4 0 0 0 0 0 0 0 1
1888
\end{Verbatim}
1889
These are the lattice points that we wanted to compute.
1890
1891
Normaliz finds out that our polytope is in fact a parallelotope. This allows Normaliz to suppress the computation of its vertices. We are not interested in them, and they look frightening when written as ordinary fractions (computed with the additional option \verb|SupportHyperplanes|). This is only the first vertex, the denominator is the number in the last row:
1892
\begin{Verbatim}
1893
256 vertices of polyhedron:
1894
-7831972155307708173239167258085974255845869779051329651906336771582421875
1895
-2560494334732147696394408175864650673712115229853232268085759500000000000
1896
2411932924117448250036041241683237083742860005142447171295674845000000000
1897
-2170682283899852950367663781367299946065844697990214478942400250000000000
1898
1846013540077621750562232333569651551559659207659438074760922800500000000
1899
-1450403531662801634587765586956338287943865886737024582718631750000000000
1900
999055328718773316303519268629091038893656784654239444024061220000000000
1901
-509313990522468215816366827427428831508901797188810249435062450000000000
1902
2292486335803169657316823615602461625422283571089603408672092012129842506
1903
...
1904
\end{Verbatim}
1905
1906
Not all polytopes are parallelotopes, and in most cases Normaliz must compute the vertices or extreme rays as an auxiliary step, even if we are not interested in them. You can always add the option
1907
\begin{itemize}
1908
\itemtt [NoExtRaysOutput]
1909
\end{itemize}
1910
if you want to suppress their output. (The numerical information on the number of extreme rays etc. will however be included in the output file if it is available.)
1911
1912
On the other hand, the information provided by the vertices may be important. Instead of the unreadable integer output shown above, you can ask for
1913
\begin{itemize}
1914
\itemtt [VerticesFloat]
1915
\end{itemize}
1916
Then the vertices of polyhedra are printed in floating point format:
1917
\begin{Verbatim}
1918
256 vertices of polyhedron:
1919
-3.41637 -1.11691 1.0521 ... 0.435796 -0.222167 1
1920
-3.41637 -0.946868 0.435796 ... -1.0521 0.632677 1
1921
...
1922
\end{Verbatim}
1923
Note that they can only be printed if a polyhedron is defined. This is always the case in inhomogeneous computations, but in the homogeneous case a grading is necessary.
1924
1925
\subsection{The integer hull}\label{IntHull}
1926
1927
The integer hull of a polyhedron $P$ is the convex hull of the set of lattice points in $P$ (despite its name, it usually does not contain $P$). Normaliz computes it by first finding the lattice points and then computing the convex hull. The computation of the integer hull is requested by the computation goal \verb|IntegerHull|.
1928
1929
The computation is somewhat special since it creates a second cone (and lattice) $C_\textup{int}$. In homogeneous computations the degree $1$ vectors generate $C_\textup{int}$ by an input matrix of type \verb|cone_and_lattice|. In inhomogeneous computations the module generators and the Hilbert basis of the recession cone are combined and generate $C_\textup{int}$. Therefore the recession cone is reproduced, even if the polyhedron should not contain a lattice point.
1930
1931
The integer hull computation itself is always inhomogeneous. The output file for $C_\textup{int}$ is \verb|<project>.IntHull.out|.
1932
1933
As a very simple example we take \verb|rationalIH.in| (\verb|rational.in| augmented by \verb|IntegerHull|):
1934
\begin{Verbatim}
1935
amb_space 3
1936
cone 3
1937
1 1 2
1938
-1 -1 3
1939
1 -2 4
1940
grading
1941
unit_vector 3
1942
HilbertSeries
1943
IntegerHull
1944
\end{Verbatim}
1945
It is our rational polytope from Section \ref{rational}. We know already that the origin is the only lattice point it contains. Nevertheless let us have a look at \verb|rationalIH.IntHull.out|:
1946
1947
\begin{Verbatim}
1948
1 vertices of polyhedron
1949
0 extreme rays of recession cone
1950
1 support hyperplanes of polyhedron (homogenized)
1951
1952
embedding dimension = 3
1953
affine dimension of the polyhedron = 0
1954
rank of recession monoid = 0
1955
internal index = 1
1956
1957
1958
***********************************************************************
1959
1960
1 vertices of polyhedron:
1961
0 0 1
1962
1963
0 extreme rays of recession cone:
1964
1965
1 support hyperplanes of polyhedron (homogenized):
1966
0 0 1
1967
1968
2 equations:
1969
1 0 0
1970
0 1 0
1971
1972
1 basis elements of lattice:
1973
0 0 1
1974
\end{Verbatim}
1975
Since the lattice points in $P$ are already known, the goal was to compute the constraints defining the integer hull. Note that all the constraints defining the integer hull can be different from those defining $P$. In this case the integer hull is cut out by the $2$ equations.
1976
1977
As a second example we take the polyhedron of Section \ref{inhom_ineq_ex}. The integer hull is the ``green'' polyhedron:
1978
\begin{center}
1979
\begin{tikzpicture}[scale=0.7]
1980
1981
\filldraw[yellow] (5,-0.5) -- (-2,-0.5) -- (0,1.5) -- (5,1.5) -- cycle;
1982
\filldraw[green] (5,0) -- (-1,-0) -- (0,1) -- (5,1) -- cycle;
1983
1984
\foreach \x in {-2,...,5}
1985
\foreach \y in {-1,...,2}
1986
{
1987
\filldraw[fill=black] (\x,\y) circle (1.5pt);
1988
}
1989
\draw[->] (-2.5,0) -- (5.5,0);
1990
\draw[->] (0,-1.5) -- (0,2.5);
1991
\draw (5,-0.5) -- (-2,-0.5) -- (0,1.5) -- (5,1.5);
1992
\draw[thick] (5,0) -- (-1,-0) -- (0,1) -- (5,1);
1993
\end{tikzpicture}
1994
\end{center}
1995
1996
The input is \verb|InhomIneqIH.in| (\verb|InhomIneq.in| augmented by \verb|IntegerHull|). The data of the integer hull are found in \verb|InhomIneqIH.IntHull.out|:
1997
\begin{Verbatim}
1998
...
1999
2 vertices of polyhedron:
2000
-1 0 1
2001
0 1 1
2002
2003
1 extreme rays of recession cone:
2004
1 0 0
2005
2006
3 support hyperplanes of polyhedron (homogenized):
2007
0 -1 1
2008
0 1 0
2009
1 -1 1
2010
\end{Verbatim}
2011
2012
\subsection{Inhomogeneous congruences}\label{ChinRem}
2013
2014
We want to compute the nonnegative solutions of the simultaneous inhomogeneous congruences
2015
\begin{align*}
2016
x_1 + 2x_2 &\equiv 3\pod 7,\\
2017
2x_1 + 2x_2 &\equiv 4\pod{13}
2018
\end{align*}
2019
in two variables. The input file \verb|InhomCong.in| is
2020
\begin{Verbatim}
2021
amb_space 2
2022
constraints 2 symbolic
2023
x[1] + 2x[2] ~ 3 (7);
2024
2x[1] + 2x[2] ~ 4 (13);
2025
\end{Verbatim}
2026
This is an example of input of symbolic constraints. We use \verb|~| as the best ASCII character for representing the congruence sign $\equiv$.
2027
2028
Alternatively one can use a matrix in the input, for which we must move the right hand side over to the left.
2029
\begin{Verbatim}
2030
amb_space 2
2031
inhom_congruences 2
2032
1 2 -3 7
2033
2 2 -4 13
2034
\end{Verbatim}
2035
It is certainly harder to read.
2036
2037
The first vector list in the output:
2038
2039
\begin{Verbatim}
2040
3 module generators:
2041
0 54 1
2042
1 1 1
2043
80 0 1
2044
\end{Verbatim}
2045
Easy to check: if $(1,1)$ is a solution, then it must generate the module of solutions together with the generators of the intersections with the coordinate axes. Perhaps more difficult to find:
2046
\begin{Verbatim}
2047
6 Hilbert basis elements of recession monoid:
2048
0 91 0
2049
1 38 0
2050
3 23 0
5 8 0
12 1 0
91 0 0

1 vertices of polyhedron:
0 0 91
2054
\end{Verbatim}
2055
Strange, why is $(0,0,1)$, representing the origin in $\RR^2$, not listed as a vertex as well?
2056
Well, the vertex shown represents an extreme ray in the lattice $\EE$, and $(0,0,1)$ does not belong to $\EE$.
2057
2058
\begin{Verbatim}
2059
2 extreme rays of recession cone:
2060
0 91 0
2061
91 0 0
2062
2063
3 support hyperplanes of polyhedron (homogenized)
2064
0 0 1
2065
0 1 0
2066
1 0 0
2067
2068
1 congruences:
2069
58 32 1 91
2070
\end{Verbatim}
2071
Normaliz has simplified the system of congruences to a single one.
2072
\begin{Verbatim}
2073
3 basis elements of lattice:
2074
1 0 33
2075
0 1 -32
2076
0 0 91
2077
\end{Verbatim}
2078
Again, don't forget that Normaliz prints a basis of the efficient lattice $\EE$.
2079
2080
\subsubsection{Lattice and offset}\label{offset_ex}
2081
2082
The set of solutions to the inhomogeneous system is an affine lattice in $\RR^2$. The lattice basis of $\EE$ above does not immediately let us write down the set of solutions in the form $w+L_0$ with a subgroup $L_0$, but we can easily transform the basis of $\EE$: just add the first and the second vector to obtain $(1,1,1)$ -- we already know that it belongs to $\EE$ and any element in $\EE$ with last coordinate $1$ would do. Try the file \verb|InhomCongLat.in|:
2083
2084
\begin{Verbatim}
2085
amb_space 2
2086
offset
2087
1 1
2088
lattice 2
2089
32 33
2090
91 91
2091
\end{Verbatim}
2092
2093
2094
\subsubsection{Variation of the signs}\label{sign_ex}
2095
2096
Suppose we want to solve the system of congruences under the condition that both variables are negative (\verb|InhomCongSigns.in|):
2097
2098
\begin{Verbatim}
2099
amb_space 2
2100
inhom_congruences 2
2101
1 2 -3 7
2102
2 2 -4 13
2103
signs
2104
-1 -1
2105
\end{Verbatim}
2106
The two entries of the sign vector impose the sign conditions $x_1\le 0$ and $x_2\le 0$.
2107
2108
From the output we see that the module generators are more complicated now:
2109
\begin{Verbatim}
2110
4 module generators:
2111
-11 0 1
2112
-4 -7 1
2113
-2 -22 1
2114
0 -37 1
2115
\end{Verbatim}
2116
The Hilbert basis of the recession monoid is simply that of the nonnegative case multiplied by $-1$.
2117
2118
\subsection{Integral closure and Rees algebra of a monomial ideal}\label{Rees}
2119
2120
Next, let us discuss the example \ttt{MonIdeal.in} (typeset in two columns):
2121
2122
\begin{Verbatim}
2123
amb_space 5
2124
rees_algebra 9
2125
1 2 1 2 1 0 3 4
2126
3 1 1 3 5 1 0 1
2127
2 5 1 0 2 4 1 5
2128
0 2 4 3 2 2 2 4
2129
0 2 3 4
2130
\end{Verbatim}
2131
The input vectors are the exponent vectors of a monomial ideal $I$ in the ring $K[X_1,X_2,X_3,X_4]$. We want to compute the normalization of the Rees algebra of the ideal. In particular we can extract from it the integral closure of the ideal. Since we must introduce an extra variable $T$, we have \verb|amb_space 5|.
2132
2133
In the Hilbert basis we see the exponent vectors of the $X_i$, namely the unit vectors with last component $0$. The vectors with last component $1$ represent the integral closure $\overline I$ of the ideal. There is a vector with last component $2$, showing that the integral closure of $I^2$ is larger than~$\overline I^2$.
2134
\begin{Verbatim}
2135
16 Hilbert basis elements:
2136
0 0 0 1 0
2137
...
2138
5 1 0 1 1
2139
6 5 2 2 2
2140
2141
11 generators of integral closure of the ideal:
2142
0 2 3 4
2143
...
2144
5 1 0 1
2145
\end{Verbatim}
2146
The output of the generators of $\overline I$ is the only place where we suppress the homogenizing variable for ``historic'' reasons. If we extract the vectors with last component $1$ from the extreme rays, then we obtain the smallest monomial ideal that has the same integral closure as $I$.
2147
\begin{Verbatim}
2148
10 extreme rays:
2149
0 0 0 1 0
2150
...
2151
5 1 0 1 1
2152
\end{Verbatim}
2153
The support hyperplanes which are not just sign conditions describe primary decompositions of all the ideals $\overline{I^k}$ by valuation ideals. It is not hard to see that none of them can be omitted for large $k$ (for example, see: W. Bruns and G. Restuccia, Canonical modules of Rees algebras. J. Pure Appl. Algebra 201, 189--203 (2005)).
2154
\begin{Verbatim}
2155
23 support hyperplanes:
2156
0 0 0 0 1
2157
0 ...
2158
6 0 1 3 -13
2159
\end{Verbatim}
2160
2161
\subsubsection{Only the integral closure of the ideal}
2162
2163
If only the integral closure of the ideal is to be computed, one can choose the input as follows (\verb|IntClMonId.in|):
2164
\begin{Verbatim}
2165
amb_space 4
2166
vertices 9
2167
1 2 1 2 1
2168
...
2169
2 2 2 4 1
2170
cone 4
2171
1 0 0 0
2172
0 1 0 0
2173
0 0 1 0
2174
0 0 0 1
2175
\end{Verbatim}
2176
2177
The generators of the integral closure appear as module generators in the output and the generators of the smallest monomial ideal with this integral closure are the vertices of the polyhedron.
2178
2179
\subsection{Starting from a binomial ideal}\label{binom_ex}
2180
2181
As an example, we consider the binomial ideal generated by
2182
$$
2183
X_1^2X_2-X_4X_5X_6,\ \ X_1X_4^2-X_3X_5X_6,\ \ X_1X_2X_3-X_5^2X_6.
2184
$$
2185
We want to find an embedding of the toric ring it defines and
2186
the normalization of the toric ring. The input vectors are obtained as the differences of the two exponent vectors in the binomials. So the input file \verb+lattice_ideal.in+ is
2187
\begin{Verbatim}
2188
amb_space 6
2189
lattice_ideal 3
2190
2 1 0 -1 -1 -1
2191
1 0 -1 2 -1 -1
2192
1 1 1 0 -2 -1
2193
\end{Verbatim}
2194
In order to avoid special input rules for this case in which our object is not defined as a subset of an ambient space, but as a quotient of type \emph{generators/relations}, we abuse the name \verb|amb_space|: it determines the space in which the input vectors live.
2195
2196
We get the output
2197
\begin{Verbatim}
2198
6 original generators of the toric ring
2199
\end{Verbatim}
2200
namely the residue classes of the indeterminates.
2201
\begin{Verbatim}
2202
9 Hilbert basis elements
2203
9 Hilbert basis elements of degree 1
2204
\end{Verbatim}
2205
So the toric ring defined by the binomials is not normal. Normaliz found the standard grading on the toric ring. The normalization is generated in degree $1$, too (in this case).
2206
\begin{Verbatim}
2207
5 extreme rays
2208
5 support hyperplanes
2209
2210
embedding dimension = 3
2211
rank = 3 (maximal)
2212
external index = 1
2213
internal index = 1
2214
original monoid is not integrally closed
2215
\end{Verbatim}
2216
We saw that already.
2217
\begin{Verbatim}
2218
size of triangulation = 5
2219
resulting sum of |det|s = 10
2220
2221
grading:
2222
-2 1 1
2223
\end{Verbatim}
2224
This is the grading on the ambient space (or polynomial ring) defining the standard grading on our subalgebra. The enumerative data that follow are those of the normalization!
2225
\begin{Verbatim}
2226
degrees of extreme rays:
2227
1: 5
2228
2229
Hilbert basis elements are of degree 1
2230
2231
multiplicity = 10
2232
2233
Hilbert series:
2234
1 6 3
2235
denominator with 3 factors:
2236
1: 3
2237
2238
degree of Hilbert Series as rational function = -1
2239
2240
Hilbert polynomial:
2241
1 3 5
2242
with common denominator = 1
2243
2244
rank of class group = 2
2245
class group is free
2246
2247
***********************************************************************
2248
2249
6 original generators:
2250
0 0 1
2251
3 5 2
2252
0 1 0
2253
1 2 1
2254
1 3 0
2255
1 0 3
2256
\end{Verbatim}
2257
This is an embedding of the toric ring defined by the binomials. There are many choices, and Normaliz has taken one of them. You should check that the generators in this order satisfy the binomial equations. Turning to the ring theoretic interpretation, we can say that the toric ring defined by the binomial equations can be embedded into $K[Y_1,Y_2,Y_3]$ as a monomial subalgebra that is generated by $Y_1^0Y_2^0Y_3^1$,\dots,$Y_1^1Y_2^0Y_3^3$.
2258
2259
Now the generators of the normalization:
2260
\begin{Verbatim}
2261
9 Hilbert basis elements of degree 1: 5 extreme rays:
2262
0 0 1 0 0 1
2263
0 1 0 0 1 0
2264
1 0 3 1 0 3
2265
1 1 2 1 3 0
2266
1 2 1 3 5 2
2267
1 3 0
2268
2 3 2 5 support hyperplanes:
2269
2 4 1 -15 7 5
2270
3 5 2 -3 1 2
2271
0 0 1
2272
0 1 0
2273
1 0 0
2274
2275
0 further Hilbert basis elements of higher degree:
2276
\end{Verbatim}
2277
2278
2279
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% INPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2280
\section{The input file}\label{input}
2281
2282
The input file \ttt{<project>.in} consists of one or
2283
several items. There are several types of items:
2284
2285
\begin{arab}
2286
\item definition of the ambient space,
2287
\item matrices with integer or rational entries (depending on the type),
2288
\item vectors with integer entries,
2289
\item constraints in tabular or symbolic format,
2290
\item a polynomial,
2291
\item computation goals and algorithmic variants,
2292
\item numerical parameters,
2293
\item comments.
2294
\end{arab}
2295
2296
An item cannot include another item. In particular, comments can only be included between other items, but not within another item. Matrices and vectors can have two different formats, plain and formatted.
2297
2298
Matrices and vectors are classified by the following attributes:
2299
\begin{arab}
2300
\item generators, constraints, accessory,
2301
\item cone/polyhedron, (affine) lattice,
2302
\item homogeneous, inhomogeneous.
2303
\end{arab}
2304
In this classification, equations are considered as constraints on the lattice because Normaliz treats them as such -- for good reason: it is very easy to intersect a lattice with a hyperplane.
2305
2306
The line structure is irrelevant for the interpretation of the input, but it is advisable to use it for the readability of the input file.
2307
2308
The input syntax of Normaliz 2 can still be used. It is explained in Appendix \ref{OldSyntax}.
2309
2310
\subsection{Input items}
2311
2312
\subsubsection{The ambient space and lattice}
2313
2314
The ambient space is specified as follows:
2315
\begin{Verbatim}
2316
amb_space <d>
2317
\end{Verbatim}
2318
where \ttt{<d>} stands for the dimension $d$ of the ambient vector space $\RR^d$ in which the geometric objects live. The \emph{ambient lattice} $\AA$ is set to $\ZZ^d$.
2319
2320
Alternatively one can define the ambient space implicitly by
2321
\begin{Verbatim}
2322
amb_space auto
2323
\end{Verbatim}
2324
In this case the dimension of the ambient space is determined by Normaliz from the first formatted vector or matrix in the input file. It is clear that any input item that requires the knowledge of the dimension can only follow the first formatted vector or matrix.
2325
2326
\emph{In the following the letter $d$ will always denote the dimension set with} \verb|amb_space|.
2327
2328
2329
An example:
2330
\begin{Verbatim}
2331
amb_space 5
2332
\end{Verbatim}
2333
indicates that polyhedra and lattices are subobjects of $\RR^5$. The ambient lattice is $\ZZ^5$.
2334
2335
\emph{The first non-comment input item must specify the ambient space.}
2336
2337
\subsubsection{Plain vectors}
2338
2339
A plain vector is built as follows:
2340
\begin{Verbatim}
2341
<T>
2342
<x>
2343
\end{Verbatim}
2344
Again \ttt{<T>} denotes the type and \ttt{<x>} is the vector itself. The number of components is determined by the type of the vector and the dimension of the ambient space. At present, all vectors have length $d$.
2345
2346
Example:
2347
\begin{Verbatim}
2348
grading
2349
1 0 0
2350
\end{Verbatim}
2351
2352
Normaliz allows also the input of sparse vectors. Sparse input is signalized by the key word \verb|sparse| as the first entry. It is followed by entries of type \verb|<col>:<val>| where \verb|<col>| denotes the column and \verb|<val>| the value in that column. (The unspecified columns have entry $0$.) A sparse vector is terminated by the character \verb|;|.
2353
2354
Example:
2355
\begin{Verbatim}
2356
grading
2357
sparse 1:1;
2358
\end{Verbatim}
2359
2360
2361
For certain vectors there also exist shortcuts. Examples:
2362
\begin{Verbatim}
2363
total_degree
2364
unit_vector 25
2365
\end{Verbatim}
2366
2367
2368
\subsubsection{Formatted vectors}
2369
2370
A formatted vector is built as follows:
2371
\begin{Verbatim}
2372
<T>
2373
[ <x> ]
2374
\end{Verbatim}
2375
where \ttt{<T>} denotes the type and \ttt{<x>} is the vector itself. The components can be separated by white space, commas or semicolons. An example showing all possibilities (not recommended):
2376
\begin{Verbatim}
2377
grading
2378
[1,0; 0 5]
2379
\end{Verbatim}
2380
2381
\subsubsection{Plain matrices}
2382
2383
A plain matrix is built as follows:
2384
\begin{Verbatim}
2385
<T> <m>
2386
<x_1>
2387
...
2388
<x_m>
2389
\end{Verbatim}
2390
Here \ttt{<T>} denotes the type of the matrix, \ttt{<m>} the number of rows, and \ttt{<x\_1>},...,\ttt{<x\_m>} are the rows. Some types allow rational and floating point matrix entries, others are restricted to integers; see Sections \ref{rational_input} and \ref{decimal_input}.
2391
2392
The number of columns is implicitly defined by the dimension of the ambient space and the type of the matrix. Example (with \verb|amb_space 3|):
2393
\begin{Verbatim}
2394
cone 3
2395
1/3 2 3
2396
4 5 6
2397
11 12/7 13/21
2398
\end{Verbatim}
2399
2400
Normaliz allows the input of matrices in transposed form:
2401
\begin{Verbatim}
2402
<T> transpose <n>
2403
<x_1>
2404
...
2405
<x_m>
2406
\end{Verbatim}
2407
Note that \verb|<n>| is now the number of \emph{columns} of the matrix that follows it (assumed to be the number of input vectors). The number of rows is determined by the dimension of the ambient space and the type of the matrix. Example:
2408
\begin{Verbatim}
2409
cone transpose 3
2410
1 0 3/2
2411
0 1/9 4
2412
\end{Verbatim}
2413
is equivalent to
2414
\begin{Verbatim}
2415
cone 3
2416
1 0
2417
0 1/9
2418
3/2 4
2419
\end{Verbatim}
2420
2421
Like vectors, matrices have a sparse input variant, again signalized by the key word \verb|sparse|. The rows are sparse vectors with entries \verb|<col>:<val>|, and each row is concluded by the character \verb|;|.
2422
2423
Example:
2424
\begin{Verbatim}
2425
inequalities 3 sparse
2426
1:1;
2427
2:1;
2428
3:1;
2429
\end{Verbatim}
2430
chooses the $3\times 3$ unit matrix as a matrix of type \verb|inequalities|. Note that also in case of transposed matrices, sparse entry is row by row.
2431
2432
\emph{Matrices may have zero rows.} Such empty matrices like
2433
\begin{Verbatim}
2434
inhom_inequalities 0
2435
\end{Verbatim}
2436
2437
can be used to make the input inhomogeneous (Section \ref{inhom_inp}) or to avoid the automatic choice of the positive orthant in certain cases (Section \ref{default}). (The empty \verb|inhom_inequalities| have both effects simultaneously.) Apart from these effects, empty matrices have no influence on the computation.
2438
2439
\subsubsection{Formatted matrices}
2440
2441
A formatted matrix is built as follows:
2442
\begin{Verbatim}
2443
<T>
2444
[ [<x_1>]
2445
...
2446
[<x_m>] ]
2447
\end{Verbatim}
2448
Here \ttt{<T>} denotes the type of the matrix and \verb|<x_1>|,\dots,\verb|<x_m>| are vectors. Legal separators are white space, commas and semicolons. An example showing all possibilities (not really recommended):
2449
\begin{Verbatim}
2450
cone [
2451
[ 2 1][3/7 4];
2452
[0 1],
2453
[9 10] [11 12/13]
2454
]
2455
\end{Verbatim}
2456
Similarly as plain matrices, formatted matrices can be given in transposed form, and they can be empty.
2457
2458
\subsubsection{Constraints in tabular format}\label{const_syntax}
2459
2460
This input type is somewhat closer to standard notation than the encoding of constraints in matrices. The general type of equations and inequalities is
2461
\begin{Verbatim}
2462
<x> <rel> <int>;
2463
\end{Verbatim}
2464
where \verb|<x>| denotes a vector of length $d$, \verb|<rel>| is one of the relations \verb|=, <=, >=, <, >| and \verb|<int>| is an integer.
2465
2466
Congruences have the form
2467
\begin{Verbatim}
2468
<x> ~ <int> (<mod>);
2469
\end{Verbatim}
2470
where \verb|<mod>| is a nonzero integer.
2471
2472
Examples:
2473
\begin{Verbatim}
2474
1/2 -2 >= 5
2475
1 -1/7 = 0
2476
-1 1 ~ 7 (9)
2477
\end{Verbatim}
2478
2479
Note: all numbers and relation signs must be separated by white space.
2480
2481
\subsubsection{Constraints in symbolic format}\label{symb_syntax}
2482
2483
This input type is even closer to standard notation than the encoding of constraints in matrices or in tabular format. It is especially useful if the constraints are sparse. Instead of assigning a value to a coordinate via its position in a vector, it uses coordinates named \verb|x[<n>]| where \verb|<n>| is the index of the coordinate. The index is counted from $1$.
2484
2485
The general type of equations and inequalities is
2486
\begin{Verbatim}
2487
<lhs> <rel> <rhs>;
2488
\end{Verbatim}
2489
where \verb|<lhs>| and \verb|<rhs>| denote linear functions of the \verb|x[<n>]| with integer coefficients.
2490
As above, \verb|<rel>| is one of the relations \verb|=, <=, >=, <, >|. (An empty \verb|<lhs>| or \verb|<rhs>| has the value $0$.) Note the terminating semicolon.
2491
2492
Congruences have the form
2493
\begin{Verbatim}
2494
<lhs> ~ <rhs> (<mod>);
2495
\end{Verbatim}
2496
where \verb|<mod>| is a nonzero integer.
2497
2498
Examples:
2499
\begin{Verbatim}
2500
1/3x[1] >= 2x[2] + 5;
2501
x[1]+1=1/4x[2] ;
2502
-x[1] + x[2] ~ 7 (9);
2503
\end{Verbatim}
2504
2505
There is no need to insert white space for separation, but it may be inserted anywhere where it does not disrupt numbers or relation signs.
2506
2507
\subsubsection{Polynomials}\label{poly_input}
2508
2509
For the computation of weighted Ehrhart series and integrals Normaliz needs the input of a polynomial with rational coefficients. The polynomial is first read as a string. For the computation the string is converted by the input function of CoCoALib \cite{CoCoA}. Therefore any string representing a valid CoCoA expression is allowed. However the names of the indeterminates are fixed: \verb|x[1]|,\dots,\verb|x[<N>]| where \verb|<N>| is the value of \verb|amb_space|. The polynomial must be concluded by a semicolon.
2510
2511
Example:
2512
\begin{Verbatim}
2513
(x[1]+1)*(x[1]+2)*(x[1]+3)*(x[1]+4)*(x[1]+5)*
2514
(x[2]+1)*(x[3]+1)*(x[4]+1)*(x[5]+1)*(x[6]+1)*(x[7]+1)*
2515
(x[8]+1)*(x[8]+2)*(x[8]+3)*(x[8]+4)*(x[8]+5)*1/14400;
2516
2517
(x[1]*x[2]*x[3]*x[4])^2*(x[1]-x[2])^2*(x[1]-x[3])^2*
2518
(x[1]-x[4])^2*(x[2]-x[3])^2*(x[2]-x[4])^2*(x[3]-x[4])^2;
2519
\end{Verbatim}
2520
2521
\subsubsection{Rational numbers}\label{rational_input}
2522
2523
Rational numbers are allowed in input matrices, but not in all. They are \emph{not} allowed in vectors and in matrices containing lattice generators and in congruences, namely in
2524
\begin{Verbatim}
2525
lattice cone_and_lattice normalization offset open_facets
2526
congruences inhom_congruences rees_algebra lattice_ideal
2527
grading dehomogenization signs strict_signs
2528
\end{Verbatim}
2529
They are allowed in \verb|saturation| since it defines the intersection of the vector space generated by the rows of the matrix with the integral lattice.
2530
2531
Note: Only positive numbers are allowed as denominators. Negative denominators may result in a segmentation fault. Illegal formats may result in
2532
2533
\begin{Verbatim}
2534
std::exception caught... "mpz_set_str" ... exiting.
2535
\end{Verbatim}
2536
2537
Normaliz first reduces the input numbers to lowest terms. Then each row of a matrix is multiplied by the least common multiple of the denominators of its entries. In all applications in which the original monoid generators play a role, one should use only integers in input matrices to avoid any ambiguity.
2538
2539
\subsubsection{Decimal fractions and floating point numbers}\label{decimal_input}
2540
2541
Normaliz accepts decimal fractions and floating point numbers in its input files. These are precisely converted to ordinary fractions (or integers). Examples:
2542
\begin{Verbatim}
2543
1.1 --> 11/10 0.5 --> 1/2 -.1e1 --> -1
2544
\end{Verbatim}
2545
It is not allowed to combine an ordinary fraction and a decimal fraction in the same number. In other words, expressions like \verb|1.0/2| are not allowed.
2546
2547
\subsubsection{Computation goals and algorithmic variants}\label{subsecGoals}
2548
2549
These are single or compound words, such as
2550
\begin{Verbatim}
2551
HilbertBasis
2552
Multiplicity
2553
\end{Verbatim}
2554
The file can contain several computation goals, as in this example.
2555
2556
\subsubsection{Comments}
2557
2558
A comment has the form
2559
\begin{Verbatim}
2560
/* <text> */
2561
\end{Verbatim}
2562
where \ttt{<text>} stands for the text of the comment. It can have arbitrary length and stretch over several lines. Example:
2563
\begin{Verbatim}
2564
/* This is a comment
2565
*/
2566
\end{Verbatim}
2567
Comments are only allowed at places where also a new keyword would be allowed, especially not between the entries of a matrix or a vector. Comments can not be nested.
2568
2569
\subsubsection{Restrictions}
2570
2571
Input items can almost freely be combined, but there are some restrictions:
2572
2573
\begin{arab}
2574
\item Every input type can appear only once.
2575
\item The types
2576
\begin{center}
2577
\ttt {cone, cone\_and\_lattice, polytope, rees\_algebra}
2578
\end{center}
2579
exclude each other mutually.
2580
\item The input type \verb|subspace| excludes \verb|polytope| and \verb|rees_algebra|.
2581
\item The types
2582
\begin{center}
2583
\ttt {lattice}, \ttt{saturation}, \ttt{cone\_and\_lattice}
2584
\end{center}
2585
exclude each other mutually.
2586
\item \verb|polytope| can not be combined with \verb|grading|.
2587
\item The only type that can be combined with \ttt{lattice\_ideal} is \ttt{grading}.
2588
\item The following types cannot be combined with inhomogeneous types or \verb|dehomogenization|:
2589
\begin{center}
2590
\ttt{polytope, rees\_algebra, excluded\_faces}
2591
\end{center}
2592
2593
\item The following types cannot be combined with inhomogeneous types:
2594
\begin{center}
2595
\ttt{dehomogenization, support\_hyperplanes}
2596
\end{center}
2597
2598
\item Special restrictions apply for the input type \verb|open_facets|; see Section \ref{open_facets}.
2599
\end{arab}
2600
2601
\subsubsection{Homogeneous and inhomogeneous input}\label{inhom_inp}
2602
2603
Apart from the restrictions listed in the previous section, homogeneous and inhomogeneous types can be combined as well as generators and constraints. A single inhomogeneous type or \verb|dehomogenization| in the input triggers an inhomogeneous computation. The input item of inhomogeneous type may be an empty matrix.
2604
2605
\subsubsection{Default values}\label{default}
2606
2607
If there is no lattice defining item, Normaliz (virtually) inserts the unit matrix as an input item of type \ttt{lattice}. If there is no cone defining item, the unit matrix is (additionally) inserted as an input item of type \ttt{cone}.
2608
2609
If the input is inhomogeneous, then Normaliz provides default values for vertices and the offset as follows:
2610
\begin{arab}
2611
\item If there is an input matrix of lattice type, but no \ttt{offset}, then the offset $0$ is inserted.
2612
\item If there is an input matrix of type cone, but no \ttt{vertices}, then the vertex $0$ is inserted.
2613
\end{arab}
2614
2615
\textbf{An important point.}\enspace If the input does not contain any cone generators or inequalities, Normaliz automatically assumes that you want to compute in the positive orthant. In order to avoid this choice you can add an empty matrix of inequalities. This will not affect the results, but avoid the sign restriction.
2616
2617
2618
2619
\subsubsection{Normaliz takes intersections (almost always)}
2620
2621
The input may contain several cone defining items and several lattice defining items.
2622
2623
The sublattice $L$ defined by the lattice input items is the \emph{intersection} of the sublattices defined by the single items. The polyhedron $P$ is defined as the intersection of all polyhedra defined by the single polyhedron defining items. The object then computed by Normaliz is
2624
$$
2625
P\cap L.
2626
$$
2627
2628
There are three notable exceptions to the rule that Normaliz takes intersections:
2629
\begin{arab}
2630
\item \verb|vertices| and \verb|cone| form a unit. Together they define a polyhedron.
2631
\item The same applies to \verb|offset| and \verb|lattice| that together define an affine lattice.
2632
\item The \verb|subspace| is added to \verb|cone| or \verb|cone_and_lattice|.
2633
\end{arab}
2634
2635
\subsection{Homogeneous generators}
2636
2637
\subsubsection{Cones}\label{cone_synt}
2638
2639
The main type is \verb|cone|. The other two types are added for special computations.
2640
2641
\begin{itemize}
2642
\itemtt[cone] is a matrix with $d$ columns. Every row represents a vector, and they define the cone generated by them. Section \ref{cone_ex}, \verb|2cone.in|
2643
2644
\itemtt[subspace] is a matrix with $d$ columns. The linear subspace generated by the rows is added to the cone. Section \ref{subspace}.
2645
2646
\itemtt[polytope] is a matrix with $d-1$ columns. It is internally converted to \verb|cone| extending each row by an entry $1$. Section \ref{polytope_ex}, \verb|polytope.in|
2647
2648
\itemtt[rees\_algebra] is a matrix with $d-1$ columns. It is internally converted to type \verb|cone| in two steps: (i) each row is extended by an entry $1$ to length $d$. (ii) The first $d-1$ unit vectors of length $d$ are appended. Section \ref{Rees}, \verb|MonIdeal.in|.
2649
\end{itemize}
2650
2651
Moreover, it is possible to define a cone and a lattice by the same matrix:
2652
2653
\begin{itemize}
2654
\itemtt[cone\_and\_lattice] The vectors of the matrix with $d$ columns define both a cone and a lattice. Section \ref{normalization_ex}, \verb|A443.in|.
2655
2656
If \verb|subspace| is used in combination with \verb|cone_and_lattice|, then the sublattice generated by its rows is added to the lattice generated by \verb|cone_and_lattice|.
2657
\end{itemize}
2658
2659
2660
The Normaliz 2 types \verb|integral_closure| and \verb|normalization| can still be used. They are synonyms for \verb|cone| and \verb|cone_and_lattice|, respectively.
2661
2662
\subsubsection{Lattices}
2663
2664
There are $3$ types:
2665
2666
\begin{itemize}
2667
\itemtt[lattice] is a matrix with $d$ columns. Every row represents a vector, and they define the lattice generated by them. Section \ref{latt_ex}, \verb|3x3magiceven_lat.in|
2668
2669
\itemtt[saturation] is a matrix with $d$ columns. Every row represents a vector, and they define the \emph{saturation} of the lattice generated by them. Section \ref{latt_ex}, \verb|3x3magic_sat.in|.
2670
2671
\itemtt[cone\_and\_lattice] See Section \ref{cone_synt}.
2672
\end{itemize}
2673
2674
2675
\subsection{Homogeneous Constraints}
2676
2677
\subsubsection{Cones} \label{HomConstrCone}
2678
2679
\begin{itemize}
2680
\itemtt[inequalities] is a matrix with $d$ columns. Every row $(\xi_1,\dots,\xi_d)$ represents a homogeneous inequality
2681
$$
2682
\xi_1x_1+\dots+\xi_dx_d\ge 0, \qquad \xi_i\in\ZZ,
2683
$$
2684
for the vectors $(x_1,\dots,x_d)\in\RR^d$. Sections \ref{ineq_ex}, \ref{rat_ineq}, \verb|2cone_ineq.in|, \verb|poly_ineq.in|
2685
2686
\itemtt[signs] is a vector with $d$ entries in $\{-1,0,1\}$.
2687
It stands for a matrix of type \verb|inequalities| composed of the sign inequalities $x_i\ge 0$ for the entry $1$ at the $i$-th component and the inequality $x_i\le 0$ for the entry $-1$. The entry $0$ does not impose an inequality. See \ref{sign_ex}, \verb|InhomCongSigns.in|.
2688
2689
\itemtt[nonnegative] It stands for a vector of type \verb|signs| with all entries equal to $1$. See Section \ref{Condorcet}, \verb|Condorcet.in|.
2690
2691
\itemtt[excluded\_faces] is a matrix with $d$ columns. Every row $(\xi_1,\dots,\xi_d)$ represents an inequality
2692
$$
2693
\xi_1x_1+\dots+\xi_dx_d> 0, \qquad \xi_i\in\ZZ,
2694
$$
2695
for the vectors $(x_1,\dots,x_d)\in\RR^d$. It is considered as a homogeneous input type though it defines inhomogeneous inequalities. The faces of the cone excluded by the inequalities are excluded from the Hilbert series computation, but \verb|excluded_faces| behaves like \verb|inequalities| in every other respect.
2696
Section \ref{excluded_ex}, \verb|CondorcetSemi.in|.
2697
2698
\itemtt[support\_hyperplanes] is a matrix with $d$ columns. It requires homogeneous input. It is the input type for precomputed support hyperplanes. Therefore Normaliz checks if all input generators satisfy the inequalities defined by them. Apart from this extra check, it behaves like \verb|inequalities|. Note that it overrides all other inequalities in the input, but \verb|excluded_faces| still exclude the faces defined by them. Section \ref{supphyp_ex}, \verb|2cone_supp.in|.
2699
\end{itemize}
2700
2701
\subsubsection{Lattices}
2702
2703
\begin{itemize}
2704
\itemtt[equations] is a matrix with $d$ columns. Every row $(\xi_1,\dots,\xi_d)$ represents an equation
2705
$$
2706
\xi_1x_1+\dots+\xi_dx_d= 0, \qquad \xi_i\in\ZZ,
2707
$$
2708
for the vectors $(x_1,\dots,x_d)\in\RR^d$. Section \ref{eq_ex}, \verb|3x3magic.in|
2709
2710
\itemtt[congruences] is a matrix with $d+1$ columns. Each row $(\xi_1,\dots,\xi_d,c)$ represents a congruence
2711
$$
2712
\xi_1z_1+\dots+\xi_dz_d\equiv 0 \mod c, \qquad \xi_i,c\in\ZZ,
2713
$$
2714
for the elements $(z_1,\dots,z_d)\in\ZZ^d$. Section \ref{cong_ex}, \verb|3x3magiceven.in|.
2715
\end{itemize}
2716
2717
\subsection{Inhomogeneous generators}
2718
2719
\subsubsection{Polyhedra}
2720
2721
\begin{itemize}
2722
\itemtt[vertices] is a matrix with $d+1$ columns. Each row $(p_1,\dots,p_d,q)$, $q>0$, specifies a generator of a polyhedron (not necessarily a vertex), namely
2723
$$
2724
v_i=\biggl(\frac{p_{1}}{q},\dots,\frac{p_{d}}{q}\biggr), \qquad p_i\in\ZZ,q\in\ZZ_{>0},
2725
$$
2726
Section \ref{polyh_ex}, \verb|InhomIneq_gen.in|
2727
2728
\textbf{Note:}\enspace \verb|vertices| and \verb|cone| together define a polyhedron. If \verb|vertices| is present in the input, then the default choice for \verb|cone| is the empty matrix.
2729
\end{itemize}
2730
2731
The Normaliz 2 input type \verb|polyhedron| can still be used.
2732
2733
\subsubsection{Lattices}
2734
2735
\begin{itemize}
2736
\itemtt[offset] is a vector with $d$ entries. It defines the origin of the affine lattice.
2737
Section \ref{offset_ex}, \verb|InhomCongLat.in|.
2738
\end{itemize}
2739
2740
\textbf{Note:}\enspace \verb|offset| and \verb|lattice| (or \verb|saturation|) together define an affine lattice. If \verb|offset| is present in the input, then the default choice for \verb|lattice| is the empty matrix.
2741
2742
\subsection{Inhomogeneous constraints}
2743
2744
\subsubsection{Cones}
2745
2746
\begin{itemize}
2747
\itemtt[inhom\_inequalities] is a matrix with $d+1$ columns. We consider inequalities
2748
$$
2749
\xi_1x_1+\dots+\xi_dx_d\ge \eta, \qquad \xi_i,\eta\in\ZZ,
2750
$$
2751
rewritten as
2752
$$
2753
\xi_1x_1+\dots+\xi_dx_d+(-\eta) \ge 0
2754
$$
2755
and then represented by the input vectors
2756
$$
2757
(\xi_1,\dots,\xi_d,-\eta).
2758
$$
2759
Section \ref{inhom_ineq_ex}, \verb|InhomIneq.in|.
2760
2761
\itemtt[strict\_inequalities] is a matrix with $d$ columns. We consider inequalities
2762
$$
2763
\xi_1x_1+\dots+\xi_dx_d\ge 1, \qquad \xi_i\in\ZZ,
2764
$$
2765
represented by the input vectors
2766
$$
2767
(\xi_1,\dots,\xi_d).
2768
$$
2769
Section \ref{strict_ex}, \verb|2cone_int.in|.
2770
2771
\itemtt[strict\_signs] is a vector with $d$ components in $\{-1,0,1\}$. It is the ``strict'' counterpart to \verb|signs|. An entry $1$ in component $i$ represents the inequality $x_i>0$, an entry $-1$ the opposite inequality, whereas $0$ imposes no condition on $x_i$. Section \ref{strict_signs_ex}, \verb|Condorcet_one.in|
2772
\end{itemize}
2773
2774
\subsubsection{Lattices}
2775
2776
\begin{itemize}
2777
\itemtt[inhom\_equations] is a matrix with $d+1$ columns. We consider equations
2778
$$
2779
\xi_1x_1+\dots+\xi_dx_d= \eta, \qquad \xi_i,\eta\in\ZZ,
2780
$$
2781
rewritten as
2782
$$
2783
\xi_1x_1+\dots+\xi_dx_d+(-\eta) = 0
2784
$$
2785
and then represented by the input vectors
2786
$$
2787
(\xi_1,\dots,\xi_d,-\eta).
2788
$$
2789
See \ref{inhom_eq_ex}, \verb|NumSemi.in|.
2790
2791
\itemtt[inhom\_congruences] We consider a matrix with $d+2$ columns. Each row $(\xi_1,\dots,\xi_d,-\eta,c)$ represents a congruence
2792
$$
2793
\xi_1z_1+\dots+\xi_dz_d\equiv \eta \mod c, \qquad \xi_i,\eta,c\in\ZZ,
2794
$$
2795
for the elements $(z_1,\dots,z_d)\in\ZZ^d$. Section \ref{ChinRem}, \verb|InhomCongSigns.in|.
2796
\end{itemize}
2797
2798
\subsection{Tabular constraints}
2799
2800
\begin{itemize}
2801
\itemtt[constraints] allows the input of equations, inequalities and congruences in a format that is close to standard notation. As for matrix types the keyword \verb|constraints| is followed by the number of constraints. The syntax of tabular constraints has been described in Section \ref{cone_synt}. If $(\xi_1,\dots,\xi_d)$ is the vector on the left hand side and $\eta$ the integer on the right hand side, then the constraint defines the set of vectors $(x_1,\dots,x_d)$ such that the relation
2802
$$
2803
\xi_1x_1+\dots+\xi_dx_d \texttt{ rel } \eta
2804
$$
2805
is satisfied, where \verb|rel| can take the values $=,\leq,\geq,<,>$, represented by the input strings \verb|=,<=,>=,<,>|, respectively.
2806
2807
The input string \verb|~| represents a congruence $\equiv$ and requires the additional input of a modulus. It represents the congruence
2808
$$
2809
\xi_1x_1+\dots+\xi_dx_d \equiv \eta \pmod c.
2810
$$
2811
Sections \ref{strict_ex}, \verb|2cone_int.in|, \ref{cong_ex}, \ttt{3x3magiceven.in}, \ref{inhom_ineq_ex}, \verb|InhomIneq.in|.
2812
\end{itemize}
2813
2814
A right hand side $\neq 0$ makes the input inhomogeneous, as well as the relations $<$ and $>$. Strict inequalities are always understood as conditions for integers. So
2815
$$
2816
\xi_1x_1+\dots +\xi_dx_d < \eta
2817
$$
2818
is interpreted as
2819
$$
2820
\xi_1x_1+\dots+\xi_dx_d \le \eta-1.
2821
$$
2822
2823
2824
\subsubsection{Forced homogeneity}
2825
2826
It is often more natural to write constraints in inhomogeneous form, even when one wants the computation to be homogeneous. The type \verb|constraints| does not allow this. Therefore we have introduced
2827
\begin{itemize}
2828
\itemtt[hom\_constraints] for the input of equations, non-strict inequalities and congruences in the same format as \verb|constraints|, except that these constraints are meant to be for a homogeneous computation. It is clear that the left hand side has only $d-1$ entries now. See Section \ref{rat_ineq}, \verb|poly_hom_const.in|.
2829
\end{itemize}
2830
2831
\subsection{Symbolic constraints}
2832
2833
The input syntax is
2834
2835
\begin{itemize}
2836
\itemtt[constraints <n> symbolic] where \verb|<n>| is the number of constraints in symbolic form that follow.
2837
\end{itemize}
2838
2839
The constraints have the form described in Section \ref{symb_syntax}. Note that every symbolic constraint (including the last) must be terminated by a semicolon.
2840
2841
See \ref{inhom_eq_ex}, \verb|NumSemi.in|, \ref{ChinRem}, \verb|InhomCong.in|.
2842
2843
The interpretation of homogeneity follows the same rules as for tabular constraints. The variant \verb|hom_constraints| is allowed and works as for tabular constraints.
2844
2845
2846
\subsection{Relations}\label{relations}
2847
2848
Relations do not select a
2849
sublattice of $\ZZ^d$ or a subcone of $\RR^d$, but define a
2850
monoid as a quotient of $\ZZ_+^d$ modulo a system of
2851
congruences (in the semigroup sense!).
2852
2853
The rows of the input matrix of this type are interpreted as
2854
generators of a subgroup $U\subset\ZZ^d$, and Normaliz computes an affine monoid and its normalization as explained in Section \ref{binomials}.
2855
2856
Set $G=\ZZ^d/U$ and $L=G/\textup{torsion}(G)$. Then the ambient lattice
2857
is $\AA=\ZZ^r$, $r=\rank L$, and the efficient lattice is $L$, realized
2858
as a sublattice of $\AA$. Normaliz computes the image of $\ZZ^d_+$ in $L$ and its normalization.
2859
2860
\begin{itemize}
2861
\itemtt[lattice\_ideal] is a matrix with $d$ columns containing the generators of the subgroup $U$. Section \ref{binom_ex}, \verb|lattice_ideal.in|.
2862
\end{itemize}
2863
2864
The type \ttt{lattice\_ideal} cannot be combined with any other input type (except
2865
\ttt{grading})---such a combination would not make sense. (See Section \ref{grad_lattid} for the use of a grading in this case.)
2866
2867
\subsection{Unit vectors}\label{unit_vectors}
2868
2869
A grading or a dehomogenization is often given by a unit vector:
2870
\begin{itemize}
2871
\itemtt[unit\_vector <n>] represents the $n$th unit vector in $\RR^d$ where $n$ is the number given by \verb|<n>|.
2872
\end{itemize}
2873
This shortcut cannot be used as a row of a matrix. It can be used whenever a single vector is asked for, namely after \verb|grading|, \verb|dehomogenization|, \verb|signs| and \verb|strict_signs|. See Section \ref{rational}, \verb|rational.in|.
2874
2875
\subsection{Grading}\label{grading}
2876
2877
This type is accessory. A $\ZZ$-valued grading can be specified in two ways:
2878
\begin{arab}
2879
\item \emph{explicitly} by including a grading in the input, or
2880
\item \emph{implicitly}. In this case Normaliz checks whether
2881
the extreme integral generators of the monoid lie in an
2882
(affine) hyperplane $A$ given by an equation $\lambda(x)=1$ with a $\ZZ$-linear form $\lambda$. If so, then $\lambda$ is used as the grading.\smallskip
2883
2884
\emph{Implicit gradings are only possible for homogeneous computations.}\smallskip
2885
2886
If the attempt to find an implicit grading causes an arithmetic overflow and \texttt{verbose} has been set (say, by the option \texttt{-c}), then Normaliz issues the warning
2887
\begin{Verbatim}
2888
Giving up the check for a grading
2889
\end{Verbatim}
2890
If you really need this check, rerun Normaliz with a bigger integer type.
2891
\end{arab}
2892
2893
Explicit definition of a grading:
2894
\begin{itemize}
2895
\itemtt[grading] is a vector of length $d$ representing the linear form that gives the grading. Section \ref{rational}, \verb|rational.in|.
2896
2897
\itemtt[total\_degree] represents a vector of length $d$ with all entries equal to $1$. Section \ref{Condorcet}, \verb|Condorcet.in|.
2898
\end{itemize}
2899
2900
Before Normaliz can apply the degree, it must be restricted
2901
to the effective lattice $\EE$. Even if the entries of the
2902
grading vector are coprime, it often happens that all degrees
2903
of vectors in $\EE$ are divisible by a greatest common divisor
2904
$ g>1$. Then $g$ is extracted from the degrees, and it will
2905
appear as \ttt{denominator} in the output file.
2906
2907
Normaliz checks whether all generators of the (recession) monoid have
2908
positive degree (after passage to the quotient modulo the unit group in the nonpointed case).
2909
Vertices of polyhedra may have degrees $\le 0$.
2910
2911
\subsubsection{\ttt{lattice\_ideal}} \label{grad_lattid}
2912
2913
In this case the unit vectors correspond to generators of the
2914
monoid. Therefore the degrees assigned to them must be
2915
positive. Moreover, the vectors in the input represent binomial
2916
relations, and these must be homogeneous. In other words, both
2917
monomials in a binomial must have the same degree. This amounts
2918
to the condition that the input vectors have degree $0$.
2919
Normaliz checks this condition.
2920
2921
\subsection{Dehomogenization}
2922
2923
Like \verb|grading| this is an accessory type.
2924
2925
Inhomogeneous input for objects in $\RR^d$ is homogenized by an additional coordinate and then computed in $\RR^{d+1}$, but with the additional condition $x_{d+1}\ge 0$, and then dehomogenizing all results: the substitution $x_{d+1}=1$ acts as the \emph{dehomogenization}, and the inhomogeneous input types implicitly choose this dehomogenization.
2926
2927
Like the grading, one can define the dehomogenization explicitly:
2928
\begin{itemize}
2929
\itemtt[dehomogenization] is a vector of length $d$ representing the linear form $\delta$.
2930
\end{itemize}
2931
2932
The dehomogenization can be any linear form $\delta$ satisfying the condition $\delta(x)\ge 0$ on the cone that is truncated. (In combination with constraints, the condition $\delta(x)\ge 0$ is automatically satisfied since $\delta$ is added to the constraints.)
2933
2934
The input type \verb|dehomogenization| can only be combined with homogeneous input types, but makes the computation inhomogeneous, resulting in inhomogeneous output. The polyhedron computed is the intersection of the cone $\CC$ (and the lattice $\EE$) with the hyperplane given by $\delta(x)=1$, and the recession cone is $\CC\cap\{x:\delta(x)=0\}$.
2935
2936
A potential application is the adaptation of other input formats to Normaliz. The output must then be interpreted accordingly.
2937
2938
Section \ref{dehom_ex}, \verb|dehomogenization.in|.
2939
2940
\subsection{Open facets}\label{open_facets}
2941
2942
The input type \verb|open_facets| is similar to \verb|strict_inequalities|. However, it allows to apply strict inequalities that are not yet known. This makes only sense for simplicial polyhedra where a facet can be identified by the generator that does \emph{not} lie in it.
2943
2944
\verb|open_facets| is a vector with entries $\in \{0,1\}$.
2945
2946
The restrictions for the use of open facets are the following:
2947
\begin{arab}
2948
\item Only the input types \verb|cone,| \verb|vertices| and \verb|grading| can appear together with \verb|open_facets|.
2949
\item The vectors in \verb|cone| are linearly independent.
2950
\item There is at most one vertex.
2951
\end{arab}
2952
The number of vectors in \verb|cone| may be smaller than $d$, but \verb|open_facets| must have $d$ entries.
2953
2954
2955
\verb|open_facets| make the computation inhomogeneous. They are interpreted as follows. Let $v$ be the vertex---if there are no \verb|vertices|, then $v$ is the origin. The shifted cone $C'=v+C$ is cut out by affine-linear inequalities $\lambda_i(x)\ge 0$ with coprime integer coefficients. We number these in such a way that $\lambda_i(v+c_i)\neq 0$ for the generators $c_i$ of $C$ (in the input order), $i=1,\dots,n$. Then all subsequent computations are applied to the shifted cone $C''=v'+C$ defined by the inequalities
2956
$$
2957
\lambda_i(x)\ge u_i
2958
$$
2959
where the vector $(u_1,\dots,u_d)$ is given by \verb|open_facets|. (If $\dim C<d$, then the entries $u_j$ with $j> \dim C$ are ignored.)
2960
2961
That $1$ indicates ``open'' is in accordance with its use for the disjoint decomposition; see Section \ref{Disjoint}. Section \ref{LattPointsFPE} discusses an example.
2962
2963
\subsection{Numerical parameters}
2964
2965
Certain numerical parameters used by Normaliz can be set in the input file. Presently only:
2966
2967
\subsubsection{Number of significant coefficients of the quasipolynomial}
2968
2969
It can be set by
2970
\begin{Verbatim}
2971
nr_coeff_quasipol <n>
2972
\end{Verbatim}
2973
where \verb|<n>| is the number of highest coefficients to be printed. See Section \ref{highest_coeff}.
2974
2975
\subsection{Pointedness}
2976
2977
Since version 3.1 Normaliz can also compute nonpointed cones and polyhedra without vertices.
2978
2979
\subsection{The zero cone}\label{zero}
2980
2981
The zero cone with an empty Hilbert basis is a legitimate
2982
object for Normaliz. Nevertheless a warning message is issued
2983
if the zero cone is encountered.
2984
2985
\section{Computation goals and algorithmic variants}\label{Goals}
2986
2987
The library \verb|libnormaliz| contains a class \verb|ConeProperties| that collects computation goals, algorithmic variants and additional data that are used to control the work flow in \verb|libnormaliz| as well as the communication with other programs. The latter are not important for the Normaliz user, but are listed as a reference for \verb|libnormaliz|. See Appendix \ref{libnorm} for a description of \verb|libnormaliz|.
2988
2989
All computation goals and algorithmic variants can be communicated to Normaliz in two ways:
2990
\begin{arab}
2991
\item in the input file, for example \verb|HilbertBasis|,
2992
\item via a verbatim command line option, for example \verb|--HilbertBasis|.
2993
\end{arab}
2994
For the most important choices there are single letter command line options, for example \verb|-N| for \verb|HilbertBasis|. The single letter options ensure backward compatibility to Normaliz 2. In jNormaliz they are also accessible via their full names.
2995
2996
Some computation goals apply only to homogeneous computations, and some others make sense only for inhomogeneous computations.
2997
2998
Some single letter command line options combine two or more computation goals, and some algorithmic variants imply computation goals.
2999
3000
\subsection{Default choices and basic rules}
3001
3002
If several computation goals are set, all of them are pursued. In particular, computation goals in the input file and on the command line are accumulated. But
3003
\begin{itemize}
3004
\itemtt[-{}-ignore, -i] on the command line switches off the computation goals and algorithmic variants set in the input file.
3005
\end{itemize}
3006
3007
The default computation goal is set if neither the input file nor the command line contains a computation goal or an algorithmic variant that implies a computation goal. It is
3008
\begin{center}
3009
\verb|HilbertBasis| + \verb|HilbertSeries| + \verb|ClassGroup|.
3010
\end{center}
3011
3012
If set explicitly in the input file or on the command line the following adds these computation goals:
3013
\begin{itemize}
3014
\itemtt[DefaultMode]
3015
\end{itemize}
3016
3017
It is possible to set \verb|DefaultMode| explicitly in addition to other computation goals. If it is set, implicitly or explicitly, Normaliz will not complain about unreachable computation goals.
3018
3019
\subsection{The choice of algorithmic variants}
3020
3021
For its main computation goals Normaliz has algorithmic variants. It tries to choose the variant that seems best for the given input data. This automatic choice may however be a bad one. Therefore the user can completely control which algorithmic variant is used.
3022
3023
\subsubsection{Primal vs.\ dual}
3024
3025
For the computation of Hilbert bases Normaliz has two algorithms, the primal algorithm that is based on triangulations, and the dual algorithm that is of type ``pair completion''. We have seen both in Section \ref{Examples}. Roughly speaking, the primal algorithm is the first choice for generator input, and the dual algorithm is usually better for constraints input. The choice also applies to the computation of degree $1$ elements. However, for them the default choice is project-and-lift. See Section \ref{project}. The conditions under which the dual algorithm is chosen are specified in Section \ref{div_labor}.
3026
3027
The choice of the algorithm can be fixed or blocked:
3028
\begin{itemize}
3029
\itemtt[DualMode, -d] activates the dual algorithm for the computation of the Hilbert basis and degree $1$ elements. Includes \verb|HilbertBasis|, unless \verb|Deg1Elements| is set. It overrules \verb|IsIntegrallyClosed|.
3030
3031
\itemtt[PrimalMode, -P] blocks the use of the dual algorithm.
3032
\end{itemize}
3033
3034
The automatic choice can of course fail. See Section \ref{div_labor} for an example for which it is bad.
3035
3036
\subsubsection{Lattice points in polytopes}\label{approximate}
3037
3038
For this task Normaliz has several methods. They are discussed in Section \ref{LattPoints}. The default choice is the project-and-lift algorithm. It can be chosen explicitly:
3039
\begin{itemize}
3040
\itemtt[Projection, -j]; it implies \verb|Deg1Elements|,
3041
3042
\itemtt[NoProjection] blocks it.
3043
\end{itemize}
3044
3045
Alternative choices are
3046
\begin{itemize}
3047
\itemtt[ProjectionFloat, -J], project-and-lift with floating point arithmetic,
3048
\itemtt[PrimalMode, -P], triangulation based method,
3049
\itemtt [Approximate, -r], approximation of rational polytopes followed by triangulation and
3050
\itemtt[DualMode, -d], dual algorithm.
3051
\end{itemize}
3052
\verb|PrimalMode| and \verb|DualMode| do not imply \verb|Deg1Elements| since they can also be used for Hilbert bases.
3053
\subsubsection{Bottom decomposition}
3054
3055
Bottom decomposition is a way to produce an optimal triangulation for a given set of generators. It is discussed in Section \ref{bottom_dec}. The criterion for its automatic choice is explained there. It can be forced or blocked:
3056
\begin{itemize}
3057
\itemtt[BottomDecomposition, -b] tells Normaliz to use bottom decomposition in the primal algorithm.
3058
3059
\itemtt[NoBottomDec, -o] forbids Normaliz to use bottom decomposition in the primal algorithm, even if it would otherwise be chosen because of large roughness (see Section \ref{bottom_dec}).
3060
\end{itemize}
3061
3062
An option to be mentioned in this context is
3063
\begin{itemize}
3064
\itemtt[KeepOrder, -k] forbids Normaliz to reorder the generators of the efficient cone $\CC$. Only useful if original monoid generators are defined. Also blocks \verb|BottomDecomposition|.
3065
\end{itemize}
3066
\verb|KeepOrder| is only allowed if \verb|OriginalMonoidGenerators| are defined.
3067
It is rarely a good idea to set \verb|KeepOrder| (try it). It is primarily used internally when data must be computed in an auxiliary cone.
3068
3069
\subsubsection{Symmetrization}
3070
3071
In rare cases Normaliz can use symmetrization in the computation of multiplicities or Hilbert series. If applicable, this is a very strong tool. We have mentioned it in Section \ref{Condorcet} and will discuss it in Section \ref{symmetrize}. It will be chosen automatically, but can also be forced or blocked:
3072
\begin{itemize}
3073
\itemtt[Symmetrize, -Y] lets Normaliz compute the multiplicity and/or the Hilbert series via symmetrization (or just compute the symmetrized cone).
3074
3075
\itemtt[NoSymmetrization] blocks symmetrization.
3076
\end{itemize}
3077
3078
\subsubsection{Subdivision of simplicial cones}
3079
3080
Normaliz tries to subdivide ``large'' simplicial cones; see Section \ref{subdiv}. If your executable is built with SCIP, you can set
3081
\begin{itemize}
3082
\itemtt[SCIP]
3083
\end{itemize}
3084
However, in general, Normaliz' own method is faster and more reliable.
3085
3086
Subdivision requires enlarging the set of generators and can lead to a nested triangulation (see Sections \ref{subdiv} and \ref{nested}). The subdivision can be blocked by
3087
\begin{itemize}
3088
\itemtt[NoSubdivision]
3089
\end{itemize}
3090
3091
3092
\subsection{Computation goals}\label{goals}
3093
3094
The computation goal \verb|Sublattice| does not imply any other computation goal. All other computation goals include \verb|Sublattice| and \verb|SupportHyperplanes|, apart from certain computation goals based on the dual algorithm or if \verb|Projection| or \verb|ProjectionFloat| is used for parallelotopes; see Section \ref{InhomDual}.
3095
3096
\subsubsection{Lattice data}
3097
3098
\begin{itemize}
3099
\itemtt[Sublattice, -S] (upper case S) asks Normaliz to compute the coordinate transformation to and from the efficient sublattice.
3100
\end{itemize}
3101
3102
\subsubsection{Support hyperplanes and extreme rays}
3103
3104
\begin{itemize}
3105
\itemtt[SupportHyperplanes, -s] triggers the computation of support hyperplanes and extreme rays.
3106
\end{itemize}
3107
3108
Normaliz tries to find a grading.
3109
3110
\begin{itemize}
3111
\itemtt[VerticesFloat] converts the format of vertices to floating point. It implies \texttt{SupportHyperplanes}.
3112
\end{itemize}
3113
3114
Note that \texttt{VerticesFloat} is not a pure output option. It is a computation goal, and therefore breaks \texttt{DefaultMode}.
3115
3116
3117
3118
\subsubsection{Hilbert basis and related data}
3119
3120
\begin{itemize}
3121
3122
\itemtt[HilbertBasis, -N] triggers the computation of the Hilbert basis. In inhomogeneous computations it asks for the Hilbert basis of the recession monoid \emph{and} the module generators.
3123
3124
\itemtt[Deg1Elements, -1] restricts the computation to the degree $1$ elements of the Hilbert basis. Requires the presence of a grading. Forbidden in inhomogeneous computations.
3125
3126
\itemtt[ModuleGeneratorsOverOriginalMonoid, -M] computes a minimal system of generators of the integral closure over the original monoid (see Section \ref{MinMod}). Requires the existence of original monoid generators.
3127
\end{itemize}
3128
3129
The boolean valued computation goal \verb|IsIntegrallyClosed| is also related to the Hilbert basis; see Section \ref{bool}.
3130
3131
\subsubsection{Enumerative data}
3132
3133
The computation goals in this section require a grading. They include \verb|SupportHyperplanes|.
3134
3135
\begin{itemize}
3136
\itemtt [HilbertSeries,-q] triggers the computation of the Hilbert series.
3137
3138
\itemtt[Multiplicity, -v] restricts the computation to the multiplicity.
3139
3140
\itemtt[HSOP] lets Normaliz compute the degrees in a homogeneous system of parameters and the induced representation of the Hilbert series.
3141
\itemtt[NoPeriodBound] This option removes the period bound that Normaliz sets for the computation of the Hilbert quasipolynomial (presently $10^6$).
3142
\end{itemize}
3143
3144
\subsubsection{Combined computation goals}
3145
3146
Can only be set by single letter command line options:
3147
3148
\begin{itemize}
3149
\itemtt[-n] \verb|HilbertBasis| + \verb|Multiplicity|
3150
3151
\itemtt[-h] \verb|HilbertBasis| + \verb|HilbertSeries|
3152
3153
\itemtt[-p] \verb|Deg1Elements| + \verb|HilbertSeries|
3154
3155
\end{itemize}
3156
3157
\subsubsection{The class group}
3158
3159
\begin{itemize}
3160
\itemtt [ClassGroup, -C] is self explanatory, includes \verb|SupportHyperplanes|. Not allowed in inhomogeneous computations.
3161
\end{itemize}
3162
3163
\subsubsection{Integer hull}
3164
3165
\begin{itemize}
3166
\itemtt [IntegerHull, -H] computes the integer hull of a polyhedron. Implies the computation of the lattice points in it.
3167
\end{itemize}
3168
3169
More precisely: in homogeneous computations it implies \verb|Deg1Elements|, in inhomogeneous computations it implies \verb|HilbertBasis|. See Section \ref{IntHull}.
3170
3171
\subsubsection{Triangulation and Stanley decomposition}
3172
3173
\begin{itemize}
3174
3175
\itemtt[Triangulation, -T] makes Normaliz compute, store and export the full triangulation.
3176
3177
\itemtt[ConeDecomposition, -D] Normaliz computes a disjoint decomposition of the cone into semiopen simplicial cones. Implies \verb|Triangulation|.
3178
3179
\itemtt[TriangulationSize, -t] makes Normaliz count the simplicial cones in the full triangulation.
3180
3181
\itemtt[TriangulationDetSum] makes Normaliz additionally sum the absolute values of their determinants.
3182
3183
\itemtt[StanleyDec, -y] makes Normaliz compute, store and export the Stanley decomposition. Only allowed in homogeneous computations.
3184
3185
\end{itemize}
3186
3187
The triangulation and the Stanley decomposition are treated
3188
separately since they can become very large and may exhaust
3189
memory if they must be stored for output.
3190
3191
\subsubsection{Weighted Ehrhart series and integrals}
3192
3193
\begin{itemize}
3194
3195
\itemtt[WeightedEhrhartSeries, -E] makes Normaliz compute a generalized Ehrhart series.
3196
3197
\itemtt[VirtualMultiplicity, -L] makes Normaliz compute the virtual multiplicity of a weighted Ehrhart series.
3198
3199
\itemtt[Integral, -I] makes Normaliz compute an integral.
3200
\end{itemize}
3201
3202
These computation goals require a homogeneous computation.
3203
3204
Don't confuse these options with symmetrization. The latter symmetrizes (if possible) the given data and uses \verb|-E| or \verb|-L| internally on the symmetrized object. The options \verb|-E,-I,-L| ask for the input of a polynomial. See Section \ref{poly_input}.
3205
3206
\subsubsection{Boolean valued computation goals}\label{bool}
3207
3208
They tell Normaliz to find out the answers to the questions they ask. Two of them are more important than the others since they may influence the course of the computations:
3209
3210
\begin{itemize}
3211
\itemtt[IsIntegrallyClosed, -w]: is the original monoid integrally closed? Normaliz stops the Hilbert basis computation as soon as it can decide whether the original monoid contains the Hilbert basis (see Section \ref{IsIC}). If the answer is ``no'', Normaliz computes a witness, an element of the integral closure that is not contained in the original monoid.
3212
3213
\itemtt[IsPointed]: is the efficient cone $\CC$ pointed? This computation goal is sometimes useful to give Normaliz a hint that a nonpointed cone is to be expected. See Section \ref{IsPointed}.
3214
\end{itemize}
3215
3216
For the following we only need the support hyperplanes and the lattice:
3217
3218
\begin{itemize}
3219
\itemtt[IsGorenstein, -G]: is the monoid of lattice points Gorenstein? In addition to answering this question, Normaliz also computes the generator of the interior of the monoid (the canonical module) if the monoid is Gorenstein.
3220
\end{itemize}
3221
3222
The remaining ones:
3223
3224
\begin{itemize}
3225
3226
\itemtt[IsDeg1ExtremeRays]: do the extreme rays have degree $1$?
3227
3228
\itemtt[IsDeg1HilbertBasis]: do the Hilbert basis elements have degree 1?
3229
3230
\itemtt[IsReesPrimary]: for the input type \verb|rees_algebra|, is the monomial ideal primary to the irrelevant maximal ideal?
3231
3232
\end{itemize}
3233
3234
The last three computation goals are not really useful for Normaliz since they will be answered automatically. Note that they may trigger extensive computations.
3235
3236
\subsection{Integer type}\label{Integer}
3237
3238
There is no need to worry about the integer type chosen by Normaliz. All preparatory computations use infinite precision. The main computation is then tried with $64$ bit integers. If it fails, it will be restarted with infinite precision.
3239
3240
Infinite precision does not mean that overflows are completely impossible. In fact, Normaliz requires that numbers of type ``degree'' fit the type \verb|long| (typically 64 bit on 64 bit systems). If an overflow occurs in the computation of such a number, it cannot be remedied.
3241
3242
The amount of computations done with infinite precision is usually very small, but the transformation of the computation results from 64 bit integers to infinite precision may take some time. If you need the highest possible speed, you can suppress infinite precision completely by
3243
\begin{itemize}
3244
\itemtt[LongLong]
3245
\end{itemize}
3246
With this option, Normaliz cannot restart a failed computation.
3247
3248
On the other hand, the $64$ bit attempt can be bypassed by
3249
\begin{itemize}
3250
\itemtt[BigInt, -B]
3251
\end{itemize}
3252
3253
Note that Normaliz tries to avoid overflows by intermediate results (even if \verb|LongLong| is set). If such overflow should happen, the computation is repeated locally with infinite precision. (The number of such GMP transitions is shown in the terminal output.) If a final result is too large, Normaliz must restart the computation globally.
3254
3255
\verb|LongLong| is not a cone property.
3256
3257
\subsection{Control of computations and communication with interfaces}
3258
3259
In addition to the computation goals in Section \ref{goals},
3260
the following elements of \verb|ConeProperties| control the work flow in \verb|libnormaliz| and can be used by programs calling Normaliz to ensure the availability of the data that are controlled by them.
3261
3262
\begin{itemize}
3263
3264
\itemtt[Generators] controls the generators of the efficient cone.
3265
3266
\itemtt[OriginalMonoidGenerators] controls the generators of the original monoid.
3267
3268
\itemtt[ModuleGenerators] controls the module generators in inhomogeneous computation.
3269
3270
\itemtt[ExtremeRays] controls the extreme rays.
3271
3272
\itemtt[VerticesOfPolyhedron] controls the vertices of the polyhedron in the inhomogeneous case.
3273
3274
\itemtt[MaximalSubspace] controls the maximal linear subspace of the (homogenized) cone.
3275
3276
\itemtt [EmbeddingDim] controls the embedding dimension.
3277
\itemtt [Rank] controls the rank.
3278
3279
\itemtt[RecessionRank] controls the rank of the recession monoid in inhomogeneous computations.
3280
3281
\itemtt[AffineDim] controls the affine dimension of the polyhedron in inhomogeneous computations.
3282
3283
\itemtt[ModuleRank] in inhomogeneous computations it controls the rank of the module of lattice points in the polyhedron as a module over the recession monoid.
3284
3285
\itemtt[ExcludedFaces] controls the excluded faces.
3286
3287
\itemtt[InclusionExclusionData] controls data derived from the excluded faces.
3288
3289
\itemtt[Grading] controls the grading.
3290
\itemtt[GradingDenom] controls its denominator.
3291
3292
\itemtt[Dehomogenization] controls the dehomogenization.
3293
3294
\itemtt[ReesPrimaryMultiplicity] controls the multiplicity of a monomial ideal, provided it is primary to the maximal ideal generated by the indeterminates. Used only with the input type \verb|rees_algebra|.
3295
3296
\itemtt [WitnessNotIntegrallyClosed] controls witness against integral closedness.
3297
\itemtt [GeneratorOfInterior] controls the generator of the interior if the monoid is Gorenstein.
3298
3299
\itemtt[Equations] controls the equations.
3300
\itemtt[Congruences] controls the congruences.
3301
\itemtt[ExternalIndex] controls the external index.
3302
\itemtt[InternalIndex] controls the internal index.
3303
\itemtt[UnitGroupIndex] controls the unit group index.
3304
3305
3306
\itemtt[IsInhomogeneous] controls the inhomogeneous case.
3307
3308
\itemtt[HilbertQuasiPolynomial] controls the Hilbert quasipolynomial.
3309
3310
\itemtt[WeightedEhrhartQuasiPolynomial] controls the weighted Ehrhart quasipolynomial.
3311
3312
\itemtt[IsTriangulationNested] controls the indicator of this property.
3313
\itemtt[IsTriangulationPartial] similar.
3314
3315
\end{itemize}
3316
3317
\subsection{Rational and integer solutions in the inhomogeneous case}\label{InhomDual}
3318
3319
The integer solutions of a homogeneous diophantine system generate the rational solutions as well: every rational solution has a multiple that is an integer solution. Therefore the rational solutions do not need an extra computation. If you prefer geometric language: a rational cone is generated by its lattice points.
3320
3321
This is no longer true in the inhomogeneous case where the computation of the rational solutions is an extra task for Normaliz. This extra step is inevitable for the primal algorithm, but not for the dual algorithm. In general, the computation of the rational solutions is much faster than the computation of the integral solutions, but this is by no means always the case.
3322
3323
Therefore we have decoupled the two computations if the dual algorithm is applied to inhomogeneous systems or to the computation of degree $1$ points in the homogeneous case. The combinations
3324
\begin{itemize}
3325
\itemtt [DualMode HilbertBasis, -dN]
3326
3327
\itemtt [DualMode Deg1Elements, -d1]
3328
3329
\itemtt [DualMode ModuleGenerators]
3330
\end{itemize}
3331
do not imply the computation goal \verb|SupportHyperplanes| (and not even \verb|Sublattice|) which would trigger the computation of the rational solutions (geometrically: the vertices of the polyhedron). If you want to compute them, you must add one of
3332
\begin{itemize}
3333
\itemtt[SupportHyperplanes, -s]
3334
3335
\itemtt[ExtremeRays]
3336
3337
\itemtt[VerticesOfPolyhedron]
3338
\end{itemize}
3339
The last choice is only possible in the inhomogeneous case. Another possibility in the inhomogeneous case is to use \verb|DualMode| without \verb|-N|.
3340
3341
If \verb|Projection| or \verb|ProjectionFloat| is used for parallelotopes defined by inequalities, then Normaliz does not compute the vertices, unless asked for by one of the three computation goals just mentioned.
3342
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% RUNNING %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3343
\section{Running Normaliz}\label{options}
3344
3345
The standard form for calling Normaliz is
3346
\begin{quote}
3347
\verb|normaliz [options]| <project>
3348
\end{quote}
3349
where \verb|<project>| is the name of the project, and the corresponding input file is \verb|<project>.in|. Note that \verb|normaliz| may require to be prefixed by a path name, and the same applies to \verb|<project>|. A typical example on a Linux or Mac system:
3350
\begin{quote}
3351
\verb|./normaliz --verbose -x=5 example/big|
3352
\end{quote}
3353
that for MS Windows must be converted to
3354
\begin{quote}
3355
\verb|.\normaliz --verbose -x=5 example\big|
3356
\end{quote}
3357
3358
Normaliz uses the standard conventions for calls from the command line:
3359
\begin{arab}
3360
\item the order of the arguments on the command line is arbitrary.
3361
\item Single letter options are prefixed by the character \verb|-| and can be grouped into one string.
3362
\item Verbatim options are prefixed by the characters \verb|--|.
3363
\end{arab}
3364
3365
The options for computation goals and algorithmic variants have been described in Section \ref{Goals}. In this section the remaining options for the control of execution and output are discussed, together with some basic rules for the use of the options.
3366
3367
\subsection{Basic rules}
3368
The options for computation goals and algorithms variants have been explained in Section \ref{Goals}. The options that control the execution and the amount of output will be explained in the following. Basic rules for the use of options:
3369
3370
\begin{enumerate}
3371
\item If no \ttt{<project>} is given, the
3372
program will terminate.
3373
3374
\item The option \ttt{-x} differs from the other ones: \ttt{<T>} in \verb|-x=<T>|
3375
represents a positive number assigned to \ttt{-x}; see
3376
Section \ref{exec}.
3377
3378
\item Similarly the option \ttt{-{}-OutputDir=<outdir>} sets the output directory; see \ref{outcontrol}.
3379
3380
\item Normaliz will look for \ttt{<project>.in} as input
3381
file.
3382
3383
If you inadvertently typed \ttt{rafa2416.in} as the project
3384
name, then Normaliz will first look for \ttt{rafa2416.in.in}
3385
as the input file. If this file doesn't exist,
3386
\ttt{rafa2416.in} will be loaded.
3387
3388
\item The options can be given in arbitrary order. All options, including those in the input file, are accumulated, and syntactically there is no mutual exclusion. However, some options may block others during the computation. For example, \verb|KeepOrder| blocks \verb|BottomDecomposition|.
3389
3390
\item If Normaliz cannot perform a computation explicitly asked for by the
3391
user, it will terminate. Typically this happens if no grading is given although
3392
it is necessary.
3393
3394
\item If the options include \verb|DefaultMode|, Normaliz does not complain about missing data
3395
(anymore). It will simply omit those computations that are impossible.
3396
3397
\item If a certain type of computation is not asked for explicitly, but can
3398
painlessly be produced as a side effect, Normaliz will compute it. For
3399
example, as soon as a grading is present and the Hilbert basis is computed, the
3400
degree $1$ elements of the Hilbert basis are selected from it.
3401
3402
\end{enumerate}
3403
3404
\subsection{Info about Normaliz}
3405
3406
\begin{itemize}
3407
\itemtt [-{}-help, -?] displays a help screen listing the Normaliz options.
3408
3409
\itemtt [-{}-version] displays information about the Normaliz executable.
3410
\end{itemize}
3411
3412
3413
\subsection{Control of execution}\label{exec}
3414
3415
The options that control the execution are:
3416
3417
\begin{itemize}
3418
\itemtt[{-}{-}verbose, -c] activates the verbose (``console'') behavior of
3419
Normaliz in which Normaliz writes additional
3420
information about its current activities to the
3421
standard output.
3422
3423
\itemtt[-x=<T>] Here \ttt{<T>} stands for a positive
3424
integer limiting the number of threads that Normaliz
3425
is allowed access on your system. The default value is
3426
$8$. (Your operating system may set a lower limit).
3427
3428
\ttt{-x=0} switches off the limit set by Normaliz.
3429
3430
If you want to run
3431
Normaliz in a strictly serial mode, choose
3432
\ttt{-x=1}.
3433
\end{itemize}
3434
3435
The number of threads can also be controlled by the environment
3436
variable \verb+OMP_NUM_THREADS+. See Section \ref{PerfPar} for
3437
further discussion.
3438
3439
\subsection{Interruption}\label{interrupt}
3440
3441
During a computation \verb|normaliz| can be interrupted by pressing Ctrl-C on the keyboard. If this happens, Normaliz will stop the current computation and write the already computed data to the output file(s).
3442
3443
At present, the Normaliz interrupt control has no effect during SCIP computations.
3444
3445
If Ctrl-C is pressed during the output phase, Normaliz is stopped immediately.
3446
3447
\subsection{Control of output files}\label{outcontrol}
3448
3449
In the default setting Normaliz writes only the output file
3450
\ttt{<project>.out} (and the files produced by \ttt{Triangulation} and \ttt{StanleyDec}). The
3451
amount of output files can be
3452
increased as follows:
3453
\begin{itemize}
3454
\itemtt[{-}{-}files, -f] Normaliz writes the additional output files
3455
with suffixes \ttt{gen}, \ttt{cst}, and \ttt{inv},
3456
provided the data of these files have been computed.
3457
\itemtt[{-}{-}all-files, -a] includes \ttt{Files}, Normaliz writes all
3458
available output files (except \verb|typ|, the triangulation or the
3459
Stanley decomposition, unless these have been requested).
3460
\itemtt [{-}{-}<suffix>] chooses the output file with suffix \verb|<suffix>|.
3461
\end{itemize}
3462
3463
For the list of potential output files, their suffixes and their interpretation
3464
see Section \ref{optionaloutput}. There may be several options \verb|--<suffix>|.
3465
3466
If the computation goal \verb|IntegerHull| is set, Normaliz computes a second cone and lattice. The output is contained in \verb|<project>.IntHull.out|. The options for the output of \verb|<project>| are applied to \verb|<project>.IntHull| as well. There is no way to control the output of the two computations individually.
3467
3468
Similarly, if symmetrization has been used, Normaliz writes the file \verb|<project>.symm.out|. It contains the data of the symmetrized cone.
3469
3470
Sometimes one wants the output to be written to another directory. The output directory can be set by
3471
\begin{itemize}
3472
\itemtt[{-}{-}OutputDir=<outdir>] The path \ttt{<outdir>} is an absolute path or a path relative to the current directory (which is not necessarily the directory of \verb|<project>.in|.)
3473
\end{itemize}
3474
Note that all output files will be written to the chosen directory. It must be created before Normaliz is started.
3475
3476
Extreme rays and vertices may have very long integer coordinates. One can suppress their output by
3477
\begin{itemize}
3478
\itemtt[NoExtRaysOutput]
3479
\end{itemize}
3480
\verb|NoExtRaysOutput| is not a cone property.
3481
3482
\subsection{Overriding the options in the input file}
3483
3484
Since Normaliz accumulates options, one cannot get rid of settings in the input file by command line options unless one uses
3485
\begin{itemize}
3486
\itemtt[{-}{-}ignore, -i] This option disables all settings in the input file.
3487
\end{itemize}
3488
3489
3490
3491
3492
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% EXAMPLES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3493
\section{More examples}\label{MoreExamples}
3494
3495
\subsection{Lattice points in polytopes once more}\label{LattPoints}
3496
3497
There are two ways to define polytopes for Normaliz:
3498
\begin{arab}
3499
\item by homogeneous input: a cone $C$ and a grading $\deg$ define the polytope $P=C\cap \{x:\deg x=1 \}$,
3500
\item by inhomogeneous input: a bounded polyhedron $P$ is a polytope.
3501
\end{arab}
3502
As far as the computation of lattice points is concerned, there is essentially no difference in the algorithms that can be used.
3503
3504
However, there is a difference in the output file(s): for homogeneous input the lattice points in $P$ appear as ``degree $1$ elements of the Hilbert basis'' whereas they are the ``module generators'' in the inhomogeneous case.
3505
3506
Normaliz has three main algorithms for the computation of lattice points of which two have two variants each:
3507
\begin{arab}
3508
\item the project-and-lift algorithm (\verb|Projection, -j|),
3509
\item its variant using floating point arithmetic (\verb|ProjectionFloat, -J|),
3510
\item the triangulation based Normaliz primal algorithm specialized to lattice points (\verb|PrimalMode, -P|),
3511
\item its variant using approximation of rational polytopes (\verb|Approximate, -r|),
3512
\item the dual algorithm specialized to lattice points (\verb|DualMode, -d|).
3513
\end{arab}
3514
3515
The options \verb|Projection|, \verb|ProjectionFloat| and \verb|Approximate| imply the computation goals \verb|Deg1Points| (in the homogeneous case) and \verb|ModuleGenerators| (in the inhomogeneous case). Since \verb|PrimalMode| can also be used for the computation of Hilbert series and Hilbert bases, one must add the computation goal to it. In the homogeneous case one must add the computation goal also to \verb|DualMode|.
3516
3517
We recommend the reader to experiment with the following input files:
3518
\begin{itemize}
3519
\item \verb|5x5.in|
3520
\item \verb|6x6.in|
3521
\item \verb|max_polytope_cand.in|
3522
\item \verb|hickerson-18.in|
3523
\item \verb|knapsack_11_60.in|
3524
\item \verb|ChF_2_64.in|
3525
\item \verb|ChF_8_1024.in|
3526
\item \verb|ChF_16_1048576.in| (may take some time)
3527
\item \verb|pedro2.in|
3528
\end{itemize}
3529
3530
In certain cases you must use \verb|-i| on the command line to override the options in the input file.
3531
3532
\verb|max_polytope_cand.in| came up in connection with the paper ``Quantum jumps of normal polytopes'' by W. Bruns, J. Gubeladze and M. Micha\l{}ek, Discrete Comput.\ Geom.\ 56 (2016), no. 1, 181--215. The files \verb|ChF*.in| are taken from the paper ``On the orthogonality of the Chebyshev-Frolov lattice and applications'' by Ch. Kacwin, J. Oettershagen and T. Ullrich (\url{arXiv:1606.00492}, Math. Monatshefte, to appear). \verb|hickerson-18.in| is taken from the LattE distribution \cite{LatInt}. \verb|pedro2.in| was suggested by P. Garcia-Sanchez.
3533
3534
\subsubsection{Project-and-lift}\label{project}
3535
3536
We have explained the project-and-lift algorithm in Section \ref{project_example}.
3537
This algorithm is very robust arithmetically since it need not compute determinants or solve systems of linear equations. Moreover, the project-and-lift algorithm itself does not use the vertices of the polytope explicitly and only computes lattice points in $P$ and its successive projections. Therefore it is rather insensitive to rational vertices with large denominators. (To get started it must usually compute the vertices of the input polytope; an exception are parallelotopes, as mentioned in Section \ref{project_example}.)
3538
3539
The option for project-and-lift is
3540
\begin{itemize}
3541
\itemtt [Projection, -j]
3542
\end{itemize}
3543
3544
There are two complications that may slow it down unexpectedly: (i) the projections may have large numbers of support hyperplanes, as seen in the example \verb|ChF_16_1048576.in| (it uses floating point arithmetic in the lifting part):
3545
\begin{Verbatim}
3546
embdim 17 inequalities 32
3547
embdim 16 inequalities 240
3548
embdim 15 inequalities 1120
3549
...
3550
embdim 11 inequalities 22880
3551
embdim 10 inequalities 25740
3552
embdim 9 inequalities 22880
3553
...
3554
embdim 3 inequalities 32
3555
embdim 2 inequalities 2
3556
\end{Verbatim}
3557
3558
(ii) The projections may have many lattice points that cannot be lifted to the top. As an example we look at the terminal output of \verb|pedro2.in|:
3559
\begin{Verbatim}
3560
embdim 2 Deg1Elements 33
3561
embdim 3 Deg1Elements 478
3562
embdim 4 Deg1Elements 4201
3563
embdim 5 Deg1Elements 17449
3564
embdim 6 Deg1Elements 2
3565
\end{Verbatim}
3566
3567
Despite these potential problems, \verb|Projection| is the default choice of Normaliz for the computation of lattice points (if not combined with Hilbert series or Hilbert basis). If you do not want to use it, you must either choose another method explicitly or switch it off by \verb|NoProjection|.
3568
3569
\emph{Parallelotopes.}\enspace
3570
Lattice points in parallelotopes that are defined by inequalities, like those in the input files \verb|ChF*.in|, can be computed without any knowledge of the vertices. In fact, for them it is favorable to present a face $F$ by the list of facets whose intersection $F$ is (and not by the list of the $2^{\dim F}$ vertices of $F$!). Parallelotopes are not only simple polytopes. It is important that two faces do not intersect if and only if they are contained in parallel facets, and this is easy to control.
3571
3572
Normaliz recognizes parallelotopes by itself, and suppresses the computation of the vertices unless asked to compute them.
3573
3574
\emph{Remark.}\enspace
3575
The triangulation based primal algorithm and the dual algorithm do not depend on the embedding of the computed objects into the ambient space since they use only data that are invariant under coordinate transformations. This is not true for project-and-lift and the approximation discussed below.
3576
3577
\subsubsection{Project-and-lift with floating point arithmetic}
3578
3579
Especially the input of floating point numbers often forces Normaliz into GMP arithmetic. Since GMP arithmetic is slow (compared to arithmetic with machine integers), Normaliz has a floating point variant of the project-and-lift algorithm. (Such an algorithm makes no sense for Hilbert bases or Hilbert series.) It behaves very well, even in computations for lower dimensional polytopes. We have not found a single deviation from the results with GMP arithmetic in our examples.
3580
3581
The option for the floating point variant of project-and-lift is
3582
\begin{itemize}
3583
\itemtt [ProjectionFloat, -J]
3584
\end{itemize}
3585
If you want a clear demonstration of the difference between \verb|Projection| and \verb|ProjectionFloat|, try \verb|ChF_16_1048576.in|.
3586
3587
The use of \verb|ProjectionFloat| or any other algorithmic variant is independent of the input type.
3588
\subsubsection{The triangulation based primal algorithm}
3589
3590
With this algorithm, Normaliz computes a triangulation as it does for the computation of Hilbert bases (in primal mode) for the cone over the polytope. Then it computes the lattice points in each of the subpolytopes defined by the simplicial subcones in the triangulation. The difference to the Hilbert basis calculation is that all points that do not lie in our polytope $P$ can be discarded right away and that no reduction is necessary.
3591
3592
The complications that can arise are (i) a large triangulation or (ii) large determinants of the simplicial cones. Normaliz tries to keep the triangulations small by restricting itself to a partial triangulation, but often there is nothing one can do. Normaliz deals with large determinants by applying project-and-lift to the simplicial subcones with large determinants. We can see this by looking at the terminal output of \verb|max_polytope_cand.in|, computed with \verb|-cP|:
3593
\begin{Verbatim}
3594
...
3595
evaluating 49 simplices
3596
||||||||||||||||||||||||||||||||||||||||||||||||||
3597
49 simplices, 819 deg1 vectors accumulated.
3598
47 large simplices stored
3599
Evaluating 47 large simplices
3600
Large simplex 1 / 47
3601
************************************************************
3602
starting primal algorithm (only support hyperplanes) ...
3603
Generators sorted lexicographically
3604
Start simplex 1 2 3 4 5
3605
Pointed since graded
3606
Select extreme rays via comparison ... done.
3607
------------------------------------------------------------
3608
transforming data... done.
3609
Computing lattice points by project-and-lift
3610
Projection
3611
embdim 5 inequalities 5
3612
...
3613
embdim 2 inequalities 2
3614
Lifting
3615
embdim 2 Deg1Elements 64
3616
...
3617
embdim 5 Deg1Elements 32268
3618
Project-and-lift complete
3619
...
3620
\end{Verbatim}
3621
After finishing the $49$ ``small'' simplicial cones, Normaliz takes on the $47$ ``large'' simplicial cones, and does them by project-and-lift. Therefore one can say that Normaliz takes a hybrid approach if you request primal mode.
3622
3623
An inherent weakness of the triangulation based algorithm is that its efficiency drops with $d!$ where $d$ is the dimension because the proportion of lattice points in $P$ of all points generated by the algorithm must be expected to be $1/d!$ (as long as small simplicial cones are evaluated). To some extent this is compensated by the extremely fast generation of the candidates.
3624
3625
\subsubsection{Lattice points by approximation}\label{approx}
3626
3627
Large determinants come up easily for rational polytopes whose vertices have large denominators. In previous versions, Normaliz fought against large determinants caused by rational vertices by finding an integral polytope $Q$ containing $P$, computing the lattice points in $Q$ and then sieving out those that are in $Q\setminus P$:
3628
\begin{center}
3629
\begin{tikzpicture}[scale=0.6]
3630
\filldraw[fill=orange] (0,0) -- (0,1) -- (1,3) -- (2,4) -- (3,4) -- (4,1) -- (3,0) --cycle;
3631
\filldraw[fill=yellow] (0.3,0.6) -- (3.4,0.8) -- (2.3,3.8) --(1.2,2.7) -- cycle;
3632
\foreach \x in {0,...,4}
3633
\foreach \y in {0,...,4}
3634
{
3635
\filldraw[fill=black] (\x,\y) circle (1.5pt);
3636
}
3637
% \draw (1,0) -- (0,1) -- (2,2) --cycle;
3638
\end{tikzpicture}
3639
\end{center}
3640
3641
This approach is still possible. It is requested by the option
3642
\begin{itemize}
3643
\itemtt [Approximate, -r]
3644
\end{itemize}
3645
3646
This is often a good choice, especially in low dimension.
3647
3648
It is not advisable to use approximation for polytopes with a large number of vertices since it must be expected that the approximation multiplies the number of vertices by $\dim P+1$ so that it may become difficult to compute the triangulation.
3649
3650
\subsubsection{Lattice points by the dual algorithm}
3651
3652
Often the dual algorithm is extremely fast. But it can also degenerate terribly. It is very fast for \verb|6x6.in| run with \verb|-d1|. The primal algorithm or approximation fail miserably. (\verb|-1|, the default choice project-and-lift, is also quite good. The difference is that \verb|-d1| does not compute the vertices that in this case are necessary for the preparation of project-and-lift.)
3653
3654
On the other hand, the dual algorithm is hopeless already for the $2$-dimensional parallelotope \verb|ChF_2_64.in|. Try it. It is clear that the complicated arithmetic is forbidding for the dual algorithm. (The dual algorithm successively computes the lattice points correctly for all intermediate polyhedra, defined as intersections of the half spaces that have been processed so far. The intermediate polyhedra can be much more difficult than the final polytope, as in this case.)
3655
3656
In certain cases (see Section \ref{div_labor}) Normaliz will try the dual algorithm if you forbid project-and-lift by \verb|NoProjection|.
3657
3658
\subsection{The bottom decomposition}\label{bottom_dec}
3659
3660
The triangulation size and the determinant sum of the triangulation are critical size parameters in Normaliz computations. Normaliz always tries to order the generators in such a way that the determinant sum is close to the minimum, and on the whole this works out well. The use of the bottom decomposition by \verb|BottomDecomposition, -b| enables Normaliz to compute a triangulation with the optimal determinant sum for the given set of generators, as we will explain in the following.
3661
3662
The determinant sum is independent of the order of the generators of the cone $C$ if they lie in a hyperplane $H$. Then the determinant sum is exactly the normalized volume of the polytope spanned by $0$ and $C\cap H$. The triangulation itself depends on the order, but the determinant sum is constant.
3663
\begin{center}
3664
\begin{tikzpicture}[scale=0.4]
3665
\filldraw[gray!20] (-3.5,5.833) -- (0,0) -- (6,4) -- (6,5.833) -- cycle;
3666
\filldraw[yellow] (0,0) -- (-3,5) -- (3,2) -- cycle;
3667
\draw (-3.5,5.833) -- (0,0) -- (6,4);
3668
\foreach \x in {-4,...,5}
3669
\foreach \y in {0,...,5}
3670
{
3671
\filldraw[fill=black] (\x,\y) circle (1.5pt);
3672
}
3673
\draw (-4,5.5) --(4,1.5) node at (4.5,1.3){\tiny $H$};
3674
3675
\draw (-3,5) circle (4pt) node at (3.5,4.5){\tiny $C$};
3676
\draw (1,3) circle (4pt);
3677
\draw (3,2) circle (4pt);
3678
\end{tikzpicture}
3679
\end{center}
3680
3681
This observation helps to find a triangulation with minimal determinant sum in the general case.
3682
We look at the \emph{bottom} (the union of the compact faces) of the polyhedron generated by $x_1,\dots,x_n$ as vertices and $C$ as recession cone, and take the volume underneath the bottom:
3683
\begin{center}
3684
\begin{tikzpicture}[scale=0.4]
3685
\filldraw[gray!20] (-3.5,5.833) -- (0,0) -- (6,4) -- (6,5.833) -- cycle;
3686
\filldraw[yellow] (0,0) -- (-3,5) -- (-1,3) -- (1,2) -- (3,2) -- cycle;
3687
3688
\draw (-3,5) -- (-1,3) -- (1,2) -- (3,2);
3689
3690
\draw (-3.5,5.833) -- (0,0) -- (6,4);
3691
\foreach \x in {-4,...,5}
3692
\foreach \y in {0,...,5}
3693
{
3694
\filldraw[fill=black] (\x,\y) circle (1.5pt);
3695
}
3696
3697
\draw (-3,5) circle (4pt) node at (3.5,4.5){\tiny $C$};
3698
\draw (-1,3) circle (4pt);
3699
\draw (1,3) circle (4pt);
3700
\draw (3,2) circle (4pt);
3701
\draw (1,2) circle (4pt);
3702
\end{tikzpicture}
3703
\end{center}
3704
With the option \texttt{BottomDecomposition}, \texttt{-b}, Normaliz computes a triangulation that respects the bottom facets. This yields the optimal determinant sum for the given generators. If one can compute the Hilbert basis by the dual algorithm, it can be used as input, and then one obtains the absolute bottom of the cone, namely the compact facets of the convex hull of all nonzero lattice points.
3705
3706
Normaliz does not always use the bottom decomposition by default since its computation requires some time and administrative overhead. However, as soon as the input ``profile'' is considered to be ``rough'' it is invoked. The measure of roughness is the ratio between the maximum degree (or $L_1$ norm without a grading) and the minimum. A ratio $\ge 10$ activates the bottom decomposition.
3707
3708
If you have the impression that the bottom decomposition slows down your computation, you can suppress it by \texttt{NoBottomDec}, \texttt{-o}.
3709
3710
The bottom decomposition is part of the subdivision of large simplicial cones discussed in the next section.
3711
3712
The example \verb|StrictBorda.in| belongs to social choice theory like \verb|Condorcet.in| (see Section \ref{Condorcet}), \verb|PluralityVsCutoff.in| and \verb|CondEffPlur.in|. The last two profit enormously from symmetrization (see Section \ref{symmetrize}), but \verb|StrictBorda.in| does not. Therefore we must compute the Hilbert series (or at least the multiplicity) for a monoid in dimension $24$ whose cone has $6363$ extreme rays. It demonstrates the substantial gain that can be reached by bottom decomposition. Since the roughness is large enough, Normaliz chooses bottom decomposition automatically, unless we block it.
3713
\begin{center}
3714
\begin{tabular}{|c|r|r|}\hline
3715
algorithm & triangulation size& determinant sum \\ \hline
3716
bottom decomposition& 30,399,162,846 &75,933,588,203 \\ \hline
3717
standard order of extreme rays, \ttt{-o} & 119,787,935,829 & 401,249,361,966\\ \hline
3718
\end{tabular}
3719
\end{center}
3720
3721
\subsection{Subdivision of large simplicial cones}\label{subdiv}
3722
3723
Especially in computations with rational polytopes one encounters very large determinants that can keep the Normaliz primal algorithm from terminating in reasonable time. As an example we take \verb|hickerson-18.in| from the LattE distribution \cite{LatInt}. It is simplicial and the complexity is totally determined by the large determinant $\approx 4.17\times 10^{14}$ (computed with \verb|-v|).
3724
3725
If we are just interested in the degree $1$ points, Normaliz uses the project-and-lift method of Section \ref{project} and finds $44$ degree $1$ points in the blink of an eye. If we use these points together with the extreme rays of the simplicial cone, then the determinant sum decreases to $\approx 1.3\times 10^{12}$, and the computation of the Hilbert basis and the Hilbert series is in reach. But it is better to pursue the idea of subdividing large simplicial cones systematically. Normaliz employs two methods:
3726
\begin{arab}
3727
\item computation of subdivision points by the IP solver SCIP,
3728
\item its own algorithm for finding optimal subdivision points, based on project-and-lift.
3729
\end{arab}
3730
3731
Normaliz tries to subdivide a simplicial cone if it has determinant $\ge 10^8$ or $10^7$ if the Hilbert basis is computed. Both methods are used recursively via stellar subdivision until simplicial cones with determinant $< 10^6$ have been reached or no further improvement is possible. All subdivision points are then collected, and the start simplicial cone is subdivided with bottom decomposition, which in general leads to substantial further improvement.
3732
3733
The use of SCIP requires a Normaliz executable built with SCIP (see Section \ref{Compile}). Moreover, the option \verb|SCIP| must be set since in many cases the Normaliz method is faster and always finds a subdivision point if such exists.
3734
3735
The following table contains some performance data for subdivisions based on the Normaliz method (default mode, parallelization with 8 threads).
3736
\begin{center}
3737
\setlength{\tabcolsep}{3.2pt}
3738
\renewcommand{\arraystretch}{1.2}
3739
\begin{tabular}{|c|c|c|c|}
3740
\hline
3741
& \ttt{hickerson-16} & \ttt{hickerson-18} & \ttt{knapsack\_11\_60} \\ \hline
3742
simplex volume & $9.83\times 10^7$ & $4.17\times 10^{14}$ & $2.8\times 10^{14}$ \\ \hline
3743
stellar determinant sum & $3.93\times 10^6$ & $9.07\times 10^8$ & $1.15\times 10^8$\\ \hline
3744
volume under bottom & $8.10\times 10^5$ & $3.86\times 10^7$ & $2.02\times 10^7$ \\ \hline
3745
volume used & $3.93\times 10^6$ & $6.56\times 10^7$ & $2.61\times 10^7$ \\ \hline
3746
%improvement factor & 25 & $7.62\times10^6$ & $1.17\times 10^7$\\ \hline
3747
runtime without subdivision & 2.8 s & >12 d & >8 d \\ \hline
3748
runtime with subdivision & 0.4 s & 24 s & 5.1 s \\ \hline
3749
\end{tabular}
3750
\end{center}
3751
3752
A good nonsimplicial example showing the subdivision at work is \verb|hickerson-18plus1.in| with option \verb|-q|.
3753
3754
Note: After subdivision the decomposition of the cone may no longer be a triangulation in the strict sense, but a decomposition that we call a \emph{nested triangulation}; see \ref{nested}. If the creation of a nested triangulation must be blocked, one uses the option \verb|NoSubdivision|. Inevitably it blocks the subdivision of large simplicial cones.
3755
3756
\emph{Remark}\enspace The bounds mentioned above work well up to dimension $\approx 10$. For a fixed determinant, the probability for finding a subdivision point decreases rapidly.
3757
3758
\subsection{Primal vs. dual -- division of labor}\label{div_labor}
3759
3760
%\subsection{Normaliz tries to be smart}\label{smart}
3761
3762
As already mentioned several times, Normaliz has two main algorithms for the computation of Hilbert bases and degree $1$ points, the primal algorithm and the dual algorithm. It is in general very hard to decide beforehand which of the two is better for a specific example. Nevertheless Normaliz tries to guess, unless \verb|PrimalMode|, \verb|-P| or \verb|DualMode|, \verb|-d| is explicitly chosen by the user. In first approximation one can say that the dual algorithm is chosen if the computation is based on constraints and the number of inequalities is neither too small nor too large. Normaliz chooses the dual algorithm if at the start of the Hilbert basis computation the cone is defined by $s$ inequalities such that
3763
$$
3764
r+\frac{50}{r} \le s \le 2e
3765
$$
3766
where $r$ is the rank of the monoid to be computed and $e$ is the dimension of the space in which the data are embedded. These conditions are typically fulfilled for diophantine systems of equations whose nonnegative solutions are asked for.
3767
In the case of very few or many hyperplanes Normaliz prefers the primal algorithm. While this combinatorial condition is the only criterion for Normaliz, it depends also on the arithmetic of the example what algorithm is better. At present Normaliz makes no attempt to measure it in some way.
3768
3769
When both Hilbert basis and Hilbert series are to be computed, the best solution can be the combination of both algorithms. We recommend \verb|2equations.in| as a demonstration example which combines the algorithmic variant \verb|DualMode| and the computation goal \verb|HilbertSeries|:
3770
\begin{Verbatim}
3771
amb_space 9
3772
equations 2
3773
1 6 -7 -18 25 -36 6 8 -9
3774
7 -13 15 6 -9 -8 11 12 -2
3775
total_degree
3776
DualMode
3777
HilbertSeries
3778
\end{Verbatim}
3779
As you will see, the subdivision of large simplicial cones is very useful for such computations.
3780
3781
Compare \verb|2equations.in| and \verb|2equations_default.in| for an impression on the relation between the algorithms.
3782
3783
\subsection{Checking the Gorenstein property}\label{Gorenstein}
3784
3785
If the Hilbert series has been computed, one can immediately see whether the monoid computed by Normaliz is Gorenstein: this is the case if and only if the numerator is a symmetric polynomial, and Normaliz indicates that (see Section \ref{job_dual}). However, there is a much more efficient way to check the Gorenstein property, which does not even require the existence of a grading: we must test whether the \emph{dual} cone has degree $1$ extreme rays. This amounts to checking the existence of an implicit grading on the dual cone.
3786
3787
This very efficient Gorenstein test is activated by the option \ttt{IsGorenstein}, equivalently \ttt{-G} on the command line. We take \verb|5x5Gorenstein.in|:
3788
3789
\begin{Verbatim}
3790
amb_space 25
3791
equations 11
3792
1 1 1 1 1 -1 -1 -1 -1 -1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3793
...
3794
1 1 1 1 0 0 0 0 -1 0 0 0 -1 0 0 0 -1 0 0 0 -1 0 0 0 0
3795
IsGorenstein
3796
\end{Verbatim}
3797
3798
In the output we see
3799
\begin{Verbatim}
3800
Monoid is Gorenstein
3801
Generator of interior
3802
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
3803
\end{Verbatim}
3804
3805
In fact, the Gorenstein property is (also) equivalent to the fact that the interior of our monoid is generated by a single element as an ideal, and this generator is computed if the monoid is Gorenstein. (It defines the grading under which the extreme rays of the dual cone have degree~$1$.)
3806
3807
If the monoid is not Gorenstein, Normaliz will print the corresponding message.
3808
3809
\subsection{Symmetrization}\label{symmetrize}
3810
3811
Under certain conditions one can count lattice points in a cone $C$ by mapping $C$ to a cone $C'$ of lower dimension and then counting each lattice point $y$ in $C'$ with the number of its lattice preimages. This approach works well if the number of preimages is given by a polynomial in the coordinates of $y$. Since $C'$ has lower dimension, one can hope that its combinatorial structure is much simpler than that of $C$. One must of course pay a price: instead of counting each lattice point with the weight $1$, one must count it with a polynomial weight. This amounts to a computation of a weighted Ehrhart series that we will discuss in Section \ref{Poly_comp}. Similarly multiplicity can be computed as the virtual multiplicity of a polynomial after projection.
3812
3813
The availability of this approach depends on symmetries in the coordinates of $C$, and therefore we call it \emph{symmetrization}. Normaliz tries symmetrization under the following condition: $C$ is given by constraints (inequalities, equations, congruences, excluded faces) and the inequalities contain the sign conditions $x_i\ge 0$ for all coordinates $x_i$ of $C$. (Coordinate hyperplanes may be among the excluded faces.) Then Normaliz groups coordinates that appear in all constraints and the grading (!) with the same coefficients, and, roughly speaking, replaces them by their sum. The number of preimages that one must count for the vector $y$ of sums is then a product of binomial coefficients -- a polynomial as desired. More precisely, if $y_j$, $j=1,\dots,m$, is the sum of $u_j$ variables $x_i$ then
3814
$$
3815
f(y)=\binom{u_1+y_1-1}{u_1-1}\cdots \binom{u_m+y_m-1}{u_m-1}.
3816
$$
3817
is the number of preimages of $(y_1,\dots,y_m)$. This approach to Hilbert series has been suggested by A.~Sch\"urmann \cite{Sch}.
3818
3819
As an example we look again at the input for the Condorcet paradox:
3820
\begin{Verbatim}
3821
amb_space 24
3822
inequalities 3
3823
1 1 1 1 1 1 -1 -1 -1 -1 -1 -1 1 1 -1 -1 1 -1 1 1 -1 -1 1 -1
3824
1 1 1 1 1 1 1 1 -1 -1 1 -1 -1 -1 -1 -1 -1 -1 1 1 1 -1 -1 -1
3825
1 1 1 1 1 1 1 1 1 -1 -1 -1 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1
3826
nonnegative
3827
total_degree
3828
Multiplicity
3829
\end{Verbatim}
3830
The grading is completely symmetric, and it is immediately clear that the input is symmetric in the first $6$ coordinates. But also the column of three entries $-1$ appears $6$ times, and there are $6$ more groups of $2$ coordinates each (one group for each $\pm1$ pattern). With the suitable labeling, the number of preimages of $(y_1,\dots,y_8)$ is given by
3831
$$
3832
f(y)=\binom{y_1+5}{5}(y_2+1)(y_3+1)(y_4+1)(y_5+1)(y_6+1)(y_7+1)\binom{y_8+5}{5}.
3833
$$
3834
Normaliz finds the groups of variables that appear with the same sign pattern, creates the data for the weighted Ehrhart series, and interprets it as the Hilbert series of the monoid defined by the input data.
3835
3836
However, there is a restriction. Since the polynomial arithmetic has its own complexity and Normaliz must do it in GMP integers, it makes no sense to apply symmetrization if the dimension does not drop by a reasonable amount. Therefore we require that
3837
$$
3838
\dim C' \le \frac{2}{3}\dim C.
3839
$$
3840
If called with the options \verb|-q| or \verb|-v| Normaliz will try symmetrization. If the inequality for $\dim C'$ is not satisfied, it will simply compute the Hilbert series or the multiplicity without symmetrization. (In default mode it of course tries symmetrization for the Hilbert series.)
3841
3842
Whenever Normaliz has used symmetrization, it writes the file \verb|<project>.symm.out| that contains the data of the symmetrized object. In it you find the multiplicity of \verb|<project>.out| as virtual multiplicity and the Hilbert series as weighted Ehrhart series.
3843
3844
If you use the option \verb|Symmetrize|, then the behavior depends on the other options:
3845
\begin{arab}
3846
\item If neither the \verb|HilbertSeries| nor \verb|Multiplicity| is to be computed, Normaliz writes only the output file \verb|<project>.symm.out| computed with \verb|SupportHyperplanes|.
3847
\item If one of these goals is to be computed, Normaliz will do the symmetrization, regardless of the dimension inequality above (and often this makes sense).
3848
\end{arab}
3849
By doing step (1) first, the user gets useful information of what to expect by symmetrization. In a second run, one can add \verb|HilbertSeries| or \verb|Multiplicity| if (1) was satisfactory.
3850
3851
The Condorcet example is too small in order to demonstrate the power of symmetrization. A suitable example is \verb|PluralityVsCutoff.in|:
3852
\begin{Verbatim}
3853
winfried@ubuntu:~/Dropbox/git_normaliz/source$ time ./normaliz -c ../example/PluralityVsCutoff
3854
\.....|
3855
Normaliz 3.3.0 \....|
3856
\...|
3857
(C) The Normaliz Team, University of Osnabrueck \..|
3858
March 2017 \.|
3859
\|
3860
************************************************************
3861
Command line: -c ../example/PluralityVsCutoff
3862
Compute: DefaultMode
3863
Embedding dimension of symmetrized cone = 6
3864
...
3865
------------------------------------------------------------
3866
transforming data... done.
3867
3868
real 0m2.655s
3869
user 0m5.328s
3870
sys 0m0.080s
3871
\end{Verbatim}
3872
The Hilbert series is computable without symmetrization, but you better make sure that there is no power failure for the next week if you try that. (The time above includes the Hilbert basis computed automatically in dual mode).
3873
3874
Another good example included in the distribution is \verb|CondEffPlur.in|, but it takes some hours with symmetrization (instead of days without). For it, the dimension drops only from $24$ to $13$.
3875
3876
Symmetrization is a special type of computations with a polynomial weight, and therefore requires Normaliz to be built with CoCoALib.
3877
3878
\subsection{Computations with a polynomial weight}\label{Poly_comp}
3879
3880
For a graded monoid $M$, which arises as the intersection $M=C\cap L$ of a rational cone $C$ and a lattice $L$, Normaliz computes the volume of
3881
the rational polytope
3882
$$
3883
P=\{x\in \RR_+ M: \deg x=1\},
3884
$$
3885
called the multiplicity of $M$ (for the given grading), the Hilbert series of $M$, and the quasipolynomial representing the Hilbert function. This Hilbert series of $M$ is also called the Ehrhart series of $P$ (with respect to $L$), and for the generalization introduced in this section we speak of Ehrhart series and functions.
3886
3887
The computations of these data can be understood as integrals of the
3888
constant polynomial $f=1$, namely with respect to the counting
3889
measure defined by $L$ for the Ehrhart function, and with
3890
respect to the (suitably normed) Lebesgue measure for the
3891
volume. Normaliz generalizes these computations to
3892
arbitrary polynomials $f$ in $n$ variables with rational
3893
coefficients. (Mathematically, there is no need to restrict
3894
oneself to rational coefficients for $f$.)
3895
3896
More precisely, set
3897
$$
3898
E(f,k)=\sum_{x\in M, \deg x=k} f(x),
3899
$$
3900
and call $E(f,\_)$ the \emph{weighted Ehrhart function} for
3901
$f$. (With $f=1$ we simply count lattice points.) The
3902
\emph{weighted Ehrhart series} is the ordinary generating
3903
function
3904
$$
3905
E_f(t)=\sum_{k=0}^\infty E(f,k)t^k.
3906
$$
3907
It turns out that $E_f(t)$ is the power series expansion of a
3908
rational function at the origin, and can always be written in
3909
the form
3910
$$
3911
E_f(t)=\frac{Q(t)}{(1-t^\ell)^{\totdeg f+\rank M}},\qquad Q(t)\in\QQ[t],\
3912
\deg Q< \totdeg f+\rank M.
3913
$$
3914
Here $\totdeg f$ is the total degree of the polynomial $f$, and
3915
$\ell$ is the least common multiple of the degrees of the
3916
extreme integral generators of $M$. See \cite{BS} for an
3917
elementary account, references and the algorithm used by Normaliz.
3918
3919
At present, weighted Ehrhart series can only be computed with homogeneous data. Note that \verb|excluded_faces| is a homogeneous input type. For them the monoid $M$ is replaced by the set
3920
$$
3921
M'=C'\cap L
3922
$$
3923
where $C'=C\setminus \mathcal F$ and $\mathcal F$ is the union of a set of
3924
faces
3925
(not necessarily facets) of $C$. What has been said above about the structure
3926
of the weighted Ehrhart series remains true. We discuss an example below.
3927
3928
It follows from the general theory of rational generating
3929
functions that there exists a quasipolynomial $q(k)$ with
3930
rational coefficients and of degree $\le \totdeg f+\rank M-1$ that
3931
evaluates to $E(f,k)$ for all $k\ge 0$.
3932
3933
Let $m=\totdeg f$ (we use this notation to distinguish the degree of the polynomial from the degree of lattice points) and $f_m$ be the degree $m$ homogeneous
3934
component of $f$. By letting $k$ go to infinity and
3935
approximating $f_m$ by a step function that is constant on the
3936
meshes of $\frac 1kL$ (with respect to a fixed basis), one sees
3937
$$
3938
q^{(j)}_{\totdeg f+\rank M-1}=\int_P f_m\, d\lambda
3939
$$
3940
where $d\lambda$ is the Lebesgue measure that takes value $1$
3941
on a basic mesh of $L\cap \RR M$ in the hyperplane of degree
3942
$1$ elements in $\RR M$. In particular, the \emph{virtual
3943
leading coefficient} $q^{(j)}_{\totdeg f+\rank M-1}$ is
3944
constant and depends only on $f_m$. If the integral vanishes,
3945
the quasipolynomial $q$ has smaller degree, and the true
3946
leading coefficient need not be constant. Following the
3947
terminology of commutative algebra and algebraic geometry, we
3948
call
3949
$$
3950
(\totdeg f+\rank M-1)!\cdot q_{\totdeg f+\rank M-1}
3951
$$
3952
the \emph{virtual multiplicity} of $M$ and $f$. It is an
3953
integer if $f_m$ has integral coefficients and $P$ is a lattice
3954
polytope.
3955
3956
The input format of polynomials has been discussed in Section \ref{poly_input}.
3957
3958
The terminal output contains a factorization of the polynomial as well as some computation results. From the terminal output you may also recognize that Normaliz first computes the triangulation and the Stanley decomposition and then applies the algorithms for integrals and weighted Ehrhart series.
3959
3960
\emph{Remarks} \enspace (1) Large computations with many parallel threads may require much memory due to the fact that very long polynomials must be stored. Another reason for large memory usage can be the precomputed triangulation or Stanley decomposition.
3961
3962
(2) You should think about the option \verb|BottomDecomposition|. It will be applied to the symmetrized input. (Under suitable conditions it is applied automatically.)
3963
3964
3965
\subsubsection{A weighted Ehrhart series}
3966
3967
We discuss the Condorcet paradox again (and the last time), now starting from the symmetrized form. The file \ttt{Condorcet.symm.in} from the directory
3968
\ttt{example} contains the following:
3969
3970
\begin{Verbatim}
3971
amb_space 8
3972
inequalities 3
3973
1 -1 1 1 1 -1 -1 -1
3974
1 1 -1 1 -1 1 -1 -1
3975
1 1 1 -1 -1 -1 1 -1
3976
nonnegative
3977
total_degree
3978
polynomial
3979
1/120*1/120*(x[1]+5)*(x[1]+4)*(x[1]+3)*(x[1]+2)*(x[1]+1)*(x[2]+1)*
3980
(x[3]+1)*(x[4]+1)*(x[5]+1)*(x[6]+1)*(x[7]+1)*(x[8]+5)*(x[8]+4)*
3981
(x[8]+3)*(x[8]+2)*(x[8]+1);
3982
\end{Verbatim}
3983
We have seen this polynomial in Section \ref{symmetrize} above.
3984
3985
3986
From the Normaliz directory we start the computation by
3987
\begin{Verbatim}
3988
./normaliz -cE example/Condorcet.symm
3989
\end{Verbatim}
3990
We could have used \verb|--WeightedEhrhartSeries| instead of \verb|-E| or put \verb|WeightedEhrhartSeries| into the input file.
3991
3992
In the file \ttt{Condorcet.symm.out} we find the information on the weighted Ehrhart series:
3993
3994
\begin{Verbatim}
3995
Weighted Ehrhart series:
3996
1 5 133 363 ... 481 15 6
3997
Common denominator of coefficients: 1
3998
Series denominator with 24 factors:
3999
1: 1 2: 14 4: 9
4000
4001
degree of weighted Ehrhart series as rational function = -25
4002
4003
Weighted Ehrhart series with cyclotomic denominator:
4004
...
4005
\end{Verbatim}
4006
The only piece of data that we haven't seen already is the common denominator of coefficients. But since the polynomial has rational coefficients, we cannot any longer expect that the polynomial in the numerator of the series has integral coefficients. We list them as integers, but must then divide them by the denominator (which is $1$ in this case since the weighted Ehrhart series is a Hilbert series in disguise). As usual, the representation with a denominator of cyclotomic polynomials follows.
4007
4008
And we have the quasipolynomial as usual:
4009
4010
\begin{Verbatim}
4011
Weighted Ehrhart quasi-polynomial of period 4:
4012
0: 6939597901822221635907747840000 20899225...000000 ... 56262656
4013
1: 2034750310223351797008092160000 7092764...648000 ... 56262656
4014
2: 6933081849299152199775682560000 20892455...168000 ... 56262656
4015
3: 2034750310223351797008092160000 7092764...648000 ... 56262656
4016
with common denominator: 6939597901822221635907747840000
4017
\end{Verbatim}
4018
4019
The leftmost column indicates the residue class modulo the period, and the
4020
numbers in line $k$ are the coefficients of the $k$-th polynomial after
4021
division by the common denominator. The list starts with $q_0^{(k)}$ and ends
4022
with (the constant) $q_{23}^{(k)}$.
4023
The interpretation of the remaining data is obvious:
4024
4025
\begin{Verbatim}
4026
Degree of (quasi)polynomial: 23
4027
4028
Expected degree: 23
4029
4030
Virtual multiplicity: 1717/8192
4031
\end{Verbatim}
4032
4033
4034
\subsubsection{Virtual multiplicity}
4035
4036
Instead of the option \verb|-E| (or (\verb|--WeightedEhrhartSeries|) we use \verb|-L| or (\verb|--VirtualMultiplicity|). Then we can extract the virtual multiplicity from the output file.
4037
4038
\subsubsection{An integral}
4039
In their paper \emph{Multiplicities of classical varieties} (Proc. Lond. Math. Soc. (3) 110 (2015), 1033--1055) J. Jeffries, J. Monta\~no and M. Varbaro ask
4040
for the computation of the integral
4041
$$
4042
\int\limits_{\substack{[0,1]^m \\ \sum{x}= t}}(x_1\cdots x_{m})^{n-m}\prod_{1\le i<j \le m}(x_j-x_i)^2 \,\mathrm{d}\mu
4043
$$
4044
taken over the intersection of the unit cube in $\RR^m$ and the hyperplane of constant coordinate sum $t$. It is supposed that $t\le m \le n$. We compute the integral for $t=2$, $m=4$ and $n=6$.
4045
4046
The polytope is specified in the input file \ttt{j462.in} (partially typeset in 2
4047
columns):
4048
4049
\begin{Verbatim}
4050
amb_space 5 -1 0 0 0 1
4051
inequalities 8 0 -1 0 0 1
4052
1 0 0 0 0 0 0 -1 0 1
4053
0 1 0 0 0 0 0 0 -1 1
4054
0 0 1 0 0 equations 1
4055
0 0 0 1 0 -1 -1 -1 -1 2
4056
grading
4057
unit_vector 5
4058
polynomial
4059
(x[1]*x[2]*x[3]*x[4])^2*(x[1]-x[2])^2*(x[1]-x[3])^2*
4060
(x[1]-x[4])^2*(x[2]-x[3])^2*(x[2]-x[4])^2*(x[3]-x[4])^2;
4061
\end{Verbatim}
4062
4063
The $8$ inequalities describe the unit cube in $\RR^4$ by the inequalities $0\le z_i\le 1$
4064
and the equation gives the hyperplane $z_1+\dots+z_4=2$ (we must use homogenized coordinates!). (Normaliz would find the grading itself.)
4065
4066
From the Normaliz directory the computation is called by
4067
4068
\begin{Verbatim}
4069
./normaliz -cI example/j462
4070
\end{Verbatim}
4071
where \verb|-I| could be replaced by \verb|--Integral|.
4072
4073
It produces the output in \ttt{j462.out} containing
4074
4075
\begin{Verbatim}
4076
Integral: 27773/29515186701000
4077
\end{Verbatim}
4078
4079
\subsubsection{Restrictions in MS Windows}
4080
4081
We have not succeeded in compiling Normaliz with CoCoALib under MS Windows. In previous versions of Normaliz, the computations with polynomial weights were done by the separate program NmzIntegrate, and NmzIntegrate can still be used (in all operating systems). One must start the computation from NmzIntegrate (and not from Normaliz, as was also possible in previous versions).
4082
4083
Unfortunately, 1.3 is the last version of NmzIntegrate that we could compile under MS Windows. This causes some restrictions in the use of NmzIntegrate:
4084
4085
\begin{arab}
4086
\item Due to a bug it is possible that a segmentation fault occurs if excluded faces are used.
4087
\item The option \verb|OutputDir| is not available.
4088
\end{arab}
4089
4090
An excellent way out is to run Normaliz (and NmzIntegrate) in the Linux subsystem of Windows 10.
4091
4092
\subsection{Significant coefficients of the quasipolynomial}\label{highest_coeff}
4093
4094
If the degree and simultaneously the period of the Hilbert or weighted Ehrhart quasipolynomial are large, the space needed to store it (usually with large coefficients) may exceed the available memory. Depending on the application, only a certain number of the coefficients may be significant. Therefore one can limit the number of highest coefficients that are stored and printed. We look at the input file \texttt{CondorcetN.in}:
4095
\begin{Verbatim}
4096
amb_space 24
4097
inequalities 3
4098
1 1 1 1 1 1 -1 -1 -1 -1 -1 -1 1 1 -1 -1 1 -1 1 1 -1 -1 1 -1
4099
1 1 1 1 1 1 1 1 -1 -1 1 -1 -1 -1 -1 -1 -1 -1 1 1 1 -1 -1 -1
4100
1 1 1 1 1 1 1 1 1 -1 -1 -1 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1
4101
nonnegative
4102
total_degree
4103
nr_coeff_quasipol 2
4104
\end{Verbatim}
4105
4106
The output file shows the following information on the quasipolynomial:
4107
\begin{Verbatim}
4108
Hilbert quasi-polynomial of period 4:
4109
only 2 highest coefficients computed
4110
their common period is 2
4111
0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 15982652919 56262656
4112
1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 15528493056 56262656
4113
with common denominator = 6939597901822221635907747840000
4114
\end{Verbatim}
4115
Normaliz computes and prints only as many components of the quasipolynomial as required by the common period of the printed coefficients. Coefficients outside the requested range are printed as $0$.
4116
4117
The bound on the significant coefficients applies simultaneously to the Hilbert polynomial and the weighted Ehrhart quasipolynomial---usually one is interested in only one of them.
4118
4119
By default Normaliz computes the quasipolynomial only if the period does not exceed a preset bound, presently $10^6$. If this bound is too small for your computation, you can remove it by the option
4120
\begin{Verbatim}
4121
NoPeriodBound
4122
\end{Verbatim}
4123
4124
4125
4126
\subsection{Explicit dehomogenization}\label{dehom_ex}
4127
Inhomogeneous input for data in $\RR^{d}$ is homogenized by an extra $(d+1)$th coordinate. The dehomogenization sets the last coordinate equal to $1$. Other systems may prefer the first coordinate. By choosing an explicit dehomogenization Normaliz can be adapted to such input. The file \verb|dehomogenization.in|
4128
\begin{Verbatim}
4129
amb_space 3
4130
inequalities 2
4131
-1 1 0
4132
-1 0 1
4133
dehomogenization
4134
unit_vector 1
4135
\end{Verbatim}
4136
indicates that in this case the first variable is the homogenizing one. The output file
4137
\begin{Verbatim}
4138
1 module generators
4139
2 Hilbert basis elements of recession monoid
4140
1 vertices of polyhedron
4141
2 extreme rays of recession cone
4142
3 support hyperplanes of polyhedron (homogenized)
4143
4144
embedding dimension = 3
4145
affine dimension of the polyhedron = 2 (maximal)
4146
rank of recession monoid = 2
4147
4148
size of triangulation = 0
4149
resulting sum of |det|s = 0
4150
4151
dehomogenization:
4152
1 0 0
4153
4154
4155
module rank = 1
4156
4157
***********************************************************************
4158
4159
1 module generators:
4160
1 1 1
4161
4162
2 Hilbert basis elements of recession monoid:
4163
0 0 1
4164
0 1 0
4165
4166
1 vertices of polyhedron: 3 support hyperplanes of polyhedron (homogenized)
4167
1 1 1 -1 0 1
4168
-1 1 0
4169
2 extreme rays of recession cone: 1 0 0
4170
0 0 1
4171
0 1 0
4172
\end{Verbatim}
4173
shows that Normaliz does the computation in the same way as with implicit dehomogenization, except that now the first coordinate decides what is in the polyhedron and what belongs to the recession cone, roughly speaking.
4174
4175
Note that the dehomogenization need not be a coordinate. It can be any linear form that is nonnegative on the cone generators.
4176
4177
\subsection{Nonpointed cones}\label{Nonpointed}
4178
4179
Nonpointed cones and nonpositive monoids contain nontrivial invertible elements. The main effect is that certain data are no longer unique, or may even require a new definition. An important point to note is that cones always split off their unit groups as direct summands and the same holds for normal affine monoids. Since Normaliz computes only normal affine monoids, we can always pass to the quotient by the unit groups. Roughly speaking, all data are computed for the pointed quotient and then lifted back to the original cone and monoid. It is inevitable that some data are no longer uniquely determined, but are unique only modulo the unit group, for example the Hilbert basis and the extreme rays. Also the multiplicity and the Hilbert series are computed for the pointed quotient. From the algebraic viewpoint this means to replace the field $K$ of coefficients by the group ring $L$ of the unit group, which is a Laurent polynomial ring over $K$: instead of $K$-vector space dimensions one considers ranks over $L$.
4180
4181
\subsubsection{A nonpointed cone}
4182
4183
As a very simple example we consider the right halfplane (\verb|halfspace2.in|):
4184
\begin{Verbatim}
4185
amb_space 2
4186
inequalities 1
4187
1 0
4188
\end{Verbatim}
4189
When run in default mode, it yields the following output:
4190
\begin{Verbatim}
4191
1 Hilbert basis elements
4192
1 Hilbert basis elements of degree 1
4193
1 extreme rays
4194
1 support hyperplanes
4195
4196
embedding dimension = 2
4197
rank = 2 (maximal)
4198
external index = 1
4199
dimension of maximal subspace = 1
4200
4201
size of triangulation = 1
4202
resulting sum of |det|s = 1
4203
4204
grading:
4205
1 0
4206
4207
degrees of extreme rays:
4208
1: 1
4209
4210
Hilbert basis elements are of degree 1
4211
4212
multiplicity = 1
4213
4214
Hilbert series:
4215
1
4216
denominator with 1 factors:
4217
1: 1
4218
4219
degree of Hilbert Series as rational function = -1
4220
4221
Hilbert polynomial:
4222
1
4223
with common denominator = 1
4224
4225
rank of class group = 0
4226
class group is free
4227
4228
***********************************************************************
4229
4230
1 Hilbert basis elements of degree 1:
4231
1 0
4232
4233
0 further Hilbert basis elements of higher degree:
4234
4235
1 extreme rays:
4236
1 0
4237
4238
1 basis elements of maximal subspace:
4239
0 1
4240
4241
1 support hyperplanes:
4242
1 0
4243
\end{Verbatim}
4244
4245
In the preamble we learn that the cone contains a nontrivial subspace. In this case it is the vertical axis, and close to the end we see a basis of this subspace, namely $(0,1)$. This basis is always simultaneously a $\ZZ$-basis of the unit group of the monoid. The rest of the output is what we have gotten for the positive horizontal axis which in this case is a natural representative of the quotient modulo the maximal subspace. The quotient can always be embedded in the cone or monoid respectively, but there is no canonical choice. We could have gotten $(1,5)$ as the Hilbert basis as well.
4246
4247
Normaliz has found a grading. Of course it vanishes on the unit group, but is positive on the quotient monoid modulo the unit group.
4248
4249
Note that the data of type ``dimension'' (embedding dimension, rank, rank of recession monoid in the inhomogeneous case, affine dimension of the polyhedron) are measured before the passage to the quotient modulo the maximal subspace. The same is true for equations and congruences (which are trivial for the example above).
4250
4251
\subsubsection{A polyhedron without vertices}
4252
4253
We define the affine halfspace of the figure by \verb|gen_inhom_nonpointed.in|:
4254
\begin{Verbatim}
4255
amb_space 2
4256
cone 3
4257
1 -1
4258
-1 1
4259
0 1
4260
vertices 1
4261
-1 -1 3
4262
\end{Verbatim}
4263
4264
\begin{center}
4265
\begin{tikzpicture}[scale=0.7]
4266
4267
\filldraw[yellow] (1.333,-2) -- (-2.667,2) -- (2.5,2) -- (2.5,-2) -- cycle;
4268
4269
\foreach \x in {-2,...,2}
4270
\foreach \y in {-2,...,2}
4271
{
4272
\filldraw[fill=black] (\x,\y) circle (1.5pt);
4273
}
4274
\draw[->] (-2.5,0) -- (2.5,0);
4275
\draw[->] (0,-2.5) -- (0,2.5);
4276
\draw (1.333,-2) -- (-2.667,2);
4277
\draw (-0.33,-0.333) circle (2.5pt);
4278
\end{tikzpicture}
4279
\end{center}
4280
4281
It is clear that the ``vertex'' is not a vertex in the strict sense, but only gives a displacement of the cone. The output when run in default mode:
4282
4283
\begin{Verbatim}
4284
1 module generators
4285
1 Hilbert basis elements of recession monoid
4286
1 vertices of polyhedron
4287
1 extreme rays of recession cone
4288
2 support hyperplanes of polyhedron (homogenized)
4289
4290
embedding dimension = 3
4291
affine dimension of the polyhedron = 2 (maximal)
4292
rank of recession monoid = 2
4293
internal index = 3
4294
dimension of maximal subspace = 1
4295
4296
size of triangulation = 1
4297
resulting sum of |det|s = 3
4298
4299
dehomogenization:
4300
0 0 1
4301
4302
4303
module rank = 1
4304
4305
***********************************************************************
4306
4307
1 module generators:
4308
0 0 1
4309
4310
1 Hilbert basis elements of recession monoid:
4311
0 1 0
4312
4313
1 vertices of polyhedron:
4314
0 -2 3
4315
4316
1 extreme rays of recession cone:
4317
0 1 0
4318
4319
1 basis elements of maximal subspace:
4320
1 -1 0
4321
4322
2 support hyperplanes of polyhedron (homogenized):
4323
0 0 1
4324
3 3 2
4325
\end{Verbatim}
4326
4327
The ``vertex'' of the polyhedron shown is of course the lifted version of the vertex modulo the maximal subspace. It is not the input ``vertex'', but agrees with it up to a unit.
4328
4329
\subsubsection{Checking pointedness first}\label{IsPointed}
4330
4331
Nonpointed cones will be an exception in Normaliz computations, and therefore Normaliz assumes that the (recession) cone it must compute is pointed. Only in rare circumstances it could be advisable to have this property checked first. There is no need to do so when the dual algorithm is used since it does not require the cone to be pointed. Moreover, if an explicit grading is given or a grading dependent computation is asked for, one cannot save time by checking the pointedness first.
4332
4333
The exceptional case is a computation, say of a Hilbert basis, by the primal algorithm in which the computation of the support hyperplanes needs very long time to be completed. If you are afraid this may happen, you can force Normaliz to compute the support hyperplanes right away by adding \verb|IsPointed| to the computation goals. This is a disadvantage only if the cone is unexpectedly pointed.
4334
4335
\subsubsection{Input of a subspace} \label{subspace}
4336
4337
If a linear subspace contained in the cone is known a priori, it can be given to Normaliz via the input type \verb|subspace|. If Normaliz detects a \verb|subspace|, it appends the rows of the matrix to the generators of the cone, and additionally the negative of the sum of the rows (since we must add the subspace as a cone). If \verb|subspace| is combined with \verb|cone_and_lattice|, then the rows of \verb|subspace| are also appended to the generators of the lattice. It is not assumed that the vectors in \verb|subspace| are linearly independent or generate the maximal linear subspace of the cone. A simple example (\verb|subspace4.in|):
4338
\begin{Verbatim}
4339
amb_space 4
4340
cone 4
4341
1 0 2 0
4342
0 1 -2 1
4343
0 0 0 1
4344
0 0 0 -1
4345
subspace 1
4346
0 0 1 0
4347
\end{Verbatim}
4348
4349
From the output:
4350
\begin{Verbatim}
4351
2 Hilbert basis elements of degree 1:
4352
0 1 0 0
4353
1 0 0 0
4354
4355
0 further Hilbert basis elements of higher degree:
4356
4357
2 extreme rays:
4358
0 1 0 0
4359
1 0 0 0
4360
4361
2 basis elements of maximal subspace:
4362
0 0 1 0
4363
0 0 0 1
4364
4365
2 support hyperplanes:
4366
0 1 0 0
4367
1 0 0 0
4368
\end{Verbatim}
4369
4370
One should note that the maximal subspace is generated by the smallest face that contains all invertible elements. Therefore, in order to make all vectors in a face invertible, it is enough to put a single vector from the interior of the face into \verb|subspace|.
4371
4372
\subsubsection{Data relative to the original monoid}
4373
4374
If original monoid generators are defined, there are two data related to them that must be read with care.
4375
4376
First of all, we consider the original monoid generators as being built from the vectors in \verb|cone| or \verb|cone_and_lattice| plus the vectors in \verb|subspace| and additionally the negative of the sum of the latter (as pointed out above).
4377
4378
The test for ``Original monoid is integrally closed'' is correct -- it returns \verb|true| if and only if the original monoid as just defined indeed equals the computed integral closure. (There was a mistake in version 3.0.)
4379
4380
The ``module generators over the original monoid'' only refer to the \emph{image} of the original monoid and the image of the integral closure \emph{modulo the maximal subspace}. They do not take into account that the unit group of the integral closure may not be generated by the original generators. An example in which the lack of integral closedness is located in the unit group (\verb|normface.in|):
4381
4382
\begin{Verbatim}
4383
amb_space 5
4384
cone 4
4385
0 0 0 1 1
4386
1 0 0 1 1
4387
0 1 0 1 1
4388
0 0 1 1 1
4389
subspace 4
4390
0 0 0 0 1
4391
1 0 0 0 1
4392
0 1 0 0 1
4393
1 1 2 0 1
4394
\end{Verbatim}
4395
4396
From the output file:
4397
4398
\begin{Verbatim}
4399
...
4400
dimension of maximal subspace = 4
4401
original monoid is not integrally closed
4402
unit group index = 2
4403
...
4404
4405
1 Hilbert basis elements of degree 1:
4406
0 0 0 1 0
4407
...
4408
1 module generators over original monoid:
4409
0 0 0 0 0
4410
\end{Verbatim}
4411
The original monoid is not integrally closed since the unit group of the integral closure is strictly larger than that of the original monoid: the extension has index $2$, as indicated. The quotients modulo the unit groups are equal, as can be seen from the generator over the original monoid or the Hilbert basis (of the integral closure) that is contained in the original monoid.
4412
4413
4414
\subsection{Exporting the triangulation}\label{Triang}
4415
4416
The option \ttt{-T} asks Normaliz to export the triangulation by writing the files
4417
\ttt{<project>.tgn} and \verb|<project>.tri|:
4418
4419
\begin{itemize}
4420
4421
\itemtt[tgn] The file \ttt{tgn} contains a matrix of vectors (in the
4422
coordinates of $\AA$) spanning the simplicial cones in
4423
the triangulation.
4424
4425
\itemtt[tri]
4426
The file \ttt{tri} lists the simplicial subcones. There are two variants, depending on whether \verb|ConeDecomposition| had been set. Here we assume that \verb|ConeDecomposition| is not computed. See Section \ref{Disjoint} for the variant with \verb|ConeDecomposition|.
4427
4428
The first line contains the number of simplicial cones
4429
in the triangulation, and the next line contains the
4430
number $m+1$ where $m=\rank \EE$. Each of the following
4431
lines specifies a simplicial cone $\Delta$: the first
4432
$m$ numbers are the indices (with respect to the order
4433
in the file \ttt{tgn}) of those generators that span
4434
$\Delta$, and the last entry is the multiplicity of
4435
$\Delta$ in $\EE$, i.~e.\ the absolute value of the
4436
determinant of the matrix of the spanning vectors (as
4437
elements of $\EE$).
4438
\end{itemize}
4439
4440
The following example is the $2$-dimensional cross polytope with one excluded face (\verb|cross2.in|). The excluded face is irrelevant for the triangulation.
4441
4442
\begin{Verbatim}
4443
amb_space 3
4444
polytope 4
4445
1 0
4446
0 1
4447
-1 0
4448
0 -1
4449
excluded_faces 1
4450
1 1 -1
4451
\end{Verbatim}
4452
4453
Its \verb|tgn| and \verb|tri| files are
4454
\begin{Verbatim}
4455
tgn tri
4456
4 2
4457
3 4
4458
1 0 1 1 2 3 2
4459
0 1 1 1 3 4 2
4460
-1 0 1 plain
4461
0 -1 1
4462
\end{Verbatim}
4463
4464
We see the $4$ vertices $v_1,\dots,v_4$ in homogenized coordinates in \verb|tgn| and the $2$ simplices (or the simplicial cones over them) in \verb|tri|: both have multiplicity $2$. The last word \verb|plain| indicates that Normaliz has computed a triangulation in the strict sense, namely a simplicial subdivision in which neighboring simplicial cones match along common faces. The alternative is \verb|nested| that we discuss below.
4465
4466
In addition to the files \verb|<project>.tgn| and \verb|<project>.tri|, also the file \verb|<object>.inv| is written. It contains the data of the file \verb|<project>.out| above the line of stars in a human and machine readable format.
4467
4468
\subsubsection{Nested triangulations}\label{nested}
4469
4470
If Normaliz has subdivided a simplicial cone of a triangulation of the cone $C$, the resulting decomposition of $C$ may no longer be a triangulation in the strict sense. It is rather a \emph{nested triangulation}, namely a map from a rooted tree to the set of full-dimensional subcones of $C$ with the following properties:
4471
\begin{arab}
4472
\item the root is mapped to $C$,
4473
\item every other node is mapped to a full dimensional simplicial subcone,
4474
\item the simplicial subcones corresponding to the branches at a node $x$ form a triangulation of the simplicial cone corresponding to $x$.
4475
\end{arab}
4476
4477
The following figure shows a nested triangulation:
4478
4479
\begin{center}
4480
\begin{tikzpicture}[scale=1.0]
4481
\draw[very thick] (-4,4) -- (4,4) -- (0,0) -- cycle;
4482
\draw[thick] (-2,2) -- (2,2) -- (0,4) -- cycle;
4483
\draw (-1,3) -- (1,3) -- (0,2) -- cycle;
4484
\draw[dashed] (0.5,2.5) --(1.5,2.5) -- (1.0,2) --cycle;
4485
\end{tikzpicture}
4486
\end{center}
4487
4488
For the Normaliz computations, nested triangulations are as good as ordinary triangulations, but in other applications the difference may matter. With the option \verb|-T|, Normaliz prints the leaves of the nested triangulation to the \verb|tri| file. They constitute the simplicial cones that are finally evaluated by Normaliz.
4489
4490
The triangulation is always plain if \verb|-T| is the only computation goal or if it is just combined with \verb|-v|. Otherwise it can only fail to be plain if it contains determinants $\ge 10^8$.
4491
4492
The subdivision can be blocked by \verb|NoSubdivision|, independently of the computation goals.
4493
4494
\subsubsection{Disjoint decomposition}\label{Disjoint}
4495
4496
Normaliz can export the disjoint decomposition of the cone that it has computed. This decomposition is always computed together with a full triangulation, unless only the multiplicity is asked for. It represents the cone as the disjoint union of semiopen simplicial subcones. The corresponding closed cones constitute the triangulation, and from each of them some facets are removed so that one obtains a disjoint decomposition. See \cite{BIS} for more information. In the following figure, the facets separating the triangles are omitted in the triangle on the $-$ side.
4497
4498
\begin{center}
4499
\begin{scriptsize}
4500
%\tikzstyle{every node}=[circle, draw, fill=black, inner sep=0pt, minimum width=3pt]
4501
\begin{tikzpicture}
4502
[scale=0.7,auto=left, thick]
4503
%\foreach \from/\to in {n2/n4,n2/n5,n4/n5,n4/n6,n5/n7,n6/n7}
4504
% \foreach \x in {0, 1, ..., 9}
4505
\foreach \x/\y in {0/2, 2/0, 5/0, 5/2, 5/4, 7/0, 7/4, 9/2}
4506
\node [circle, draw, fill=black, inner sep=0pt, minimum width=2.5pt](n\x\y) at (\x,\y) {};
4507
\node [circle, draw, fill=black, inner sep=0pt, minimum width=2.5pt](n23) at (2.5,3) {};
4508
%\node [circle, draw, inner sep=0pt, minimum width=3pt, label=above:$O_C$](OC) at (2.8,1.7) {};
4509
4510
% \draw (\from) -- node[above]{$+$} node[below]{$-$} (\to);
4511
\draw (n20) -- node[right=-2pt, pos=0.4]{$+$} node[left=-2pt, pos=0.4]{$-$} (n23);
4512
\draw (n20) -- node[above=-2pt]{$+$} (n02);
4513
\draw (n50) -- node[right=-2pt]{$-$} node[left=-2pt]{$+$} (n23);
4514
\draw (n50) -- node[near end, right=-2pt]{$-$} node[near end, left=-2pt]{$+$} (n52);
4515
\draw (n52) -- node[right=-2pt]{$-$} node[left=-2pt]{$+$} (n54);
4516
\draw (n70) -- node[right=-2pt, pos=0.4]{$-$} node[left=-2pt, pos=0.4]{$+$} (n74);
4517
4518
\draw (n52) -- node[below=-2pt]{$+$} node[above=-2pt]{$-$} (n23);
4519
\draw (n52) -- node[below=-2pt]{$-$} node[above]{$+$} (n74);
4520
\draw (n50) -- node[right=-2pt]{$-$} node[left=-2pt]{$+$} (n74);
4521
4522
\draw (n02) -- node[below=-2pt]{$+$} (n23);
4523
\draw (n23) -- node[right=5pt]{$+$} (n54);
4524
\draw (n20) -- node[above=-2pt]{$+$} (n50);
4525
\draw (n50) -- node[above=-2pt]{$+$} (n70);
4526
\draw (n54) -- node[below=-2pt]{$+$} (n74);
4527
\draw (n70) -- node[above=-2pt]{$+$} (n92);
4528
\draw (n74) -- node[below=-2pt]{$+$} (n92);
4529
4530
4531
% \draw[to] (daq) -- node[midway,right] {raw event data\\level 1} (buffer);
4532
% \draw[to] (monitor) -- node[midway,above] {events} node[midway,below] {level 1} (datastore);
4533
\end{tikzpicture}
4534
\end{scriptsize}
4535
\end{center}
4536
4537
If you want to access the disjoint decomposition, you must activate the computation goal \verb|ConeDecomposition| or use the command line option \verb|-D|. As an example we compute \verb|cross2.in| with the computation goal \verb|ConeDecomposition|. The file \verb|cross2.tri| now looks as follows:
4538
4539
\begin{Verbatim}
4540
2
4541
7
4542
1 2 3 2 0 0 0
4543
2 3 4 2 0 0 1
4544
plain
4545
\end{Verbatim}
4546
4547
As before the first line contains the size of the triangulation and the second is the number of entries of each row.
4548
The first $3$ entries in each line are the indices of the extreme rays with respect to the \verb|tgn| file and the fourth entry is the determinant. They are followed by a $0/1$ vector indicating the open facets in the order in which they are opposite to the extreme rays. If the corresponding entry is $1$, the facet must be removed.
4549
4550
In our example all facets of the first simplicial cone are kept, and from the second simplicial cone the facet opposite to the third extreme ray (with index $4$ relative to \verb|tgn|) must be removed.
4551
4552
The disjoint decomposition which is the basis of all Hilbert series computations uses the algorithm suggested by K\"oppe and Verdoolaege \cite{KV}.
4553
4554
4555
\subsection{Exporting the Stanley decomposition}\label{Stanley}
4556
4557
The option \ttt{-y} makes Normaliz
4558
write the files \ttt{<project>.tgn}, \verb|<project>.dec| and \verb|<project>.inv|. Stanley decomposition is contained in the file with the suffix \verb|dec|. But this file also contains the inclusion/exclusion data if there are excluded faces:
4559
4560
(a) If there are any excluded faces, the file starts with the word
4561
\verb|in_ex_data|. The next line contains the number of such data that follow.
4562
Each of these lines contains the data of a face and the coefficient with which
4563
the face is to be counted: the first number lists the number of generators that
4564
are contained in the face, followed by the indices of the generators relative
4565
to the \verb|tgn| file and the last number is the coefficient.
4566
4567
(b) The second block (the first if there are no excluded faces) starts with
4568
the word \verb|Stanley_dec|, followed by the number of simplicial cones in the
4569
triangulation.
4570
4571
For each simplicial cone $\Delta$ in the
4572
triangulation this file contains a block of data:
4573
\begin{enumerate}
4574
\item[(i)] a line listing the indices $i_1,\dots,i_m$ of the
4575
generators $v_{i_1},\dots,v_{i_m}$ relative to the
4576
order in \ttt{tgn} (as in \ttt{tri}, $m=\rank \EE$);
4577
4578
\item[(ii)] a $\mu\times m$ matrix where $\mu$ is the
4579
multiplicity of $\Delta$ (see above).
4580
4581
In the notation of \cite{BIS}, each line lists an
4582
``offset'' $x+\epsilon(x)$ by its coordinates with
4583
respect to $v_{i_1},\dots,v_{i_m}$ as follows: if
4584
$(a_1,\dots,a_m)$ is the line of the matrix, then
4585
$$
4586
x+\epsilon(x)=\frac{1}{\mu}(a_1v_{i_1}+\dots+a_mv_{i_m}).
4587
$$
4588
\end{enumerate}
4589
4590
4591
The \verb|dec| file of the example above is
4592
\begin{Verbatim}
4593
in_ex_data
4594
1
4595
2 1 2 -1
4596
Stanley_dec
4597
2
4598
1 3 4 1 2 3
4599
2 2
4600
3 3
4601
0 0 2 0 0 0
4602
1 1 2 1 0 1
4603
\end{Verbatim}
4604
4605
There is $1$ face in \verb|in_ex_data| (namely the excluded one), it contains the $2$ generators $v_1$ and $v_2$ and appears with multiplicity $-1$. The Stanley decomposition consists of $4$ components of which each of the simplicial cones contains $2$. The second offset in the second simplicial cone is
4606
$$
4607
\frac12 (1v_1+0v_2+1v_3)=(0,0,1).
4608
$$
4609
4610
We recommend you to process the file \ttt{3x3magiceven.in} with the
4611
option \ttt{-ahTy} activated. Then inspect all
4612
the output files in the subdirectory \ttt{example} of the
4613
distribution.
4614
4615
\subsection{Module generators over the original monoid}\label{MinMod}
4616
4617
Suppose that the original generators are well defined in the input. This is always the case when the input consists just of a \verb|cone| or a \verb|cone_and_lattice|. Let $M$ be the monoid generated by them. Then Normaliz computes the integral closure $N$ of $M$ in the effective lattice $\EE$. It is often interesting to understand the difference set $N\setminus M$. After the introduction of a field $K$ of coefficients, this amounts to understanding $K[N]$ as a $K[M]$-module. With the option \verb|ModuleGeneratorsOverOriginalMonoid, -M| Normaliz computes a minimal generating set $T$ of this module. Combinatorially this means that we find an irreducible cover
4618
$$
4619
N=\bigcup_{x\in T} x+M.
4620
$$
4621
Note that $0\in T$ since $M\subset N$.
4622
\begin{center}
4623
\begin{tikzpicture}[scale=0.7]
4624
\filldraw[yellow] (0,0) -- (1.833,5.5) -- (4.5,5.5) -- (4.5,2.25) -- cycle;
4625
\draw (0,0) -- (1.833,5.5);
4626
\draw (0,0) -- (4.5,2.25) node at (-0.3,-0.3){\small $0$};
4627
\foreach \x in {0,...,4}
4628
\foreach \y in {0,...,5}
4629
{
4630
\filldraw[fill=black] (\x,\y) circle (1.5pt);
4631
}
4632
\draw[red,thick] (1,1) circle (4pt);
4633
\draw[red,thick] (2,3) circle (4pt);
4634
\draw[red,thick] (1,2) circle (4pt);
4635
\draw[red,thick] (2,2) circle (4pt);
4636
\draw[red,thick] (0,0) circle (4pt);
4637
\draw[->,thick] (0,0) -- (1,3);
4638
\draw[->,thick] (0,0) -- (2,1);
4639
\end{tikzpicture}
4640
\end{center}
4641
As an example, we can run \verb|2cone.in| with the option \verb|-M| on the command line. This yields the output
4642
\begin{Verbatim}
4643
...
4644
4 Hilbert basis elements:
4645
1 1
4646
1 2 5 module generators over original monoid:
4647
1 3 0 0
4648
2 1 1 1
4649
1 2
4650
2 extreme rays: 2 2
4651
1 3 2 3
4652
2 1
4653
\end{Verbatim}
4654
4655
In the nonpointed case Normaliz can only compute the module generators of $N/N_0$ over $M/(M\cap N_0)$ where $N_0$ is the unit group of $N$. If $N_0\neq 0$, this is not a system of generators of $N$ over $M$.
4656
4657
\subsubsection{An inhomogeneous example}
4658
4659
Let us have a look at a very simple input file (\verb|genmod_inhom2.in|):
4660
\begin{Verbatim}
4661
amb_space 2
4662
cone 2
4663
0 3
4664
2 0
4665
vertices 1
4666
0 0 1
4667
ModuleGeneratorsOverOriginalMonoid
4668
\end{Verbatim}
4669
4670
The cone is the positive orthant that we have turned into a polyhedron by adding the vertex $(0,0)$. The original monoid is generated by $(2,0)$ and $(0,3)$.
4671
4672
In addition to the original monoid $M$ and its integral closure $N$ we have a third object, namely the module $P$ of lattice points in the polyhedron. We compute
4673
\begin{enumerate}
4674
\item the system of generators of $P$ over $N$ (the \verb|module generators|) and
4675
\item the system of generators of $P$ over $M$ (the \verb|module generators over original monoid|).
4676
\end{enumerate}
4677
We do not compute the system of generators of $N$ over $M$ (that we get in the homogeneous case).
4678
4679
The output:
4680
\begin{Verbatim}
4681
1 module generators
4682
2 Hilbert basis elements of recession monoid
4683
1 vertices of polyhedron
4684
2 extreme rays of recession cone
4685
6 module generators over original monoid
4686
3 support hyperplanes of polyhedron (homogenized)
4687
4688
embedding dimension = 3
4689
affine dimension of the polyhedron = 2 (maximal)
4690
rank of recession monoid = 2
4691
internal index = 6
4692
4693
size of triangulation = 1
4694
resulting sum of |det|s = 6
4695
4696
dehomogenization:
4697
0 0 1
4698
4699
4700
module rank = 1
4701
4702
***********************************************************************
4703
4704
1 module generators:
4705
0 0 1
4706
4707
2 Hilbert basis elements of recession monoid:
4708
0 1 0
4709
1 0 0
4710
4711
1 vertices of polyhedron:
4712
0 0 1
4713
4714
2 extreme rays of recession cone:
4715
0 1 0
4716
1 0 0
4717
4718
6 module generators over original monoid:
4719
0 0 1
4720
0 1 1
4721
0 2 1
4722
1 0 1
4723
1 1 1
4724
1 2 1
4725
4726
3 support hyperplanes of polyhedron (homogenized):
4727
0 0 1
4728
0 1 0
4729
1 0 0
4730
\end{Verbatim}
4731
4732
\subsection{Lattice points in the fundamental parallelepiped}\label{LattPointsFPE}
4733
4734
Let $u_1,\dots,u_n$ be linearly independent vectors in $\ZZ^d\subset\RR^d$. They span a simplicial cone $C$. Moreover let $U$ be the subgroup of $(\RR^d,+)$ generated by $u_1,\dots,u_n$ and let $v\in\RR^d$. We are interested in the shifted cone $C'=v+C$. We assume that $C'$ contains a lattice point. This need not be true if $n<d$, but with our assumption we can also assume that $n=d$ after the restriction to the affine space spanned by $C'$. The \emph{fundamental} parallelepiped of $C$ (with respect to $U$) is
4735
$$
4736
F=\para(u_1,\dots,u_d)=\{q_1u_1+\dots+q_du_d: 0\le q_i<1 \}.
4737
$$
4738
Set $F'=v+F$. Then the translates $u+F'$, $u\in U$, tile $\RR^d$; so $F'$ is a fundamental domain for the action of $U$ on $\RR^d$ by translation, and we call $F'$ the \emph{fundamental} parallelepiped of $C'$ (with respect to $U$). Every point in $\RR^d$ differs from exactly one point in $F'$ by an element of $U$. This holds in particular for the lattice points.
4739
4740
One of the main basic tasks of Normaliz is the computation of the lattice points in $F'$, especially in the case $v=0$ (but not only). Looking back at the examples in Section \ref{MinMod}, we see that we can in fact compute and export these lattice points via the computation goal \verb|ModuleGeneratorsOverOriginalMonoid|.
4741
4742
Often however, an additional complication comes up: we must shift $F'$ by an infinitesimally small vector in order to exclude certain facets of $C'$. This would be difficult in Normaliz without the input type \verb|open_facets| (see Section \ref{open_facets}). Recall that this is a $0$-$1$-vector whose entries $1$ indicate which facets must be avoided: if its $i$-th entry is $1$, then the facet opposite to $v+u_i$ must be made ``open''.
4743
4744
The input file \verb|no_open_facets.in| is
4745
\begin{Verbatim}
4746
amb_space 2
4747
cone 2
4748
1 1
4749
-3 3
4750
vertices 1
4751
1/2 1/2 1
4752
ModuleGeneratorsOverOriginalMonoid
4753
\end{Verbatim}
4754
4755
Then \verb|no_open_facets.out| contains
4756
\begin{Verbatim}
4757
6 module generators over original monoid:
4758
-2 3 1
4759
-1 2 1
4760
-1 3 1
4761
0 1 1
4762
0 2 1
4763
1 1 1
4764
\end{Verbatim}
4765
These are the $6$ encircled points in the left figure.
4766
4767
\begin{center}
4768
\begin{tikzpicture}[scale=0.7]
4769
\filldraw[yellow] (0.5,0.5) -- (1.5,1.5) -- (-1.5,4.5) -- (-2.5,3.5) -- cycle;
4770
\foreach \x in {-3,...,3}
4771
\foreach \y in {0,...,5}
4772
{
4773
\filldraw[fill=black] (\x,\y) circle (1.5pt);
4774
}
4775
\draw[->,thick] (0.5,0.5) -- (-2.5,3.5);
4776
\draw[->,thick] (0.5,0.5) -- (1.5,1.5);
4777
\draw[dashed] (-2.5,3.5) -- (-1.5,4.5) -- (1.5,1.5);
4778
\draw node at (0,-0.5){\small $0$};
4779
\draw node at (0.5,0.1){\small $v$};
4780
\draw[red,thick] (1,1) circle (4pt);
4781
\draw[red,thick] (0,1) circle (4pt);
4782
\draw[red,thick] (-1,2) circle (4pt);
4783
\draw[red,thick] (0,2) circle (4pt);
4784
\draw[red,thick] (-2,3) circle (4pt);
4785
\draw[red,thick] (-1,3) circle (4pt);
4786
\draw (0.5,0.5) circle (4pt);
4787
\draw[blue, thick] (0.6,0.6) -- (1.6,1.6) -- (-1.4,4.6) -- (-2.4,3.6) -- cycle;
4788
\end{tikzpicture}
4789
\qquad\qquad\qquad
4790
\begin{tikzpicture}[scale=0.7]
4791
\filldraw[yellow] (1,1) -- (2,2) -- (-1,5) -- (-2,4) -- cycle;
4792
\foreach \x in {-3,...,3}
4793
\foreach \y in {0,...,5}
4794
{
4795
\filldraw[fill=black] (\x,\y) circle (1.5pt);
4796
}
4797
\draw[->,thick] (1,1) -- (-2,4);
4798
\draw[->,thick] (1,1) -- (2,2);
4799
\draw[dashed] (-2,4) -- (-1,5) -- (2,2);
4800
\draw node at (0,-0.5){\small $0$};
4801
\draw node at (1,0.6){\small $v'$};
4802
\draw[red,thick] (1,1) circle (4pt);
4803
\draw[red,thick] (1,2) circle (4pt);
4804
\draw[red,thick] (0,3) circle (4pt);
4805
\draw[red,thick] (0,2) circle (4pt);
4806
\draw[red,thick] (-1,4) circle (4pt);
4807
\draw[red,thick] (-1,3) circle (4pt);
4808
\end{tikzpicture}
4809
\end{center}
4810
Now we add
4811
\begin{Verbatim}
4812
open_facets
4813
1 0
4814
\end{Verbatim}
4815
to the input (to get \verb|open_facets.in|). We have tried to indicate the infinitesimal shift by the blue rectangle in the left figure. The computation yields
4816
\begin{Verbatim}
4817
6 module generators over original monoid:
4818
-1 3 1
4819
-1 4 1
4820
0 2 1
4821
0 3 1
4822
1 1 1
4823
1 2 1
4824
\end{Verbatim}
4825
which are the encircled lattice points in the right figure. It is explained in Section \ref{open_facets} how the new vector $v'$ is computed.
4826
4827
Note that the lattice points are listed with the homogenizing coordinate $1$. In fact, both \verb|vertices| and \verb|open_facets| make the computation inhomogeneous. If both are missing, then the lattice points are listed without the homogenizing coordinate. If you want a uniform format for the output, you can use the zero vector for \verb|open_facets| or the origin as the vertex. Both options change the result only to the extent that the homogenizing coordinate is added.
4828
4829
\subsection{Precomputed support hyperplanes}\label{supphyp_ex}
4830
4831
Computing the support hyperplanes can be a very time consuming task, and if it has been the first step in the exploration of a difficult example, it may be desirable to use the support hyperplanes as additional input in order to save computation time. This is especially true if Normaliz must do an intermediate computation of the support hyperplanes because of a large number of simplicial cones to be evaluated. The file \verb|2cone_supp.in| is just a toy example:
4832
\begin{Verbatim}
4833
amb_space 2
4834
cone 2
4835
2 1
4836
1 3
4837
support_hyperplanes 2
4838
-1 2
4839
3 -1
4840
\end{Verbatim}
4841
4842
As pointed out in Section \ref{HomConstrCone}, Normaliz must trust you---there is no way of checking the correctness of this input without recomputing it.
4843
4844
\subsection{Shift, denominator, quasipolynomial and multiplicity}\label{sdqm}
4845
4846
In this section we discuss the interplay of shift, denominator of the grading and the quasipolynomial. As long as the denominator is $1$, the situation is very simple and no ambiguity arises. See Section \ref{inhom_ineq_ex}. We modify the example from that section as follows (\verb|InhomIneq_7.in|):
4847
\begin{Verbatim}
4848
amb_space 2
4849
inhom_inequalities 3
4850
0 2 1
4851
0 -2 3
4852
2 -2 3
4853
grading
4854
7 0
4855
\end{Verbatim}
4856
The output related to the grading is
4857
\begin{Verbatim}
4858
grading:
4859
7 0 0
4860
with denominator = 7
4861
4862
4863
module rank = 2
4864
multiplicity = 2
4865
4866
Hilbert series:
4867
1 1
4868
denominator with 1 factors:
4869
1: 1
4870
4871
shift = -1
4872
4873
degree of Hilbert Series as rational function = -1
4874
4875
Hilbert polynomial:
4876
2
4877
with common denominator = 1
4878
\end{Verbatim}
4879
The Hilbert series computed by hand is
4880
$$
4881
\frac{t^{-7}+1}{1-t^7}.
4882
$$
4883
4884
We obtain it from the output as follows. The printed series is
4885
$$
4886
\frac{1+t}{1-t}.
4887
$$
4888
Now the shift is applied and yields
4889
$$
4890
\frac{t^{-1}+1}{1-t}.
4891
$$
4892
Finally we make the substitution $t\mapsto t^7$, and obtain the desired result.
4893
4894
Now we add the complication $x_1+x_2\equiv -1 \mod 8$ (\verb|InhomIneq_7_8.in|):
4895
\begin{Verbatim}
4896
amb_space 2
4897
inhom_inequalities 3
4898
0 2 1
4899
0 -2 3
4900
2 -2 3
4901
grading
4902
7 0
4903
inhom_congruences 1
4904
1 1 1 8
4905
\end{Verbatim}
4906
The result:
4907
\begin{Verbatim}
4908
grading:
4909
7 0 0
4910
with denominator = 7
4911
4912
4913
module rank = 2
4914
multiplicity = 1/4
4915
4916
Hilbert series:
4917
1 0 0 0 0 0 0 1
4918
denominator with 1 factors:
4919
8: 1
4920
4921
shift = -1
4922
4923
degree of Hilbert Series as rational function = -2
4924
4925
Hilbert series with cyclotomic denominator:
4926
-1 1 -1 1 -1 1 -1
4927
cyclotomic denominator:
4928
1: 1 4: 1 8: 1
4929
4930
Hilbert quasi-polynomial of period 8:
4931
0: 0 4: 0
4932
1: 0 5: 0
4933
2: 0 6: 1
4934
3: 0 7: 1
4935
with common denominator = 1
4936
\end{Verbatim}
4937
The printed Hilbert series is
4938
$$
4939
\frac{1+t^7}{1-t^8}.
4940
$$
4941
The application of the shift yields
4942
$$
4943
\frac{t^{-1}+t^6}{1-t^8}.
4944
$$
4945
the correct result for the divided grading. \emph{The Hilbert quasipolynomial is computed for the divided grading}, as already explained in Section \ref{cong_ex}. As a last step, we can apply the substitution $t\mapsto t^7$ in order to obtain the Hilbert series
4946
$$
4947
\frac{t^{-7}+t^{42}}{1-t^{56}}
4948
$$
4949
for the original grading.
4950
4951
Like the quasipolynomial, \emph{the multiplicity is computed for the divided grading}.
4952
4953
4954
4955
\section{Optional output files}\label{optionaloutput}
4956
4957
4958
When one of the options \ttt{Files,-f} or \ttt{AllFiles, -a} is activated, Normaliz
4959
writes additional output files whose names are of type
4960
\ttt{<project>.<type>}. (Note that the options \verb|-T, Triangulation| and \verb|-y, StanleyDec| as well as the options \verb|-E, -L, -I| calling NmzIntegrate also write files in addition to \verb|<project>.out|. \verb|Symmetrize| does not produce extra output files.) Moreover one can select the optional output files individually via command line options. Most of these files contain matrices in a simple format:
4961
\begin{Verbatim}
4962
<m>
4963
<n>
4964
<x_1>
4965
...
4966
<x_m>
4967
\end{Verbatim}
4968
where each row has \verb|<n>| entries. Exceptions are the files with suffixes \verb|cst|, \verb|inv|, \verb|esp|.
4969
4970
Note that the files are only written if they would contain at least one row.
4971
4972
As pointed out in Section \ref{outcontrol}, the optional output files for the integer hull are the same as for the original computation, as far as their content has been computed.
4973
4974
\subsection{The homogeneous case}
4975
4976
The option \ttt{-f} makes Normaliz write the following files:
4977
4978
\begin{itemize}
4979
\itemtt[gen] contains the Hilbert basis. If you want to use this file as an input file and reproduce the computation results, then you must make it a matrix of type \verb|cone_and_lattice| (and add the dehomogenization in the inhomogeneous case).
4980
4981
\itemtt[cst] contains the constraints defining the cone
4982
and the lattice in the same format as they would appear
4983
in the input: matrices of types \emph{constraints} following each
4984
other. Each matrix is concluded by the type of the constraints.
4985
Empty matrices are indicated by $0$ as the
4986
number of rows. Therefore there will always be at least
4987
$3$ matrices.
4988
4989
If a grading is defined, it will be appended. Therefore
4990
this file (with suffix \ttt{in}) as input for
4991
Normaliz will reproduce the Hilbert basis and all the
4992
other data computed, at least in principle.
4993
4994
\itemtt[inv] contains all the information from the
4995
file \ttt{out} that is not contained in any of the
4996
other files.
4997
\end{itemize}
4998
4999
If \ttt{-a} is activated, then the following files are written
5000
\emph{additionally:}
5001
5002
\begin{itemize}
5003
5004
\itemtt[ext] contains the extreme rays of the cone.
5005
5006
\itemtt[ht1] contains the degree $1$ elements of the
5007
Hilbert basis if a grading is defined.
5008
5009
\itemtt[egn,esp] These contain the Hilbert basis and
5010
support hyperplanes in the coordinates with respect to
5011
a basis of $\EE$. \ttt{esp} contains the grading and the dehomogenization in the
5012
coordinates of $\EE$. Note that no
5013
equations for $\CC\cap\EE$ or congruences for $\EE$ are
5014
necessary.
5015
5016
\itemtt[lat] contains the basis of the lattice $\EE$.
5017
5018
\itemtt[mod] contains the module generators of the integral closure modulo the original monoid.
5019
5020
\itemtt[msp] contains the basis of the maximal subspace.
5021
\end{itemize}
5022
5023
In order to select one or more of these files individually, add an option of type \verb|--<suffix>| to the command line where \verb|<suffix>| can take the values
5024
\begin{Verbatim}
5025
gen, cst, inv, ext, ht1, egn, esp, lat, mod, msp, typ
5026
\end{Verbatim}
5027
5028
The type \verb|typ| is not contained in \verb|Files| or \verb|AllFiles| since it can be extremely large. It is of the matrix format described above. It is the product of the matrices
5029
corresponding to \ttt{egn} and the transpose of \ttt{esp}. In other
5030
words, the linear forms representing the support
5031
hyperplanes of the cone $C$ are evaluated on the
5032
Hilbert basis. The resulting matrix, with the
5033
generators corresponding to the rows and the support
5034
hyperplanes corresponding to the columns, is written to
5035
this file.
5036
5037
The suffix \ttt{typ} is motivated by the fact that the
5038
matrix in this file depends only on the isomorphism
5039
type of monoid generated by the Hilbert basis (up to
5040
row and column permutations). In the language of
5041
\cite{BG} it contains the \emph{standard embedding}.
5042
5043
Note: the explicit choice of an optional output file does \emph{not} imply a computation goal. Output files that would contain unknown data are simply not written without a warning or error message.
5044
5045
\subsection{Modifications in the inhomogeneous case}
5046
5047
The optional output files are a subset of those that can be produced in the homogeneous
5048
case. The main difference is that the generators of the solution module and the
5049
Hilbert basis of the recession monoid appear together in the file \verb|gen|.
5050
They can be distinguished by evaluating the dehomogenization on them (simply the last component with inhomogeneous input), and the
5051
same applies to the vertices of the polyhedron and extreme rays of the
5052
recession cone. The file \verb|cst| contains the constraints defining the
5053
polyhedron and the recession module in conjunction with the dehomogenization, which is also contained in the \verb|cst| file, following the constraints.
5054
5055
With \verb|-a| the files \verb|egn| and \verb|esp| are produced. These files contain \verb|gen| and the support hyperplanes of the homogenized cone in the coordinates of $\EE$, as well as the dehomogenization.
5056
5057
%%%%%%%%%%%%%%%%%%%%%%%%%%%% TERMINAL OUTPUT %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5058
5059
%\section{Terminal output}
5060
5061
%%%%%%%%%%%%%%%%%%%%%%%%%%%% PERFORMANCE AND PARALLELIZATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5062
5063
5064
5065
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% LARGE COMPUTATIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5066
5067
\section{Performance}\label{Perf}
5068
5069
\subsection{Parallelization}\label{PerfPar}
5070
5071
The executables of Normaliz have been compiled for parallelization
5072
on shared memory systems with OpenMP. Parallelization reduces the
5073
``real'' time of the computations considerably, even on relatively
5074
small systems. However, one should not underestimate the
5075
administrational overhead involved.
5076
\begin{itemize}
5077
\item It is not a good idea to use parallelization for very small problems.
5078
\item On multi-user systems with many processors it may be wise to limit
5079
the number of threads for Normaliz somewhat below the maximum
5080
number of cores.
5081
\end{itemize}
5082
By default, Normaliz limits the number of threads to 8. One can override this limit by the Normaliz
5083
option \ttt{-x} (see Section \ref{exec}).
5084
5085
Another way to set an upper limit to the number of threads is via the environment variable \verb|OMP_NUM_THREADS|:
5086
\begin{center}
5087
\verb+export OMP_NUM_THREADS=<T>+\qquad (Linux/Mac)
5088
\end{center}
5089
or
5090
\begin{center}
5091
\verb+set OMP_NUM_THREADS=<T>+\qquad (Windows)
5092
\end{center}
5093
where \ttt{<T>} stands for the maximum number of threads
5094
accessible to Normaliz. For example, we often use
5095
\begin{center}
5096
\verb+export OMP_NUM_THREADS=20+
5097
\end{center}
5098
on a multi-user system with $24$ cores.
5099
5100
Limiting the number of threads to $1$ forces a strictly serial
5101
execution of Normaliz.
5102
5103
The paper \cite{BIS} contains extensive data on the effect of parallelization. On the whole Normaliz scales very well.
5104
However, the dual algorithm often performs best with mild parallelization, say with $4$ or $6$ threads.
5105
5106
\subsection{Running large computations}\label{Large}
5107
5108
Normaliz can cope with very large examples, but it
5109
is usually difficult to decide a priori whether an example is
5110
very large, but nevertheless doable, or simply impossible.
5111
Therefore some exploration makes sense.
5112
5113
See \cite{BIS} for some very large computations. The following
5114
hints reflect the authors' experience with them.
5115
5116
(1) Run Normaliz with the option \ttt{-cs} and pay attention
5117
to the terminal output. The number of extreme rays, but also
5118
the numbers of support hyperplanes of the intermediate cones
5119
are useful data.
5120
5121
(2) In many cases the most critical size parameter is the
5122
number of simplicial cones in the triangulation. It makes sense
5123
to determine it as the next step. Even with the fastest
5124
potential evaluation (option \ttt{-v}), finding the
5125
triangulation takes less time, say by a factor between $3$ and
5126
$10$. Thus it makes sense to run the example with \ttt{-t} in
5127
order to explore the size.
5128
5129
As you can see from \cite{BIS}, Normaliz has successfully
5130
evaluated triangulations of size $\approx 5\cdot 10^{11}$ in
5131
dimension $24$.
5132
5133
(3) Another critical parameter are the determinants of the
5134
generator matrices of the simplicial cones. To get some feeling
5135
for their sizes, one can restrict the input to a subset (of the
5136
extreme rays computed in (1)) and use the option \ttt{-v} or the computation goal \verb|TriangulationDetSum| if there is no grading.
5137
5138
The output file contains the number of simplicial cones as well
5139
as the sum of the absolute values of the determinants. The
5140
latter is the number of vectors to be processed by Normaliz
5141
in triangulation based calculations.
5142
5143
The number includes the zero vector for every simplicial cone
5144
in the triangulation. The zero vector does not enter the
5145
Hilbert basis calculation, but cannot be neglected for the
5146
Hilbert series.
5147
5148
Normaliz has mastered calculations with $> 10^{15}$ vectors.
5149
5150
(4) If the triangulation is small, we can add the option
5151
\ttt{-T} in order to actually see the triangulation in a file.
5152
Then the individual determinants become visible.
5153
5154
(5) If a cone is defined by inequalities and/or equations
5155
consider the dual mode for Hilbert basis calculation, even if
5156
you also want the Hilbert series.
5157
5158
(6) The size of the triangulation and the size of the
5159
determinants are \emph{not} dangerous for memory by themselves
5160
(unless \ttt{-T} or \ttt{-y} are set). Critical magnitudes can
5161
be the number of support hyperplanes, Hilbert basis candidates,
5162
or degree $1$ elements.
5163
5164
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% DISTRIBUTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5165
\section{Distribution and installation}\label{Distr}
5166
5167
In order to install Normaliz you should first download the
5168
basic package. Follow the instructions in
5169
\begin{center}
5170
\url{http://normaliz.uos.de/download/}.
5171
\end{center}
5172
They guide you to our GitHub repository.
5173
5174
The basic package contains the documentation, examples, source
5175
code, jNormaliz, NmzIntegrate and the packages for PyNormaliz, Singular
5176
and Macaulay2. Then unzip the downloaded file
5177
\ttt{\NmzDir.zip} in a directory of your choice. (Any other
5178
downloaded zip file for Normaliz should be unzipped in this
5179
directory, too.)
5180
5181
This process will create a directory \ttt{\NmzDir} (called
5182
Normaliz directory) and several subdirectories in
5183
it. The names of the subdirectories created are
5184
self-explanatory. Nevertheless we give an overview:
5185
\begin{itemize}
5186
\item In the Normaliz directory you should
5187
find \ttt{jNormaliz.jar}, several files, for example \ttt{Copying}, and
5188
subdirectories.
5189
5190
\item The subdirectory \ttt{source} contains the source files.
5191
5192
\item The subdirectory \ttt{doc} contains the file you are reading
5193
and further documentation.
5194
5195
\item In the subdirectory \ttt{example} are the input
5196
files for some examples. It contains all named
5197
input files of examples of this manual.
5198
5199
\item Automated tests which run Normaliz on different inputs
5200
and options are contained in the subdirectory \ttt{test}.
5201
5202
\item The subdirectory \ttt{Singular} contains the
5203
\textsc{Singular} library \ttt{normaliz.lib} and a PDF file with
5204
documentation.
5205
5206
\item The subdirectory \ttt{Macaulay2} contains the
5207
\textsc{Macaulay2} package \ttt{Normaliz.m2}.
5208
5209
\item The subdirectory \ttt{PyNormaliz} contains the source
5210
\textsc{Python} interface.
5211
5212
\item The subdirectory \ttt{lib} contains libraries for
5213
jNormaliz.
5214
5215
\item Moreover, there are subdirectories whose name starts with \verb|Q|. They contain
5216
the source code, examples and tests for QNormaliz (see Appendix \ref{QNorm})
5217
\end{itemize}
5218
5219
We provide executables for Windows, Linux and Mac. Download the archive file
5220
corresponding to your system \ttt{\NmzDir<systemname>.zip}
5221
and unzip it. This process will store the executables of
5222
Normaliz and NmzIntegrate in the Normaliz directory. In case you want to run Normaliz from the
5223
command line or use it from other systems, you may have to copy
5224
the executables to a directory in the search path for
5225
executables.
5226
5227
Please read the release notes on the download page. Especially on Mac you may want to build Normaliz yourself since it is impossible to provide a statically linked executable with parallelization.
5228
5229
%%%%%%%%%%%%%%%%%%%%%%%%%%%%% COMPILATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
5230
\section{Building Normaliz yourself}\label{Compile}
5231
5232
Normaliz offers the luxury of three build systems:
5233
\begin{enumerate}
5234
\item \ttt{autotools},
5235
\item \ttt{cmake}
5236
\item ``classical'' \ttt{make} using the ``handwritten'' \verb|Makefile.classic|.
5237
\end{enumerate}
5238
5239
The basic steps are the same for all three systems, namely
5240
\begin{itemize}
5241
\item configuration,
5242
\item compilation,
5243
\item installation.
5244
\end{itemize}
5245
The main difference is the way in which the build system is configured, whereas compilation and installation are essentially identical. In all cases the compilation of NmzIntegrate is included (as well as the compilation of the example program maxsimplex in Appendix \ref{maxsimplex}).
5246
5247
In the following we describe the basic steps of \verb|autotools| and \verb|cmake| for Linux 64 bit under certain standard assumptions. Comments on Mac OS follow in Section \ref{mac}. The file \verb|INSTALL| in the directory \verb|source| contains more information. \verb|Makefile.classic| is only meant for Linux and the development of Normaliz.
5248
5249
The \verb|autotools| scripts have been written by Matthias K\"oppe. The Normaliz team thanks him cordially for his generous help.
5250
5251
\subsection{Compiler prerequisites}
5252
5253
We require some C++11 features (e.g. \verb|std::exception_ptr|), supported by:
5254
\begin{itemize}
5255
\item GNU g++ 4.4,
5256
\item clang++ 2.9,
5257
\item Intel icpc 12.0
5258
\end{itemize}
5259
See \url{https://github.com/Normaliz/Normaliz/issues/26} for a more detailed discussion.
5260
5261
The mentioned compilers are also able to handle OpenMP 3.0, with the exception of clang++, where OpenMP support was first introduced in version 3.7.
5262
5263
\subsection{Required libraries}
5264
5265
For compiling Normaliz the following libraries are needed:
5266
\begin{itemize}
5267
\item GMP including the C++ wrapper (libgmpxx and libgmp)
5268
\item Boost (headers only)
5269
\end{itemize}
5270
5271
We will only discuss the basic use of cmake for compilation, see the file \verb|source/INSTALL| for additional information, especially on how to use customized paths. Also the use of the \ttt{autotools} system is explained in this file. The ``classical'' Makefile is meant for development. Therefore you should use \verb|autotools| or \verb|cmake|.
5272
5273
The installation will store the files in standard locations, and we assume in the following that they do not need individual include paths.
5274
5275
\subsection{Optional packages}
5276
5277
As discussed in the manual, Normaliz can use SCIP. If you want
5278
to use it, SCIP must be installed before the compilation of Normaliz, independently of
5279
the method used for building Normaliz.
5280
5281
To build SCIP download the scipoptsuite at \url{http://scip.zib.de/}. Notice that SCIP
5282
is not distributed under GPL, but the ZIB Academic License (\url{http://scip.zib.de/academic.txt}).
5283
Unpack it and then compile it with
5284
\begin{Verbatim}
5285
make ZLIB=false GMP=false READLINE=false scipoptlib
5286
\end{Verbatim}
5287
5288
Another optional package is CoCoALib. It is necessary if you want to compute integrals or weighted Ehrhart series and, hence, for symmetrization. If you want to compile Normaliz with CoCoALib, install CoCoALib first.
5289
The following sequence of commands will install it in the subdirectory \verb|CoCoA| of your home directory.
5290
\begin{Verbatim}
5291
mkdir ~/CoCoA/
5292
cd ~/CoCoA/
5293
wget http://cocoa.dima.unige.it/cocoalib/tgz/CoCoALib-0.99550.tgz
5294
tar xvf CoCoALib-0.99550.tgz
5295
cd CoCoALib-0.99550
5296
./configure --threadsafe-hack --no-boost
5297
make library -j2
5298
\end{Verbatim}
5299
5300
If CoCoALib-0.99550 should no longer be available, replace it by a newer version.
5301
5302
\subsection{autotools}
5303
5304
To build Normaliz with the \verb|autotools| system, navigate to the Normaliz directory and issue the following sequence of commands:
5305
\begin{Verbatim}
5306
./configure
5307
make
5308
\end{Verbatim}
5309
This will compile Normaliz, but most likely without SCIP and CoCoALib since they are optional libraries mentioned above and must be found. If they are not located at standard places, you must specify their paths. Examples (on the machine of a Normaliz team member):
5310
\begin{Verbatim}
5311
./configure --with-scipoptsuite-src=$HOME/SCIP/scipoptsuite-3.2.0/
5312
\end{Verbatim}
5313
or
5314
\begin{Verbatim}
5315
./configure --with-cocoalib=$HOME/CoCoA/CoCoALib-0.99550
5316
\end{Verbatim}
5317
or with both paths. If the libraries are found, Normaliz will be compiled with SCIP and CoCoALib, respectively, by the \verb|make| command. Check the terminal output of \verb|./configure| for success.
5318
5319
The next step is
5320
\begin{Verbatim}
5321
make
5322
\end{Verbatim}
5323
After this step you will find \verb|normaliz| in the directory \verb|source| (and \verb|maxsimplex| in its directory).
5324
5325
The last, optional step is
5326
\begin{Verbatim}
5327
sudo make install
5328
\end{Verbatim}
5329
It copies the header files, the library \verb|libnormaliz| and the executables (except \verb|maxsimplex|) into subdirectories of \verb|/usr/local|. It is of course possible to specify another installation path in the call of \verb|./configure|.
5330
5331
Note: In case you have checked out Normaliz from GitHub, the very first step is \verb|./bootstrap.sh|.
5332
5333
Unfortunately, the paths for SCIP are version dependent. We have tested versions 3.2.0 and 3.2.1.
5334
5335
\subsection{cmake}\label{cmake}
5336
5337
You may need to install \verb|cmake| first:
5338
\begin{Verbatim}
5339
sudo apt-get install cmake cmake-curses-gui
5340
\end{Verbatim}
5341
5342
To build Normaliz with \verb|cmake|, start by creating a build directory within the Normaliz directory, say \verb|BUILD|. Then change the working directory to \verb|BUILD|.
5343
5344
The basic configuration (equivalent to \verb|configure| of \verb|autotools|) is
5345
\begin{Verbatim}
5346
cmake ../source
5347
\end{Verbatim}
5348
Then \verb|make| and \verb|make install| will complete the basic installation.
5349
5350
For the inclusion of SCIP, use (for example)
5351
\begin{Verbatim}
5352
SCIP_DIR=$HOME/SCIP/scipoptsuite-3.2.0/ cmake ../source
5353
\end{Verbatim}
5354
replacing \verb|$HOME/SCIP/scipoptsuite-3.2.0/| with your own path to SCIP if necessary. Similarly,
5355
\begin{Verbatim}
5356
COCOA_DIR=$HOME/CoCoA/CoCoALib-0.99550 cmake ../source/
5357
\end{Verbatim}
5358
Then \verb|make| and \verb|make install| will complete the work. After \verb|make| the executables can be found in \verb|BUILD| and its subdirectory \verb|maxsimplex|.
5359
5360
The main advantage of \verb|cmake| is the existence of a GUI in which you can change most settings originally chosen by \verb|cmake|. Call \verb|ccmake ../source| (2 times c) or, for a more sophisticated version, \verb|cmake-gui ../source|.
5361
5362
Note: Unfortunately, the paths for SCIP are version dependent. The configuration files for SCIP presently can find the versions 3.2.0 and 3.2.1. For another version you must edit the file \verb|FindSCIP.cmake| in \verb|source/cmake/Modules|.
5363
5364
5365
\subsection{Mac OS X}\label{mac}
5366
5367
Currently Apple does not supply a compiler which supports OpenMP.
5368
We recommend the use of LLVM 3.9 or newer from Homebrew. See
5369
\url{http://brew.sh/} from where you can also download GMP and Boost. For details see \ttt{INSTALL} in the directory \ttt{source} of the distribution.
5370
5371
For building Normaliz under Mac OS we recommend autotools if you want to use SCIP.
5372
5373
You can then follow the instructions for Linux.
5374
5375
Note: Do not try to compile Normaliz with static libraries for Mac OS X.
5376
5377
\subsection{Windows}
5378
5379
One can compile Windows executables with the Cygwin port of GCC. Unfortunately it is not compatible with OpenMP.
5380
5381
Using Visual Studio is a bit tricky. Microsoft's C++ compiler does not support OpenMP 3.0. Creating a Normaliz Visual Studio project via cmake is currently not fully supported. The executables that are offered in the Normaliz distribution have been compiled with Intel icpc and a manually created project. Please contact us if you want to build Normaliz on Windows.
5382
5383
Note that the statically linked Linux binaries run in the Linux subsystem of Windows 10. We have not yet tried to build Normaliz in it.
5384
5385
\section{Copyright and how to cite}
5386
5387
Normaliz 3.1 is free software licensed under the GNU General
5388
Public License, version 3. You can redistribute it and/or
5389
modify it under the terms of the GNU General Public License as
5390
published by the Free Software Foundation, either version 3 of
5391
the License, or (at your option) any later version.
5392
5393
It is distributed in the hope that it will be useful, but
5394
WITHOUT ANY WARRANTY; without even the implied warranty of
5395
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5396
GNU General Public License for more details.
5397
5398
You should have received a copy of the GNU General Public
5399
License along with the program. If not, see
5400
\url{http://www.gnu.org/licenses/}.
5401
5402
Please refer to Normaliz in any publication for which it has
5403
been used:
5404
\begin{center}
5405
W. Bruns, B. Ichim, T. R\"omer, R. Sieg and C. S\"oger: Normaliz. Algorithms for
5406
rational cones and affine monoids. Available at
5407
\url{http://normaliz.uos.de}
5408
\end{center}
5409
The corresponding \verb|\bibitem|:
5410
\begin{Verbatim}
5411
\bibitem{Normaliz} W. Bruns, B. Ichim, T. R\"omer, R. Sieg and C. S\"oger:
5412
Normaliz. Algorithms for rational cones and affine monoids.
5413
Available at \url{http://normaliz.uos.de}.
5414
\end{Verbatim}
5415
5416
A BibTeX entry:
5417
\begin{Verbatim}
5418
@Misc{Normaliz,
5419
author = {W. Bruns and B. Ichim and T. R\"omer and R. Sieg and C. S\"oger},
5420
title = {Normaliz. Algorithms for rational cones and affine monoids},
5421
howpublished = {Available at \url{http://normaliz.uos.de}}
}
5422
\end{Verbatim}
5423
5424
It is now customary to evaluate mathematicians by such data as
5425
numbers of publications, citations and impact factors. The data
5426
bases on which such dubious evaluations are based do not list
5427
mathematical software. Therefore we ask you to cite the article
5428
\cite{BIS} in addition. This is very helpful for the younger
5429
members of the team.
5430
5431
\newpage
5432
5433
\appendix
5434
5435
\section{Mathematical background and terminology}
5436
5437
For a coherent and thorough treatment of the mathematical background we refer the reader to \cite{BG}.
5438
5439
\subsection{Polyhedra, polytopes and cones}
5440
5441
An \emph{affine halfspace} of $\RR^d$ is a subset given as
5442
$$
5443
H_\lambda^+=\{x: \lambda(x)\ge 0\},
5444
$$
5445
where $\lambda$ is an affine form, i.e., a non-constant map $\lambda:\RR^d\to\RR$, $\lambda(x)=\alpha_1x_1+\dots+\alpha_dx_d+\beta$ with $\alpha_1,\dots,\alpha_d,\beta\in\RR$. If $\beta=0$ and $\lambda$ is therefore linear, then the halfspace is called \emph{linear}. The halfspace is \emph{rational} if $\lambda$ is \emph{rational}, i.e., has rational coordinates. If $\lambda$ is rational, we can assume that it is even \emph{integral}, i.e., has integral coordinates, and, moreover, that these are coprime. Then $\lambda$ is uniquely determined by $H_\lambda^+$. Such integral forms are called \emph{primitive}, and the same terminology applies to vectors.
5446
5447
\begin{definition}
5448
A (rational) \emph{polyhedron} $P$ is the intersection of finitely many (rational) halfspaces. If it is bounded, then it is called a \emph{polytope}. If all the halfspaces are linear, then $P$ is a \emph{cone}.
5449
5450
The \emph{dimension} of $P$ is the dimension of the smallest affine subspace $\aff(P)$ containing $P$.
5451
\end{definition}
5452
5453
5454
A support hyperplane of $P$ is an affine hyperplane $H$ that intersects $P$, but only in such a way that $P$ is contained in one of the two closed halfspaces determined by $H$. The intersection $H\cap P$ is called a \emph{face} of $P$. It is a polyhedron (polytope, cone) itself. Faces of dimension $0$ are called \emph{vertices}, those of dimension $1$ are called \emph{edges} (in the case of cones \emph{extreme rays}), and those of dimension $\dim(P)-1$ are \emph{facets}.
5455
5456
When we speak of \emph{the} support hyperplanes of $P$, then we mean those intersecting $P$ in a facet. Their halfspaces containing $P$ cut out $P$ from $\aff(P)$. If $\dim(P)=d$, then they are uniquely determined (up to a positive scalar).
5457
5458
The constraints by which Normaliz describes polyhedra are
5459
\begin{arab}
5460
\item linear equations for $\aff(P)$ and
5461
\item linear inequalities (simply called support hyperplanes) cutting out $P$ from $\aff(P)$.
5462
\end{arab}
5463
In other words, the constraints are given by a linear system of equations and inequalities, and a polyhedron is nothing else than the solution set of a linear system of inequalities and equations. It can always be represented in the form
5464
$$
5465
Ax\ge b, \qquad A\in\RR^{m\times d}, b\in \RR^m,
5466
$$
5467
if we replace an equation by two inequalities.
5468
5469
\subsection{Cones}
5470
5471
The definition describes a cone by constraints. One can equivalently describe it by generators:
5472
5473
\begin{theorem}[Minkowski-Weyl]
5474
The following are equivalent for $C\subset\RR^d$:
5475
\begin{enumerate}
5476
\item $C$ is a (rational) cone;
5477
\item there exist finitely many (rational) vectors $x_1,\dots,x_n$ such that
5478
$$
5479
C=\{a_1x_1+\dots+a_nx_n:a_1,\dots,a_n\in\RR_+\}.
5480
$$
5481
\end{enumerate}
5482
\end{theorem}
5483
5484
By $\RR_+$ we denote the set of nonnegative real numbers; $\QQ_+$ and $\ZZ_+$ are defined in the same way.
5485
5486
The conversion between the description by constraints and that by generators is one of the basic tasks of Normaliz. It uses the \emph{Fourier-Motzkin elimination}.
5487
5488
Let $C_0$ be the set of those $x\in C$ for which $-x\in C$ as well. It is the largest vector subspace contained in $C$.
5489
A cone is \emph{pointed} if $C_0=0$. If a rational cone is pointed, then it has uniquely determined \emph{extreme integral generators}. These are the primitive integral vectors spanning the extreme rays. These can also be defined with respect to a sublattice $L$ of $\ZZ^d$, provided $C$ is contained in $\RR L$. If a cone is not pointed, then Normaliz computes the extreme rays of the pointed $C/C_0$ and lifts them to $C$. (Therefore they are only unique modulo $C_0$.)
5490
5491
The \emph{dual cone} $C^*$ is given by
5492
$$
5493
C^*=\{\lambda\in (\RR^d)^*:\lambda(x)\ge0 \text{ for all } x\in C\}.
5494
$$
5495
Under the identification $\RR^d=(\RR^d)^{**}$ one has $C^{**}=C$. Then one has
5496
$$
5497
\dim C_0+\dim C^*=d.
5498
$$
5499
In particular, $C$ is pointed if and only if $C^*$ is full dimensional, and this is the criterion for pointedness used by Normaliz. Linear forms $\lambda_1,\dots,\lambda_n$ generate $C^*$ if and only if $C$ is the intersection of the halfspaces $H_{\lambda_i}^+$. Therefore the conversion from constraints to generators and its converse are the same task, except for the exchange of $\RR^d$ and its dual space.
5500
5501
\subsection{Polyhedra}
5502
5503
In order to transfer the Minkowski-Weyl theorem to polyhedra it is useful to homogenize coordinates by embedding $\RR^d$ as a hyperplane in $\RR^{d+1}$, namely via
5504
$$
5505
\kappa:\RR^d\to\RR^{d+1},\qquad \kappa(x)=(x,1).
5506
$$
5507
If $P$ is a (rational) polyhedron, then the closure of the union of the rays from $0$ through the points of $\kappa(P)$ is a (rational) cone $C(P)$, called the \emph{cone over} $P$. The intersection $C(P)\cap(\RR^d\times\{0\})$ can be identified with the \emph{recession} (or tail) \emph{cone}
5508
$$
5509
\rec(P)=\{x\in\RR^d: y+x\in P\text{ for all } y\in P\}.
5510
$$
5511
It is the cone of unbounded directions in $P$. The recession cone is pointed if and only if $P$ has at least one bounded face, and this is the case if and only if it has a vertex.
5512
5513
The theorem of Minkowski-Weyl can then be generalized as follows:
5514
5515
\begin{theorem}[Motzkin]
5516
The following are equivalent for a subset $P\neq\emptyset$ of $\RR^d$:
5517
\begin{enumerate}
5518
\item $P$ is a (rational) polyhedron;
5519
\item $P=Q+C$ where $Q$ is a (rational) polytope and $C$ is a (rational) cone.
5520
\end{enumerate}
5521
If $P$ has a vertex, then the smallest choice for $Q$ is the convex hull of its vertices, and $C=\rec(P)$ is uniquely determined.
5522
\end{theorem}
5523
5524
The \emph{convex hull} of a subset $X\subset\RR^d$ is
5525
$$
5526
\conv(X)=\{a_1x_1+\dots+a_nx_n: n\ge 1, x_1,\dots,x_n\in X, a_1,\dots,a_n\in\RR_+, a_1+\dots+a_n=1\}.
5527
$$
5528
5529
Clearly, $P$ is a polytope if and only if $\rec(P)=\{0\}$, and by specialization to this case one obtains Minkowski's theorem: a subset $P$ of $\RR^d$ is a polytope if and only if it is the convex hull of a finite set. A \emph{lattice polytope} is distinguished by having integral points as vertices.
5530
5531
Normaliz computes the recession cone and the polytope $Q$ if $P$ is defined by constraints. Conversely it finds the constraints if the vertices of $Q$ and the generators of $C$ are specified.
5532
5533
Suppose that $P$ is given by a system
5534
$$
5535
Ax\ge b, \qquad A\in\RR^{m\times d},\ b\in \RR^m,
5536
$$
5537
of linear inequalities (equations are replaced by two inequalities). Then $C(P)$ is defined by the \emph{homogenized system}
5538
$$
5539
Ax-x_{d+1}b\ge 0
5540
$$
5541
whereas the $\rec(P)$ is given by the \emph{associated homogeneous system}
5542
$$
5543
Ax\ge 0.
5544
$$
5545
5546
It is of course possible that $P$ is empty if it is given by constraints since inhomogeneous systems of linear equations and inequalities may be unsolvable. By abuse of language we call the solution set of the associated homogeneous system the recession cone of the system.
5547
5548
Via the concept of dehomogenization, Normaliz allows for a more general approach. The \emph{dehomogenization} is a linear form $\delta$ on $\RR^{d+1}$. For a cone $\widetilde C$ in $\RR^{d+1}$ and a dehomogenization $\delta$, Normaliz computes the polyhedron $P=\{x\in \widetilde C: \delta(x)=1\}$ and the recession cone $C=\{x\in \widetilde C: \delta(x)=0\}$. In particular, this allows other choices of the homogenizing coordinate. (Often one chooses $x_0$, the first coordinate then.)
5549
5550
In the language of projective geometry, $\delta(x)=0$ defines the hyperplane at infinity.
5551
5552
\subsection{Affine monoids}
5553
5554
An \emph{affine monoid} $M$ is a finitely generated submonoid of $\ZZ^d$ for some $d\ge0$. This means: $0\in M$, $M+M\subset M$, and there exist $x_1,\dots,x_n$ such that
5555
$$
5556
M=\{a_1x_1+\dots+a_nx_n: a_1,\dots,a_n\in\ZZ_+\}.
5557
$$
5558
We say that $x_1,\dots,x_n$ is a \emph{system of generators} of $M$. A monoid $M$ is positive if $x\in M$ and $-x\in M$ implies $x=0$. An element $x$ in a positive monoid $M$ is called \emph{irreducible} if it has no decomposition $x=y+z$ with $y,z\in M$, $y,z\neq0$. The \emph{rank} of $M$ is the rank of the subgroup $\gp(M)$ of $\ZZ^d$ generated by $M$. (Subgroups of $\ZZ^d$ are also called sublattices.)
5559
For certain aspects of monoid theory it is very useful (or even necessary) to introduce coefficients from a field $K$ (or a more general commutative ring) and consider the monoid algebra $K[M]$.
5560
5561
5562
\begin{theorem}[van der Corput]
5563
Every positive affine monoid $M$ has a unique minimal system of generators, given by its irreducible elements.
5564
\end{theorem}
5565
5566
We call the minimal system of generators the \emph{Hilbert basis} of $M$. Normaliz computes Hilbert bases of a special type of affine monoid:
5567
5568
\begin{theorem}[Gordan's lemma]
5569
Let $C\subset\RR^d$ be a (pointed) rational cone and let $L\subset \ZZ^d$ be a sublattice. Then $C\cap L$ is a (positive) affine monoid.
5570
\end{theorem}
5571
5572
The monoids $M=C\cap L$ of the theorem have the pleasant property that the group of units $M_0$ (i.e., elements whose inverse also belongs to $M$) splits off as a direct summand. Therefore $M/M_0$ is a well-defined affine monoid. If $M$ is not positive, then Normaliz computes a Hilbert basis of $M/M_0$ and lifts it to $M$.
5573
5574
Let $M\subset \ZZ^d$ be an affine monoid, and let $N\supset M$ be an overmonoid (not necessarily affine), for example a sublattice $L$ of $\ZZ^d$ containing $M$.
5575
5576
\begin{definition}
5577
The \emph{integral closure} (or \emph{saturation}) of $M$ in $N$ is the set
5578
$$
5579
\widehat M_N=\{x\in N: kx\in M \text{ for some } k\in \ZZ, k>0\}.
5580
$$
5581
If $\widehat M_N=M$, one calls $M$ \emph{integrally closed} in $N$.
5582
5583
The integral closure $\overline M$ of $M$ in $\gp(M)$ is its \emph{normalization}. $M$ is \emph{normal} if $\overline M=M$.
5584
\end{definition}
5585
5586
The integral closure has a geometric description:
5587
5588
\begin{theorem}\label{incl_cone}
5589
$$
5590
\widehat M_N =\cone(M)\cap N.
5591
$$
5592
\end{theorem}
5593
5594
Combining the theorems, we can say that Normaliz computes integral closures of affine monoids in lattices, and the integral closures are themselves affine monoids as well. (More generally, $\widehat M_N$ is affine if $M$ and $N$ are affine.)
5595
5596
In order to specify the intersection $C\cap L$ by constraints we need a system of homogeneous inequalities for $C$. Every sublattice of $\ZZ^d$ can be written as the solution set of a combined system of homogeneous linear diophantine equations and a homogeneous system of congruences (this follows from the elementary divisor theorem). Thus $C\cap L$ is the solution set of a homogeneous linear diophantine system of inequalities, equations and congruences. Conversely, the solution set of every such system is a monoid of type $C\cap L$.
5597
5598
In the situation of Theorem \ref{incl_cone}, if $\gp(N)$ has finite rank as a $\gp(M)$-module, $\widehat M_N$ is even a finitely generated module over $M$. I.e., there exist finitely many elements $y_1,\dots,y_m\in \widehat M_N$ such that $\widehat M_N=\bigcup_{i=1}^m M+y_i$. Normaliz computes a minimal system $y_1,\dots,y_m$ and lists the nonzero $y_i$ as a system of module generators of $\widehat M_N$ modulo $M$. We must introduce coefficients to make this precise: Normaliz computes a minimal system of generators of the $K[M]$-module $K[\widehat M_N]/K[M]$.
5599
5600
\subsection{Affine monoids from binomial ideals}\label{binomials}
5601
5602
Let $U$ be a subgroup of $\ZZ^n$. Then the natural image $M$ of
5603
$\ZZ_+^n\subset\ZZ^n$ in the abelian group $G=\ZZ^n/U$ is a
5604
submonoid of $G$. In general, $G$ is not torsionfree, and
5605
therefore $M$ may not be an affine monoid. However, the image
5606
$N$ of $M$ in the lattice $L=G/\textup{torsion}(G)$ is an affine
5607
monoid. Given $U$, Normaliz chooses an embedding
5608
$L\hookrightarrow\ZZ^r$, $r=n-\rank U$, such that $N$ becomes a
5609
submonoid of $\ZZ_+^r$. In general there is no canonical choice
5610
for such an embedding, but one can always find one, provided
5611
$N$ has no invertible element except $0$.
5612
5613
The typical starting point is an ideal $J\subset
5614
K[X_1,\dots,X_n]$ generated by binomials
5615
$$
5616
X_1^{a_1}\cdots X_n^{a_n}-X_1^{b_1}\cdots X_n^{b_n}.
5617
$$
5618
The image of $K[X_1,\dots,X_n]$ in the residue class ring of
5619
the Laurent polynomial ring $S=K[X_1^{\pm1},\dots,X_n^{\pm1}]$
5620
modulo the ideal $JS$ is exactly the monoid algebra $K[M]$ of
5621
the monoid $M$ above if we let $U$ be the subgroup of $\ZZ^n$
5622
generated by the differences
5623
$$
5624
(a_1,\dots,a_n)-(b_1,\dots,b_n).
5625
$$
5626
5627
Ideals of type $JS$ are called lattice ideals if they are
5628
prime. Since Normaliz automatically passes to
5629
$G/\textup{torsion}(G)$, it replaces $JS$ by the smallest lattice
5630
ideal containing it.
5631
5632
\subsection{Lattice points in polyhedra}\label{latt_hedra}
5633
5634
Let $P\subset \RR^d$ be a rational polyhedron and $L\subset \ZZ^d$ be an \emph{affine sublattice}, i.e., a subset $w+L_0$ where $w\in\ZZ^d$ and $L_0\subset \ZZ^d$ is a sublattice. In order to investigate (and compute) $P\cap L$ one again uses homogenization: $P$ is extended to $C(P)$ and $L$ is extended to $\cL=L_0+\ZZ(w,1)$. Then one computes $C(P)\cap \cL$. Via this ``bridge'' one obtains the following inhomogeneous version of Gordan's lemma:
5635
5636
\begin{theorem}
5637
Let $P$ be a rational polyhedron with vertices and $L=w+L_0$ an affine lattice as above. Set $\rec_L(P)=\rec(P)\cap L_0$. Then there exist $x_1,\dots,x_m\in P\cap L$ such that
5638
$$
5639
P\cap L=(x_1+\rec_L(P))\cup\dots\cup(x_m+\rec_L(P)).
5640
$$
5641
If the union is irredundant, then $x_1,\dots,x_m$ are uniquely determined.
5642
\end{theorem}
5643
5644
The Hilbert basis of $\rec_L(P)$ is given by $\{x: (x,0)\in \Hilb(C(P)\cap\cL)\}$ and the minimal system of generators can also be read off the Hilbert basis of $C(P)\cap \cL$: it is given by those $x$ for which $(x,1)$ belongs to $\Hilb(C(P)\cap\cL)$. (Normaliz computes the Hilbert basis of $C(P)\cap \cL$ only at ``levels'' $0$ and $1$.)
5645
5646
We call $\rec_L(P)$ the \emph{recession monoid} of $P$ with respect to $L$ (or $L_0$). It is justified to call $P\cap L$ a \emph{module} over $\rec_L(P)$. In the light of the theorem, it is a finitely generated module, and it has a unique minimal system of generators.
5647
5648
After the introduction of coefficients from a field $K$, $\rec_L(P)$ is turned into an affine monoid algebra, and $N=P\cap L$ into a finitely generated torsionfree module over it. As such it has a well-defined \emph{module rank} $\mrank(N)$, which is computed by Normaliz via the following combinatorial description: Let $x_1,\dots,x_m$ be a system of generators of $N$ as above; then $\mrank(N)$ is the cardinality of the set of residue classes of $x_1,\dots,x_m$ modulo $\rec_L(P)$.
5649
5650
Clearly, to model $P\cap L$ we need linear diophantine systems of inequalities, equations and congruences which now will be inhomogeneous in general. Conversely, the set of solutions of such a system is of type $P\cap L$.
5651
5652
5653
\subsection{Hilbert series}
5654
5655
Normaliz can compute the Hilbert series and the Hilbert
5656
(quasi)polynomial of a graded monoid. A \emph{grading} of a
5657
monoid $M$ is simply a homomorphism $\deg:M\to\ZZ^g$ where
5658
$\ZZ^g$ contains the degrees. The \emph{Hilbert series} of $M$
5659
with respect to the grading is the formal Laurent series
5660
$$
5661
H(t)=\sum_{u\in \ZZ^g} \#\{x\in M: \deg x=u\}t_1^{u_1}\cdots t_g^{u_g}=\sum_{x\in M}t^{\deg x},
5662
$$
5663
provided all sets $\{x\in M: \deg x=u\}$ are finite. At the moment, Normaliz can only handle the case $g=1$, and therefore we restrict ourselves to this case. We assume in the following that $\deg x >0$ for all nonzero $x\in M$ and that there exists an $x\in\gp(M)$ such that $\deg x=1$. (Normaliz always rescales the grading accordingly.) In the case of a nonpositive monoid, these conditions must hold for $M/M_0$, and its Hilbert series is considered as the Hilbert series of $M$.
5664
5665
The basic fact about $H(t)$ in the $\ZZ$-graded case is that it
5666
is the Laurent expansion of a rational function at the origin:
5667
\begin{theorem}[Hilbert, Serre; Ehrhart]
5668
Suppose that $M$ is a normal positive affine monoid. Then
5669
$$
5670
H(t)=\frac{R(t)}{(1-t^e)^r},\qquad R(t)\in\ZZ[t], %\label{raw}
5671
$$
5672
where $r$ is the rank of $M$ and $e$ is the least common multiple
5673
of the degrees of the extreme integral generators of $\cone(M)$. As a rational function, $H(t)$ has negative degree.
5674
\end{theorem}
5675
5676
The statement about the rationality of $H(t)$ holds under much more general hypotheses.
5677
5678
Usually one can find denominators for $H(t)$ of much lower
5679
degree than that in the theorem, and Normaliz tries to
5680
give a more economical presentation of $H(t)$ as a quotient of
5681
two polynomials. One should note that it is not clear what the
5682
most natural presentation of $H(t)$ is in general (when $e>1$).
5683
We discuss this problem in \cite[Section 4]{BIS}. The examples \ref{rational} and \ref{magiceven} may serve as
5684
an illustration.
5685
5686
A rational cone $C$ and a grading together define the rational
5687
polytope $Q=C\cap A_1$ where $A_1=\{x:\deg x=1\}$. In this
5688
sense the Hilbert series is nothing but the Ehrhart series of
5689
$Q$.
5690
The following description of the Hilbert function $H(M,k)=\#\{x\in M: \deg x=k\}$ is equivalent to the previous theorem:
5691
5692
\begin{theorem}
5693
There exists a quasipolynomial $q$ with rational coefficients, degree $\rank M-1$ and period $\pi$ dividing $e$ such that $H(M,k)=q(k)$ for all $k\ge0$.
5694
\end{theorem}
5695
5696
The statement about the quasipolynomial means that there exist
5697
polynomials $q^{(j)}$, $j=0,\dots,\pi-1$, of degree $\rank M-1$ such that
5698
$$
5699
q(k)=q^{(j)}(k),\qquad j\equiv k\pod \pi,
5700
$$
5701
and
5702
$$
5703
q^{(j)}(k)=q^{(j)}_0+q^{(j)}_1k+\dots+q^{(j)}_{r-1}k^{r-1},\qquad r=\rank M,
5704
$$
5705
with coefficients $q^{(j)}_i\in \QQ$. It is not hard to show that in the case of affine monoids all components have the same degree $r-1$ and the same leading coefficient:
5706
$$
5707
q_{r-1}=\frac{\vol(Q)}{(r-1)!},
5708
$$
5709
where $\vol$ is the lattice normalized volume of $Q$ (a lattice simplex of smallest possible volume has volume $1$). The \emph{multiplicity} of $M$, denoted by $e(M)$, is $(r-1)!q_{r-1}=\vol(Q)$.
5710
5711
Suppose now that $P$ is a rational polyhedron in $\RR^d$, $L\subset\ZZ^d$ is an affine lattice, and we consider $N=P\cap L$ as a module over $M=\rec_L(P)$. Then we must give up the condition that $\deg$ takes the value $1$ on $\gp(M)$ (see Section \ref{sdqm} for an example). But the Hilbert series
5712
$$
5713
H_N(t)=\sum_{x\in N} t^{\deg x}
5714
$$
5715
is well-defined, and the qualitative statement above about rationality remains valid. However, in general the quasipolynomial gives the correct value of the Hilbert function only for $k>r$ where $r$ is the degree of the Hilbert series as a rational function.
5716
5717
Let $m$ be the gcd of the numbers $\deg x$, $x\in M$. (For $M=\{0\}$ we set $m=1$.) Then we must define $e(M)=e'(M)/m$ where $e'(M)$ is the multiplicity of $M$ with respect to the normalized grading $\deg/m$. The multiplicity of $N$ is given by
5718
$$
5719
e(N)=\mrank(N)e(M).
5720
$$
5721
5722
Since $N$ may have generators in negative degrees, Normaliz shifts the degrees into $\ZZ_+$ by subtracting a constant, called the \emph{shift}. (The shift may also be positive.)
5723
5724
\subsection{The class group}
5725
5726
A normal affine monoid $M$ has a well-defined divisor class group. It is naturally isomorphic to the divisor class group of $K[M]$ where $K$ is a field (or any unique factorization domain); see \cite[4.F]{BG}, and especially \cite[4.56]{BG}. The class group classifies the divisorial ideals up to isomorphism. It can be computed from the standard embedding that sends an element $x$ of $\gp(M)$ to the vector $\sigma(x)$ where $\sigma$ is the collection of support forms $\sigma_1,\dots,\sigma_s$ of $M$: $\Cl(M)=\ZZ^s/\sigma(\gp(M))$. Finding this quotient amounts to an application of the Smith normal form to the matrix of $\sigma$.
5727
5728
5729
5730
\section{Annotated console output}\label{Console}
5731
5732
\subsection{Primal mode}
5733
5734
With
5735
\begin{Verbatim}
5736
./normaliz -ch example/A443
5737
\end{Verbatim}
5738
we get the following terminal output.
5739
5740
\begin{Verbatim}
5741
\.....|
5742
Normaliz 3.2.0 \....|
5743
\...|
5744
(C) The Normaliz Team, University of Osnabrueck \..|
5745
January 2017 \.|
5746
\|
5747
************************************************************
5748
Command line: -ch example/A443
5749
Compute: HilbertBasis HilbertSeries
5750
************************************************************
5751
starting primal algorithm with full triangulation ...
5752
Roughness 1
5753
Generators sorted by degree and lexicographically
5754
Generators per degree:
5755
1: 48
5756
\end{Verbatim}
5757
Self explanatory so far (see Section \ref{bottom_dec} for the definition of roughness). Now the generators are inserted.
5758
\begin{Verbatim}
5759
Start simplex 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 19 22 25 26 27 28 31 34
5760
37 38 39 40 43 46
5761
\end{Verbatim}
5762
Normaliz starts by searching linearly independent generators with indices as small as possible. They span the start simplex in the triangulation. The remaining generators are inserted successively. (If a generator does not increase the cone spanned by the previous ones, it is not listed, but this does not happen for \verb|A443|.)
5763
\begin{Verbatim}
5764
gen=17, 39 hyp, 4 simpl
5765
\end{Verbatim}
5766
We have now reached a cone with $39$ support hyperplanes and the triangulation has $4$ simplices so far. We omit some generators until something interesting happens:
5767
\begin{Verbatim}
5768
gen=35, 667 hyp, 85 pyr, 13977 simpl
5769
\end{Verbatim}
5770
In view of the number of simplices in the triangulation and the number of support hyperplanes, Normaliz has decided to build pyramids and to store them for later triangulation.
5771
\begin{Verbatim}
5772
gen=36, 723 hyp, 234 pyr, 14025 simpl
5773
...
5774
gen=48, 4948 hyp, 3541 pyr, 14856 simpl
5775
\end{Verbatim}
5776
All generators have been processed now. Fortunately our cone is pointed:
5777
\begin{Verbatim}
5778
Pointed since graded
5779
Select extreme rays via comparison ... done.
5780
\end{Verbatim}
5781
Normaliz knows two methods for finding the extreme rays. Instead of ``comparison'' you may see ``rank''.
5782
Now the stored pyramids must be triangulated. They may produce not only simplices, but also pyramids of higher level, and indeed they do so:
5783
\begin{Verbatim}
5784
**************************************************
5785
level 0 pyramids remaining: 3541
5786
**************************************************
5787
**************************************************
5788
all pyramids on level 0 done!
5789
**************************************************
5790
level 1 pyramids remaining: 5935
5791
**************************************************
5792
**************************************************
5793
all pyramids on level 1 done!
5794
**************************************************
5795
level 2 pyramids remaining: 1567
5796
**************************************************
5797
1180 pyramids remaining on level 2, evaluating 2503294 simplices
5798
\end{Verbatim}
5799
At this point the preset size of the evaluation buffer for simplices has been exceeded. Normaliz stops the processing of pyramids, and empties the buffer by evaluating the simplices.
5800
\begin{Verbatim}
5801
||||||||||||||||||||||||||||||||||||||||||||||||||
5802
2503294 simplices, 0 HB candidates accumulated.
5803
**************************************************
5804
all pyramids on level 2 done!
5805
**************************************************
5806
level 3 pyramids remaining: 100
5807
**************************************************
5808
**************************************************
5809
all pyramids on level 3 done!
5810
\end{Verbatim}
5811
This is a small computation, and the computation of pyramids goes level by level without the necessity to return to a lower level. But in larger examples the buffer for level $n+1$ may be filled before level $n$ is finished. Then it becomes necessary to go back. Some simplices remaining in the buffer are now evaluated:
5812
\begin{Verbatim}
5813
evaluating 150978 simplices
5814
||||||||||||||||||||||||||||||||||||||||||||||||||
5815
2654272 simplices, 0 HB candidates accumulated.
5816
Adding 1 denominator classes... done.
5817
\end{Verbatim}
5818
Since our generators form the Hilbert basis, we do not collect any further candidates. If all generators are in degree $1$, we have only one denominator class in the Hilbert series, but otherwise there may be many. The collection of the Hilbert series in denominator classes reduces the computations of common denominators to a minimum.
5819
\begin{Verbatim}
5820
Total number of pyramids = 14137, among them simplicial 2994
5821
\end{Verbatim}
5822
Some statistics of the pyramid decomposition.
5823
\begin{Verbatim}
5824
------------------------------------------------------------
5825
transforming data... done.
5826
\end{Verbatim}
5827
Our computation is finished.
5828
5829
A typical pair of lines that you will see for other examples is
5830
\begin{Verbatim}
5831
auto-reduce 539511 candidates, degrees <= 1 3 7
5832
reducing 30 candidates by 73521 reducers
5833
\end{Verbatim}
5834
It tells you that Normaliz has found a list of $539511$ new candidates for the Hilbert basis, and this list is reduced against itself (auto-reduce). Then the $30$ old candidates are reduced against the $73521$ survivors of the auto-reduction.
5835
5836
\subsection{Dual mode}
5837
5838
Now we give an example of a computation in dual mode. It is started by the command
5839
\begin{Verbatim}
5840
./normaliz -cid example/5x5
5841
\end{Verbatim}
5842
The option \verb|i| is used to suppress the \verb|HSOP| in the input file. The console output:
5843
5844
\begin{Verbatim}
5845
\.....|
5846
Normaliz 3.2.0 \....|
5847
\...|
5848
(C) The Normaliz Team, University of Osnabrueck \..|
5849
January 2017 \.|
5850
\|
5851
************************************************************
5852
Command line: -cid example/5x5
5853
Compute: DualMode
5854
No inequalities specified in constraint mode, using non-negative orthant.
5855
************************************************************
5856
\end{Verbatim}
5857
Indeed, we have used only equations as the input.
5858
\begin{Verbatim}
5859
************************************************************
5860
computing Hilbert basis ...
5861
==================================================
5862
cut with halfspace 1 ...
5863
Final sizes: Pos 1 Neg 1 Neutral 0
5864
\end{Verbatim}
5865
The cone is cut out from the space of solutions of the system of equations (in this case) by successive intersections with halfspaces defined by the inequalities. After such an intersection we have the positive half space, the ``neutral'' hyperplane and the negative half space. The final sizes given are the numbers of Hilbert basis elements strictly in the positive half space, strictly in the negative half space, and in the hyperplane. This pattern is repeated until all hyperplanes have been used.
5866
\begin{Verbatim}
5867
==================================================
5868
cut with halfspace 2 ...
5869
Final sizes: Pos 1 Neg 1 Neutral 1
5870
\end{Verbatim}
5871
We leave out some hyperplanes \dots
5872
\begin{Verbatim}
5873
==================================================
5874
cut with halfspace 20 ...
5875
auto-reduce 1159 candidates, degrees <= 13 27
5876
Final sizes: Pos 138 Neg 239 Neutral 1592
5877
==================================================
5878
cut with halfspace 21 ...
5879
Positive: 1027 Negative: 367
5880
..................................................
5881
Final sizes: Pos 1094 Neg 369 Neutral 1019
5882
\end{Verbatim}
5883
Sometimes reduction takes some time, and then Normaliz may issue a message on ``auto-reduction'' organized by degree (chosen for the algorithm, not defined by the given grading). The line of dots is printed if the computation of new Hilbert basis candidates takes time, and Normaliz wants to show you that it is not sleeping. Normaliz shows you the number of positive and negative partners that must be paired to produce offspring.
5884
\begin{Verbatim}
5885
==================================================
5886
cut with halfspace 25 ...
5887
Positive: 1856 Negative: 653
5888
..................................................
5889
auto-reduce 1899 candidates, degrees <= 19 39
5890
Final sizes: Pos 1976 Neg 688 Neutral 2852
5891
\end{Verbatim}
5892
All hyperplanes have been taken care of.
5893
\begin{Verbatim}
5894
Find extreme rays
5895
Find relevant support hyperplanes
5896
\end{Verbatim}
5897
Well, in connection with the equations, some hyperplanes become superfluous. In the output file Normaliz will list a minimal set of support hyperplanes that together with the equations define the cone.
5898
\begin{Verbatim}
5899
Hilbert basis 4828
5900
\end{Verbatim}
5901
The number of Hilbert basis elements computed is the sum of the last positive and neutral numbers.
5902
\begin{Verbatim}
5903
Find degree 1 elements
5904
\end{Verbatim}
5905
The input file contains a grading.
5906
\begin{Verbatim}
5907
transforming data... done.
5908
\end{Verbatim}
5909
Our example is finished.
5910
5911
The computation of the new Hilbert basis after the intersection with the new hyperplane proceeds in rounds, and there can be many rounds (though not in the above example). Then you can see terminal output like
5912
\begin{Verbatim}
5913
Round 100
5914
Round 200
5915
Round 300
5916
Round 400
5917
Round 500
5918
\end{Verbatim}
5919
5920
\section{Normaliz 2 input syntax}\label{OldSyntax}
5921
5922
A Normaliz 2 input file contains a sequence of matrices. Comments or options are not allowed in it. A matrix has the format
5923
\begin{Verbatim}
5924
<m>
5925
<n>
5926
<x_1>
5927
...
5928
<x_m>
5929
<type>
5930
\end{Verbatim}
5931
where \verb|<m>| denotes the number of rows, \verb|<n>| is the number of columns and \verb|<x_1>...<x_m>| are the rows with \verb|<n>| entries each. All matrix types of Normaliz 3 are allowed (with Normaliz 3), also \verb|grading| and \verb|dehomogenization|. These vectors must be encoded as matrices with $1$ row.
5932
5933
The optional output files with suffix \verb|cst| are still in this format. Just create one and inspect it.
5934
5935
5936
5937
5938
5939
\section{libnormaliz}\label{libnorm}
5940
5941
\begin{small}
5942
5943
The kernel of Normaliz is the C++ class library \verb|libnormaliz|. It implements all the classes that are necessary for the computations. The central class is \verb|Cone|. It realizes the communication with the calling program and starts the computations most of which are implemented in other classes. In the following we describe the class \verb|Cone|; other classes of \verb|libnormaliz| may follow in the future.
5944
5945
Of course, Normaliz itself is the prime example for the use of \verb|libnormaliz|, but it is rather complicated because of the input and output it must handle. Therefore we have added a simple example program at the end of this introduction.
5946
5947
\verb|libnormaliz| defines its own name space. In the following we assume that
5948
\begin{Verbatim}
5949
using namespace std;
5950
using namespace libnormaliz;
5951
\end{Verbatim}
5952
have been declared. It is clear that opening these name spaces is dangerous. In this documentation we only do it to avoid constant repetition of \verb|std::| and \verb|libnormaliz::|.
5953
5954
\subsection{Integer type as a template parameter}
5955
5956
A cone can be constructed for two integer types, \verb|long long| and \verb|mpz_class|. It is reasonable to choose \verb|mpz_class| since the main computations will then be tried with \verb|long long| and restarted with \verb|mpz_class| if \verb|long long| cannot store the results. This internal change of integer type is not possible if the cone is constructed for \verb|long long|. (Nevertheless, the linear algebra routines can use \verb|mpz_class| locally if intermediate results exceed \verb|long long|; have a look into \verb|matrix.cpp|.)
5957
5958
Internally the template parameter is called \verb|Integer|. In the following we assume that the integer type has been fixed as follows:
5959
\begin{Verbatim}
5960
typedef mpz_class Integer;
5961
\end{Verbatim}
5962
5963
The internal passage from \verb|mpz_class| to \verb|long long| can be suppressed by
5964
\begin{Verbatim}
5965
MyCone.deactivateChangeOfPrecision();
5966
\end{Verbatim}
5967
where we assume that \verb|MyCone| has been constructed as described in the next section.
5968
5969
\subsubsection{Alternative integer types}
5970
5971
The predefined alternative to \verb|mpz_class| is \verb|long long|. It is possible to use libnormaliz with other integer types than \verb|mpz_class| or \verb|long long|, but we have tested only these types.
5972
5973
In an alternative configuration you have to include \verb|libnormaliz-all.cpp|. (In this case you do not need to link \verb|libnormaliz.a|). If you want to use other types, you probably have to implement some conversion functions which you can find in \verb|integer.h|. Namely the functions
5974
\begin{Verbatim}
5975
bool libnormaliz::try_convert(TypeA, TypeB);
5976
// converts TypeB to TypeA, returns false if not possible
5977
\end{Verbatim}
5978
where one type is your type and the other is \verb|long|, \verb|long long| or \verb|mpz_class|.
5979
Additionally, if your type uses infinite precision (for example, it is some wrapper for GMP), you must also implement
5980
\begin{Verbatim}
5981
template<> inline bool libnormaliz::using_GMP<YourType>() { return true; }
5982
\end{Verbatim}
5983
5984
\subsubsection{Decimal fractions and floating point numbers}
5985
5986
libnormaliz has a type \verb|nmz_float| (presently set to \verb|double|) that allows the input of decimal fractions and floating point numbers. Input data of type \verb|nmz_float| are first converted into \verb|mpq_class| by using the GMP constructor of \verb|mpq_class|.
5987
5988
\subsection{Construction of a cone}
5989
5990
The construction requires the specification of input data consisting of one or more matrices and the input types they represent.
5991
5992
The term ``matrix'' stands for
5993
\begin{Verbatim}
5994
vector<vector<Integer> >
5995
\end{Verbatim}
5996
5997
The available input types (from \verb|libnormaliz.h|) are defined as follows:
5998
\begin{Verbatim}
5999
namespace Type {
6000
enum InputType {
6001
integral_closure,
6002
polyhedron,
6003
normalization,
6004
polytope,
6005
rees_algebra,
6006
inequalities,
6007
strict_inequalities,
6008
signs,
6009
strict_signs,
6010
equations,
6011
congruences,
6012
inhom_inequalities,
6013
dehomogenization,
6014
inhom_equations,
6015
inhom_congruences,
6016
lattice_ideal,
6017
grading,
6018
excluded_faces,
6019
lattice,
6020
saturation,
6021
cone,
6022
offset,
6023
vertices,
6024
support_hyperplanes,
6025
cone_and_lattice,
6026
subspace,
6027
open_facets
6028
};
6029
} //end namespace Type
6030
\end{Verbatim}
6031
The input types are explained in Section \ref{input}. In certain environments it is not possible to use the enumeration. Therefore we provide a function that converts a string into the corresponding input type:
6032
\begin{Verbatim}
6033
Type::InputType to_type(const string& type_string)
6034
\end{Verbatim}
6035
6036
The types \verb|grading|, \verb|dehomogenization|, \verb|offset| and \verb|open_facets| must be encoded as matrices with a single row. We come back to this point below.
6037
6038
The simplest constructor has the syntax
6039
\begin{Verbatim}
6040
Cone<Integer>::Cone(InputType input_type, const vector< vector<Integer> >& Input)
6041
\end{Verbatim}
6042
and can be used as in the following example:
6043
\begin{Verbatim}
6044
vector<vector <Integer> > Data = ...
6045
Type::InputType type = cone;
6046
Cone<Integer> MyCone = Cone<Integer>(type, Data);
6047
\end{Verbatim}
6048
For two and three pairs of type and matrix there are the constructors
6049
\begin{Verbatim}
6050
Cone<Integer>::Cone(InputType type1, const vector< vector<Integer> >& Input1,
6051
InputType type2, const vector< vector<Integer> >& Input2)
6052
6053
Cone<Integer>::Cone(InputType type1, const vector< vector<Integer> >& Input1,
6054
InputType type2, const vector< vector<Integer> >& Input2,
6055
InputType type3, const vector< vector<Integer> >& Input3)
6056
\end{Verbatim}
6057
6058
If you have to combine more than three matrices, you can define a
6059
\begin{Verbatim}
6060
map <InputType, vector< vector<Integer> > >
6061
\end{Verbatim}
6062
and use the constructor with syntax
6063
\begin{Verbatim}
6064
Cone<Integer>::Cone(const map< InputType,
6065
vector< vector<Integer> > >& multi_input_data)
6066
\end{Verbatim}
6067
6068
The four constructors also exist in a variant that uses the \verb|libnormaliz| type \verb|Matrix<Integer>| instead of \verb|vector< vector<Integer> >| (see \verb|cone.h|).
6069
6070
For the input of rational numbers we have all constructors also in variants that use \verb|mpq_class| for the input matrix, for example
6071
\begin{Verbatim}
6072
Cone<Integer>::Cone(InputType input_type, const vector< vector<mpq_class> >& Input)
6073
\end{Verbatim}
6074
etc.
6075
6076
Similarly, for the input of decimal fractions and floating point numbers we have all constructors also in variants that use \verb|nmz_float| for the input matrix, for example
6077
\begin{Verbatim}
6078
Cone<Integer>::Cone(InputType input_type, const vector< vector<nmz_float> >& Input)
6079
\end{Verbatim}
6080
etc.
6081
6082
For convenience we provide the function
6083
\begin{Verbatim}
6084
vector<vector<T> > to_matrix<Integer>(vector<T> v)
6085
\end{Verbatim}
6086
in \verb|matrix.h|. It returns a matrix whose first row is \verb|v|. A typical example:
6087
\begin{Verbatim}
6088
size_t dim = ...
6089
vector<vector <Integer> > Data = ...
6090
Type::InputType type = cone;
6091
vector<Integer> total_degree(dim,1);
6092
Type::InputType grad = grading;
6093
Cone<Integer> MyCone = Cone<Integer>(type, Data,grad,to_matrix(total_degree));
6094
\end{Verbatim}
6095
6096
There is a default constructor for cones,
6097
\begin{Verbatim}
6098
Cone<Integer>::Cone()
6099
\end{Verbatim}
6100
6101
\subsubsection{Setting the polynomial}
6102
6103
The polynomial needed for integrals and weighted Ehrhart series must be passed to the cone after construction:
6104
\begin{Verbatim}
6105
void Cone<Integer>::setPolynomial(string poly)
6106
\end{Verbatim}
6107
6108
\subsubsection{Setting the number of significant coefficients of the quasipolynomial}
6109
6110
This is done by
6111
\begin{Verbatim}
6112
void Cone<Integer>::setNrCoeffQuasiPol(long nr_coeff)
6113
\end{Verbatim}
6114
The default value of \verb|nr_coeff| is $-1$, signaling ``all coefficients''.
6115
6116
6117
\subsection{Computations in a cone}
6118
6119
Before starting a computation in a (previously constructed) cone, one must decide what should be computed and in which way it should be computed. The computation goals and algorithmic variants (see Section \ref{Goals}) are defined as follows (\verb|cone_property.h|):
6120
\begin{Verbatim}
6121
namespace ConeProperty {
6122
enum Enum {
6123
//
6124
// goals that can be computed (or are defined by input data)
6125
//
6126
// matrix valued
6127
Generators,
6128
ExtremeRays,
6129
VerticesFloat,
6130
VerticesOfPolyhedron,
6131
SupportHyperplanes,
6132
HilbertBasis,
6133
ModuleGenerators,
6134
Deg1Elements,
6135
ModuleGeneratorsOverOriginalMonoid,
6136
Sublattice,
6137
ExcludedFaces,
6138
OriginalMonoidGenerators,
6139
MaximalSubspace,
6140
Equations,
6141
Congruences,
6142
//vector valued
6143
Grading,
6144
Dehomogenization,
6145
WitnessNotIntegrallyClosed,
6146
GeneratorOfInterior,
6147
// Cardinalities
6148
TriangulationSize,
6149
// Integer valued,
6150
TriangulationDetSum,
6151
ReesPrimaryMultiplicity,
6152
GradingDenom,
6153
UnitGroupIndex,
6154
InternalIndex,
6155
ExternalIndex,
6156
// rational valued
6157
Multiplicity,
6158
Integral, // new
6159
VirtualMultiplicity, // new
6160
// dimensions
6161
RecessionRank,
6162
AffineDim,
6163
ModuleRank,
6164
Rank,
6165
EmbeddingDim,
6166
// boolean valued
6167
IsPointed,
6168
IsDeg1ExtremeRays,
6169
IsDeg1HilbertBasis,
6170
IsIntegrallyClosed,
6171
IsReesPrimary,
6172
IsInhomogeneous,
6173
IsGorenstein,
6174
// complex structures
6175
Triangulation,
6176
StanleyDec,
6177
InclusionExclusionData,
6178
ClassGroup,
6179
IntegerHull,
6180
ConeDecomposition,
6181
HilbertSeries,
6182
HilbertQuasiPolynomial,
6183
WeightedEhrhartSeries, // new
6184
WeightedEhrhartQuasiPolynomial, // new
6185
//
6186
// integer type for computations
6187
//
6188
BigInt,
6189
//
6190
// algorithmic variants
6191
//
6192
DefaultMode,
6193
Approximate,
6194
BottomDecomposition,
6195
NoBottomDec,
6196
DualMode,
6197
        PrimalMode,
6198
Projection,
6199
ProjectionFloat,
6200
NoProjection,
6201
Symmetrize,
6202
NoSymmetrization,
6203
NoSubdivision,
6204
NoNestedTri, // synonym for NoSubdivision
6205
KeepOrder,
6206
HSOP,
6207
NoPeriodBound,
6208
//
6209
// checking properties of already computed data
6210
// (cannot be used as a computation goal)
6211
//
6212
IsTriangulationNested,
6213
IsTriangulationPartial,
6214
6215
EnumSize // this has to be the last entry ...
6216
};
6217
}
6218
\end{Verbatim}
6219
6220
The class \verb|ConeProperties| is based on this enumeration. Its instantiations are essentially boolean vectors that can be accessed via the names in the enumeration. Instantiations of the class are used to set computation goals and algorithmic variants and to check whether the goals have been reached. The distinction between computation goals and algorithmic variants is not completely strict. See Section \ref{Goals} for implications between some \verb|ConeProperties|.
6221
6222
There exist versions of \verb|compute| for up to $3$ cone properties:
6223
\begin{Verbatim}
6224
ConeProperties Cone<Integer>::compute(ConeProperty::Enum cp)
6225
6226
ConeProperties Cone<Integer>::compute(ConeProperty::Enum cp1,
6227
ConeProperty::Enum cp2)
6228
6229
ConeProperties Cone<Integer>::compute(ConeProperty::Enum cp1,
6230
ConeProperty::Enum cp2, ConeProperty::Enum cp3)
6231
\end{Verbatim}
6232
6233
An example:
6234
\begin{Verbatim}
6235
MyCone.compute(ConeProperty::HilbertBasis, ConeProperty::Multiplicity)
6236
\end{Verbatim}
6237
6238
If you want to specify more than $3$ cone properties, you can define an instance of \verb|ConeProperties| yourself and call
6239
\begin{Verbatim}
6240
ConeProperties Cone<Integer>::compute(ConeProperties ToCompute)
6241
\end{Verbatim}
6242
6243
An example:
6244
\begin{Verbatim}
6245
ConeProperties Wanted;
6246
Wanted.set(ConeProperty::Triangulation, ConeProperty::HilbertBasis);
6247
MyCone.compute(Wanted);
6248
\end{Verbatim}
6249
6250
All \verb|get...| functions that are listed in the next section try to compute the data asked for if they have not yet been computed. Unless you are interested in a single result, we recommend to use \verb|compute| since the data asked for can then be computed in a single run. For example, if the Hilbert basis and the multiplicity are wanted, then it would be a bad idea to call \verb|getHilbertBasis| and \verb|getMultiplicity| consecutively. More important, however, is the lack of algorithmic variants if you use \verb|get...| without \verb|compute| beforehand.
6251
6252
If \verb|DefaultMode| is not set, then \verb|compute()| will throw a \verb|NotComputableException| so that \verb|compute()| cannot return a value. In the presence of \verb|DefaultMode|, the returned \verb|ConeProperties| are those that have not been computed.
6253
6254
Please inspect \verb|cone_property.cpp| for the full list of methods implemented in the class \verb|ConeProperties|. Here we only mention the constructors
6255
\begin{Verbatim}
6256
ConeProperties::ConeProperties(ConeProperty::Enum p1)
6257
6258
ConeProperties::ConeProperties(ConeProperty::Enum p1, ConeProperty::Enum p2)
6259
6260
ConeProperties::ConeProperties(ConeProperty::Enum p1, ConeProperty::Enum p2,
6261
ConeProperty::Enum p3)
6262
\end{Verbatim}
6263
and the functions
6264
\begin{Verbatim}
6265
ConeProperties& ConeProperties::set(ConeProperty::Enum p1, bool value)
6266
6267
ConeProperties& ConeProperties::set(ConeProperty::Enum p1, ConeProperty::Enum p2)
6268
6269
bool ConeProperties::test(ConeProperty::Enum Property) const
6270
\end{Verbatim}
6271
6272
A string can be converted to a cone property and conversely:
6273
\begin{Verbatim}
6274
ConeProperty::Enum toConeProperty(const string&)
6275
const string& toString(ConeProperty::Enum)
6276
\end{Verbatim}
6277
6278
\subsection{Retrieving results}
6279
6280
As remarked above, all \verb|get...| functions that are listed below, try to compute the data asked for if they have not yet been computed. As also remarked above, it is often better to use \verb|compute| first.
6281
6282
The functions that return a matrix encoded as \verb|vector<vector<Integer> >| have variants that return a matrix encoded in the \verb|libnormaliz| class \verb|Matrix<Integer>|. These are not listed below; see \verb|cone.h|.
6283
6284
\subsubsection{Checking computations}
6285
In order to check whether a computation goal has been reached, one can use
6286
\begin{Verbatim}
6287
bool Cone<Integer>::isComputed(ConeProperty::Enum prop) const
6288
\end{Verbatim}
6289
for example
6290
\begin{Verbatim}
6291
bool done=MyCone.isComputed(ConeProperty::HilbertBasis)
6292
\end{Verbatim}
6293
6294
\subsubsection{Rank, index and dimension}
6295
6296
\begin{Verbatim}
6297
size_t Cone<Integer>::getEmbeddingDim()
6298
size_t Cone<Integer>::getRank()
6299
size_t Cone<Integer>::getRecessionRank()
6300
long Cone<Integer>::getAffineDim()
6301
size_t Cone<Integer>::getModuleRank()
6302
6303
Integer Cone<Integer>::getInternalIndex()
6304
Integer Cone<Integer>::getUnitGroupIndex()
6305
\end{Verbatim}
6306
6307
The \emph{internal} index is only defined if original generators are defined. See Section \ref{coord} for the external index.
6308
6309
The last three functions return values that are only well-defined after inhomogeneous computations.
6310
6311
\subsubsection{Support hyperplanes and constraints}\label{SHC}
6312
6313
\begin{Verbatim}
6314
const vector< vector<Integer> >& Cone<Integer>::getSupportHyperplanes()
6315
size_t Cone<Integer>::getNrSupportHyperplanes()
6316
\end{Verbatim}
6317
6318
The first function returns the support hyperplanes of the (homogenized) cone.
6319
The second function returns the number of support hyperplanes.
6320
6321
Together with the equations and congruences the support hyperplanes can also be accessed by
6322
\begin{Verbatim}
6323
map< InputType , vector< vector<Integer> > > Cone<Integer>::getConstraints ()
6324
\end{Verbatim}
6325
The map returned contains three pairs whose keys are
6326
\begin{Verbatim}
6327
Type::inequalities
6328
Type::equations
6329
Type::congruences
6330
\end{Verbatim}
6331
6332
Note that \verb|equations| and \verb|congruences| can also be accessed via the coordinate transformation (to which they belong internally). See Section \ref{coord}.
6333
6334
\subsubsection{Extreme rays and vertices}
6335
6336
\begin{Verbatim}
6337
const vector< vector<Integer> >& Cone<Integer>::getExtremeRays()
6338
size_t Cone<Integer>::getNrExtremeRays()
6339
const vector< vector<Integer> >& Cone<Integer>::getVerticesOfPolyhedron()
6340
size_t Cone<Integer>::getNrVerticesOfPolyhedron()
6341
\end{Verbatim}
6342
6343
In the inhomogeneous case the first function returns the extreme rays of the recession cone, and the second the vertices of the polyhedron. (Together they form the extreme rays of the homogenized cone.)
6344
6345
Vertices can be returned in floating point format:
6346
\begin{Verbatim}
6347
const vector< vector<nmz_float> >& Cone<Integer>::getVerticesFloat()
6348
size_t Cone<Integer>::getNrVerticesFloat()
6349
\end{Verbatim}
6350
6351
\subsubsection{Generators}
6352
6353
\begin{Verbatim}
6354
const vector< vector<Integer> >& Cone<Integer>::getOriginalMonoidGenerators()
6355
size_t Cone<Integer>::getNrOriginalMonoidGenerators()
6356
\end{Verbatim}
6357
Note that original generators are not always defined. The system of generators of the cone that is used in the computations and its cardinality are returned by
6358
\begin{Verbatim}
6359
const vector< vector<Integer> >& Cone<Integer>::getGenerators()
6360
size_t Cone<Integer>::getNrGenerators()
6361
\end{Verbatim}
6362
6363
\subsubsection{Lattice points in polytopes and elements of degree $1$}
6364
6365
\begin{Verbatim}
6366
const vector< vector<Integer> >& Cone<Integer>::getDeg1Elements()
6367
size_t Cone<Integer>::getNrDeg1Elements()
6368
\end{Verbatim}
6369
6370
This applies only to homogeneous computations. If a polytope is defined by inhomogeneous input, its lattice points appear as \verb|ModuleGenerators|; see below.
6371
6372
\subsubsection{Hilbert basis}
6373
6374
In the nonpointed case we need the maximal linear subspace of the cone:
6375
\begin{Verbatim}
6376
const vector< vector<Integer> >& Cone<Integer>::getMaximalSubspace()
6377
size_t Cone<Integer>::getDimMaximalSubspace()
6378
\end{Verbatim}
6379
6380
One of the prime results of Normaliz and its cardinality are returned by
6381
\begin{Verbatim}
6382
const vector< vector<Integer> >& Cone<Integer>::getHilbertBasis()
6383
size_t Cone<Integer>::getNrHilbertBasis()
6384
\end{Verbatim}
6385
In the inhomogeneous case the functions refer to the Hilbert basis of the recession cone. The module generators of the lattice points in the polyhedron are accessed by
6386
\begin{Verbatim}
6387
const vector< vector<Integer> >& Cone<Integer>::getModuleGenerators()
6388
size_t Cone<Integer>::getNrModuleGenerators()
6389
\end{Verbatim}
6390
6391
If the original monoid is not integrally closed, you can ask for a witness:
6392
\begin{Verbatim}
6393
vector<Integer> Cone<Integer>::getWitnessNotIntegrallyClosed()
6394
\end{Verbatim}
6395
6396
\subsubsection{Module generators over original monoid}
6397
6398
\begin{Verbatim}
6399
const vector< vector<Integer> >&
6400
Cone<Integer>::getModuleGeneratorsOverOriginalMonoid()
6401
size_t Cone<Integer>::getNrModuleGeneratorsOverOriginalMonoid()
6402
\end{Verbatim}
6403
6404
\subsubsection{Generator of the interior}\label{GenInt}
6405
6406
If the monoid is Gorenstein, Normaliz computes the generator of the interior (the canonical module):
6407
\begin{Verbatim}
6408
const vector<Integer>& Cone<Integer>::getGeneratorOfInterior()
6409
\end{Verbatim}
6410
Before asking for this vector, one should test \verb|isGorenstein()|.
6411
6412
\subsubsection{Grading and dehomogenization}
6413
6414
\begin{Verbatim}
6415
vector<Integer> Cone<Integer>::getGrading()
6416
Integer Cone<Integer>::getGradingDenom()
6417
\end{Verbatim}
6418
The second function returns the denominator of the grading.
6419
6420
\begin{Verbatim}
6421
vector<Integer> Cone<Integer>::getDehomogenization()
6422
\end{Verbatim}
6423
6424
\subsubsection{Enumerative data}
6425
6426
\begin{Verbatim}
6427
mpq_class Cone<Integer>::getMultiplicity()
6428
\end{Verbatim}
6429
Don't forget that the multiplicity is measured by a rational, not necessarily integral polytope. Therefore it need not be an integer.
6430
6431
The Hilbert series is stored in a class of its own. It is retrieved by
6432
\begin{Verbatim}
6433
const HilbertSeries& Cone<Integer>::getHilbertSeries()
6434
\end{Verbatim}
6435
It contains several data fields that can be accessed as follows (see \verb|hilbert_series.h|):
6436
\begin{Verbatim}
6437
const vector<mpz_class>& HilbertSeries::getNum() const;
6438
const map<long, denom_t>& HilbertSeries::getDenom() const;
6439
6440
const vector<mpz_class>& HilbertSeries::getCyclotomicNum() const;
6441
const map<long, denom_t>& HilbertSeries::getCyclotomicDenom() const;
6442
6443
const vector<mpz_class>& HilbertSeries::getHSOPNum() const;
6444
const map<long, denom_t>& HilbertSeries::getHSOPDenom() const;
6445
6446
long HilbertSeries::getDegreeAsRationalFunction() const;
6447
long HilbertSeries::getShift() const;
6448
6449
bool HilbertSeries::isHilbertQuasiPolynomialComputed() const;
6450
vector< vector<mpz_class> > HilbertSeries::getHilbertQuasiPolynomial() const;
6451
long HilbertSeries::getPeriod() const;
6452
mpz_class HilbertSeries::getHilbertQuasiPolynomialDenom() const;
6453
\end{Verbatim}
6454
6455
The first six functions refer to three representations of the Hilbert series as a rational function in the variable $t$: the first has a denominator that is a product of polynomials $(1-t^g)^e$, the second has a denominator that is a product of cyclotomic polynomials. In the third case the denominator is determined by the degrees of a homogeneous system of parameters (see Section \ref{rational}). In all cases the numerators are given by their coefficient vectors, and the denominators are lists of pairs $(g,e)$ where in the second case $g$ is the order of the cyclotomic polynomial.
6456
6457
If you have already computed the Hilbert series without HSOP and you want it with HSOP afterwards, the Hilbert series will simply be transformed, but Normaliz must compute the degrees for the denominator, and this may be a nontrivial computation.
6458
6459
The degree as a rational function is of course independent of the chosen representation, but may be negative, as well as the shift that indicates with which power of $t$ the numerator starts. Since the denominator has a nonzero constant term in all cases, this is exactly the smallest degree in which the Hilbert function has a nonzero value.
6460
6461
The Hilbert quasipolynomial is represented by a vector whose length is the period and whose entries are itself vectors that represent the coefficients of the individual polynomials corresponding to the residue classes modulo the period. These integers must be divided by the common denominator that is returned by the last function.
6462
6463
For the input type \verb|rees_algebra| we provide
6464
\begin{Verbatim}
6465
Integer Cone<Integer>::getReesPrimaryMultiplicity()
6466
\end{Verbatim}
6467
6468
\subsubsection{Weighted Ehrhart series and integrals}
6469
6470
The weighted Ehrhart series can be accessed by
6471
\begin{Verbatim}
6472
const pair<HilbertSeries, mpz_class>& Cone<Integer>::getWeightedEhrhartSeries()
6473
\end{Verbatim}
6474
The second component of the pair is the denominator of the coefficients in the series numerator. Its introduction was necessary since we wanted to keep integral coefficients for the numerator of a Hilbert series. The numerator and the denominator of the first component of type \verb|HilbertSeries| can be accessed as usual, but one must not forget the denominator of the numerator coefficients. There is a second way to access these data; see below.
6475
6476
The virtual multiplicity and the integral, respectively, are obtained by
6477
\begin{Verbatim}
6478
mpq_class Cone<Integer>::getVirtualMultiplicity()
6479
mpq_class Cone<Integer>::getIntegral()
6480
\end{Verbatim}
6481
6482
Actually the cone saves these data in a special container of class \verb|IntegrationData| (defined in \verb|hilbert_series.h|). It is accessed by
6483
\begin{Verbatim}
6484
IntegrationData& Cone<Integer>::getIntData()
6485
\end{Verbatim}
6486
The three \verb|get| functions above are only shortcuts for the access via \verb|getIntData()|:
6487
\begin{Verbatim}
6488
string IntegrationData::getPolynomial() const
6489
long IntegrationData::getDegreeOfPolynomial() const
6490
bool IntegrationData::isPolynomialHomogeneous() const
6491
6492
const vector<mpz_class>& IntegrationData::getNum_ZZ() const
6493
mpz_class IntegrationData::getNumeratorCommonDenom() const
6494
const map<long, denom_t>& IntegrationData::getDenom() const
6495
6496
const vector<mpz_class>& IntegrationData::getCyclotomicNum_ZZ() const
6497
const map<long, denom_t>& IntegrationData::getCyclotomicDenom() const
6498
6499
bool IntegrationData::isWeightedEhrhartQuasiPolynomialComputed() const
6500
void IntegrationData::computeWeightedEhrhartQuasiPolynomial()
6501
vector< vector<mpz_class> > IntegrationData::getWeightedEhrhartQuasiPolynomial()
6502
mpz_class IntegrationData::getWeightedEhrhartQuasiPolynomialDenom() const
6503
6504
mpq_class IntegrationData::getVirtualMultiplicity() const
6505
mpq_class IntegrationData::getIntegral() const
6506
\end{Verbatim}
6507
6508
The first three functions refer to the polynomial defining the integral or weighted Ehrhart series.
6509
6510
The computation of these data is controlled by the corresponding \verb|ConeProperty|.
6511
6512
\subsubsection{Triangulation and disjoint decomposition}
6513
6514
The triangulation, the size and the sum of the determinants are returned by
6515
\begin{Verbatim}
6516
const vector< pair<vector<key_t>,Integer> >& Cone<Integer>::getTriangulation()
6517
size_t Cone<Integer>::getTriangulationSize()
6518
Integer Cone<Integer>::getTriangulationDetSum()
6519
\end{Verbatim}
6520
See Section \ref{Triang} for the interpretation of these data. The first component of the pair is the vector of indices of the simplicial cones in the triangulation. Note that the indices are here counted from $0$ (whereas they start from $1$ in the \verb|tri| file). The second component is the determinant.
6521
6522
The type of triangulation can be retrieved by
6523
\begin{Verbatim}
6524
bool Cone<Integer>::isTriangulationNested()
6525
bool Cone<Integer>::isTriangulationPartial()
6526
\end{Verbatim}
6527
6528
If the disjoint decomposition has been computed, one gets the $0/1$ vectors describing the facets to be removed
6529
6530
\begin{Verbatim}
6531
const vector<vector<bool> >& Cone<Integer>::getOpenFacets()
6532
\end{Verbatim}
6533
6534
\subsubsection{Stanley decomposition}
6535
6536
The Stanley decomposition is stored in a list whose entries correspond to the simplicial cones in the triangulation:
6537
\begin{Verbatim}
6538
const list< STANLEYDATA<Integer> >& Cone<Integer>::getStanleyDec()
6539
\end{Verbatim}
6540
Each entry is a record of type \verb|STANLEYDATA| defined as follows:
6541
\begin{Verbatim}
6542
struct STANLEYDATA {
6543
vector<key_t> key;
6544
Matrix<Integer> offsets;
6545
};
6546
\end{Verbatim}
6547
The key has the same interpretation as for the triangulation, namely as the vector of indices of the generators of the simplicial cone (counted from $0$). The matrix contains the coordinate vectors of the offsets of the components of the decomposition that belong to the simplicial cone defined by the key. See Section \ref{Stanley} for the interpretation. The format of the matrix can be accessed by the following functions of class \verb|Matrix<Integer>|:
6548
\begin{Verbatim}
6549
size_t nr_of_rows() const
6550
size_t nr_of_columns() const
6551
\end{Verbatim}
6552
The entries are accessed in the same way as those of \verb|vector<vector<Integer> >|.
6553
6554
\subsubsection{Coordinate transformation}\label{coord}
6555
6556
The coordinate transformation from the ambient lattice to the sublattice generated by the Hilbert basis and the basis of the maximal subspace can be returned as follows:
6557
\begin{Verbatim}
6558
const Sublattice_Representation<Integer>& Cone<Integer>::getSublattice()
6559
\end{Verbatim}
6560
An object of type \verb|Sublattice_Representation| models a sequence of $\ZZ$-homomorphisms
6561
$$
6562
\ZZ^r\xrightarrow{\phi}\ZZ^n\xrightarrow{\pi}\ZZ^r
6563
$$
6564
with the following property: there exists $c\in\ZZ$, $c\neq 0$, such that $\pi\circ \phi=c\cdot\operatorname{id}_{\ZZ^r}$. In particular $\phi$ is injective. One should view the two maps as a pair of coordinate transformations: $\phi$ is determined by a choice of basis in the sublattice $U=\phi(\ZZ^r)$, and it allows us to transfer vectors from $U\cong \ZZ^r$ to the ambient lattice $\ZZ^n$. The map $\pi$ is used to realize vectors from $U$ as linear combinations of the given basis of $U\cong\ZZ^r$: after the application of $\pi$ one divides by $c$. (If $U$ is a direct summand of $\ZZ^n$, one can choose $c=1$, and conversely.) Normaliz considers vectors as rows of matrices. Therefore $\phi$ is given as an $r\times n$-matrix and $\pi$ is given as an $n\times r$ matrix.
6565
6566
The data just described can be accessed as follows (\verb|sublattice_representation.h|). For space reasons we omit the class specification \verb|Sublattice_Representation<Integer>::|
6567
\begin{Verbatim}
6568
const vector<vector<Integer> >& getEmbedding() const
6569
const vector<vector<Integer> >& getProjection() const
6570
Integer getAnnihilator() const
6571
\end{Verbatim}
6572
Here ``Embedding'' refers to $\phi$ and ``Projection'' to $\pi$ (though $\pi$ is not always surjective). The ``Annihilator'' is the number $c$ above. (It annihilates $\ZZ^r$ modulo $\pi(\ZZ^n)$.)
6573
6574
The numbers $n$ and $r$ are accessed in this order by
6575
\begin{Verbatim}
6576
size_t getDim() const
6577
size_t getRank() const
6578
\end{Verbatim}
6579
The external index, namely the order of the torsion subgroup of $\ZZ^n/U$, is returned by
6580
\begin{Verbatim}
6581
mpz_class getExternalIndex() const
6582
\end{Verbatim}
6583
Very often $\phi$ and $\pi$ are identity maps, and this property can be tested by
6584
\begin{Verbatim}
6585
bool IsIdentity()const
6586
\end{Verbatim}
6587
The constraints computed by Normaliz are ``hidden'' in the sublattice representation. They can be accessed by
6588
\begin{Verbatim}
6589
const vector<vector<Integer> >& getEquations() const
6590
const vector<vector<Integer> >& getCongruences() const
6591
\end{Verbatim}
6592
6593
But see Section \ref{SHC} above for a more direct access.
6594
6595
6596
\subsubsection{Class group}
6597
6598
\begin{Verbatim}
6599
vector<Integer> Cone<Integer>::getClassGroup()
6600
\end{Verbatim}
6601
The return value is to be interpreted as follows: The entry for index $0$ is the rank of the class group. The remaining entries contain the orders of the summands in a direct sum decomposition of the torsion subgroup.
6602
6603
\subsubsection{Integer hull}
6604
6605
For the computation of the integer hull an auxiliary cone is constructed. A reference to it is returned by
6606
\begin{Verbatim}
6607
Cone<Integer>& Cone<Integer>::getIntegerHullCone() const
6608
\end{Verbatim}
6609
6610
For example, the support hyperplanes of the integer hull can be accessed by
6611
\begin{Verbatim}
6612
MyCone.getIntegerHullCone().getSupportHyperplanes()
6613
\end{Verbatim}
6614
6615
\subsubsection{Excluded faces}
6616
6617
Before using the excluded faces Normaliz makes the collection irredundant by discarding those that are contained in others. The irredundant collection (given by hyperplanes that intersect the cone in the faces) and its cardinality are returned by
6618
\begin{Verbatim}
6619
const vector< vector<Integer> >& Cone<Integer>::getExcludedFaces()
6620
size_t Cone<Integer>::getNrExcludedFaces()
6621
\end{Verbatim}
6622
For the computation of the Hilbert series all intersections of the excluded faces are computed, and for each resulting face the weight with which it must be counted is computed. These data can be accessed by
6623
\begin{Verbatim}
6624
const vector< pair<vector<key_t>,long> >&
6625
Cone<Integer>::getInclusionExclusionData()
6626
\end{Verbatim}
6627
The first component of each pair contains the indices of the generators (counted from 0) that lie in the face and the second component is the weight.
6628
6629
\subsubsection{Boolean valued results}
6630
6631
All the ``questions'' to the cone that can be asked by the boolean valued functions in this section start a computation if the answer is not yet known.
6632
6633
The first, the question
6634
\begin{Verbatim}
6635
bool Cone<Integer>::isIntegrallyClosed()
6636
\end{Verbatim}
6637
does not trigger a computation of the full Hilbert basis. The computation stops as soon as the answer can be given, and this is the case when an element in the integral closure has been found that is not in the original monoid. Such an element is retrieved by
6638
\begin{Verbatim}
6639
vector<Integer> Cone<Integer>::getWitnessNotIntegrallyClosed()
6640
\end{Verbatim}
6641
6642
As discussed in Section \ref{IsPointed} it can sometimes be useful to ask
6643
\begin{Verbatim}
6644
bool Cone<Integer>::isPointed()
6645
\end{Verbatim}
6646
before a more complex computation is started.
6647
6648
The Gorenstein property can be tested with
6649
\begin{Verbatim}
6650
bool Cone<Integer>::isGorenstein()
6651
\end{Verbatim}
6652
If the answer is positive, Normaliz computes the generator of the interior of the monoid. Also see \ref{GenInt}.
6653
6654
6655
The next two functions answer the question whether the Hilbert basis or at least the extreme rays live in degree $1$.
6656
\begin{Verbatim}
6657
bool Cone<Integer>::isDeg1ExtremeRays()
6658
bool Cone<Integer>::isDeg1HilbertBasis()
6659
\end{Verbatim}
6660
6661
Finally we have
6662
\begin{Verbatim}
6663
bool Cone<Integer>::isInhomogeneous()
6664
bool Cone<Integer>::isReesPrimary()
6665
\end{Verbatim}
6666
\verb|isReesPrimary()| checks whether the ideal defining the Rees algebra is primary to the irrelevant maximal ideal.
6667
6668
\subsection{Control of execution}
6669
6670
\subsubsection{Exceptions}
6671
6672
All exceptions that are thrown in \verb|libnormaliz| are derived from the abstract class \verb|NormalizException| that itself is derived from \verb|std::exception|:
6673
\begin{Verbatim}
6674
class NormalizException: public std::exception
6675
\end{Verbatim}
6676
6677
The following exceptions must be caught by the calling program:
6678
\begin{Verbatim}
6679
class ArithmeticException: public NormalizException
6680
class BadInputException: public NormalizException
6681
class NotComputableException: public NormalizException
6682
class FatalException: public NormalizException
6683
class NmzCoCoAException: public NormalizException
6684
class InterruptException: public NormalizException
6685
\end{Verbatim}
6686
6687
The \verb|ArithmeticException| leaves \verb|libnormaliz| if a nonrecoverable overflow occurs (it is also used internally for the change of integer type). This should not happen for cones of integer type \verb|mpz_class|, unless it is caused by the attempt to create a data structure of illegal size or by a bug in the program. The \verb|BadInputException| is thrown whenever the input is inconsistent; the reasons for this are manifold. The \verb|NotComputableException| is thrown if a computation goal cannot be reached. The \verb|FatalException| should never appear. It covers error situations that can only be caused by a bug in the program. At many places \verb|libnormaliz| has \verb|assert| verifications built in that serve the same purpose.
6688
6689
There are two more exceptions for the communication within \verb|libnormaliz| that should not leave it:
6690
\begin{Verbatim}
6691
class NonpointedException: public NormalizException
6692
class NotIntegrallyClosedException: public NormalizException
6693
\end{Verbatim}
6694
6695
The \verb|InterruptException| is discussed in the next section.
6696
6697
\subsubsection{Interruption}
6698
6699
In order to find out if the user wants to interrupt the program, the functions in \verb|libnormaliz| test the value of the global variable
6700
\begin{Verbatim}
6701
volatile sig_atomic_t nmz_interrupted
6702
\end{Verbatim}
6703
If it is found to be \verb|true|, an \verb|InterruptException| is thrown. This interrupt leaves \verb|libnormaliz|, so that the calling program can process it. The \verb|Cone| still exists, and the data computed in it can still be accessed. Moreover, \verb|compute| can again be applied to it.
6704
6705
The calling program must take care to catch the signal caused by Ctrl-C and to set \verb|nmz_interrupted=1|.
6706
6707
\subsubsection{Inner parallelization}
6708
6709
By default the cone constructor sets the maximal number of parallel threads to $8$, unless the system has set a lower limit. You can change this value by
6710
\begin{Verbatim}
6711
long set_thread_limit(long t)
6712
\end{Verbatim}
6713
The function returns the previous value.
6714
6715
\verb|set_thread_limit(0)| raises the limit set by libnormaliz to $\infty$.
6716
6717
\subsubsection{Outer parallelization}
6718
6719
The libnormaliz functions can be called by programs that are parallelized via OpenMP themselves. The functions in libnormaliz switch off nested parallelization.
6720
6721
As a test program you can compile and run \verb|outerpar| in \verb|source/outerpar|. Compile it by \texttt{make -f Makefile.classic}.
6722
6723
6724
\subsubsection{Control of terminal output}
6725
By using
6726
\begin{Verbatim}
6727
bool setVerboseDefault(bool v)
6728
\end{Verbatim}
6729
one can control the verbose output of \verb|libnormaliz|. The default value is \verb|false|. This is a global setting that effects all cones constructed afterwards. However, for every cone one can set an individual value of \verb|verbose| by
6730
\begin{Verbatim}
6731
bool Cone<Integer>::setVerbose(bool v)
6732
\end{Verbatim}
6733
Both functions return the previous value.
6734
6735
The default values of verbose output and error output are \verb|std::cout| and \verb|std::cerr|. These values can be changed by
6736
\begin{Verbatim}
6737
void setVerboseOutput(std::ostream&)
6738
void setErrorOutput(std::ostream&)
6739
\end{Verbatim}
6740
6741
\subsection{A simple program}\label{maxsimplex}
6742
6743
The example program is a simplified version of the program on which the experiments for the paper ``Quantum jumps of normal polytopes'' by W. Bruns, J. Gubeladze and M. Micha\l{}ek, Discrete Comput.\ Geom.\ 56 (2016), no. 1, 181--215, are based. Its goal is to find a maximal normal lattice polytope $P$ in the following sense: there is no normal lattice polytope $Q\supset P$ that has exactly one more lattice point than $P$. ``Normal'' means in this context that the Hilbert basis of the cone over $P$ is given by the lattice points of $P$, considered as degree $1$ elements in the cone.
6744
6745
The program generates normal lattice simplices and checks them for maximality. The dimension is set in the program, as well as the bound for the random coordinates of the vertices.
6746
6747
Let us have a look at \verb|source/maxsimplex/maxsimplex.cpp|. First the more or less standard preamble:
6748
6749
\begin{Verbatim}
6750
#include <stdlib.h>
6751
#include <vector>
6752
#include <fstream>
6753
#include <omp.h>
6754
using namespace std;
6755
6756
#include "libnormaliz/libnormaliz.h"
6757
#include "libnormaliz/cone.h"
6758
#include "libnormaliz/vector_operations.h"
6759
#include "libnormaliz/cone_property.h"
6760
#include "libnormaliz/integer.h"
6761
using namespace libnormaliz;
6762
\end{Verbatim}
6763
6764
Since we want to perform a high speed experiment which is not expected to be arithmetically demanding, we choose $64$ bit integers:
6765
\begin{Verbatim}
6766
typedef long long Integer;
6767
\end{Verbatim}
6768
6769
The first routine finds a random normal simplex of dimension \verb|dim|. The coordinates of the vertices are integers between $0$ and \verb|bound|. We are optimistic that such a simplex can be found, and this is indeed no problem in dimension $4$ or $5$.
6770
6771
\begin{Verbatim}
6772
Cone<Integer> rand_simplex(size_t dim, long bound){
6773
6774
vector<vector<Integer> > vertices(dim+1,vector<Integer> (dim));
6775
while(true){ // an eternal loop ...
6776
for(size_t i=0;i<=dim;++i){
6777
for(size_t j=0;j<dim;++j)
6778
vertices[i][j]=rand()%(bound+1);
6779
}
6780
6781
Cone<Integer> Simplex(Type::polytope,vertices);
6782
// we must check the rank and normality
6783
if(Simplex.getRank()==dim+1 && Simplex.isDeg1HilbertBasis())
6784
return Simplex;
6785
}
6786
vector<vector<Integer> > dummy_gen(1,vector<Integer>(1,1));
6787
// to make the compiler happy
6788
return Cone<Integer>(Type::cone,dummy_gen);
6789
}
6790
\end{Verbatim}
6791
6792
We are looking for a normal polytope $Q\supset P$ with exactly one more lattice point. The potential extra lattice points $z$ are contained in the matrix \verb|jump_cands|. There are two obstructions for $Q=\operatorname{conv}(P,z)$ to be tested: (i) $z$ is the only extra lattice point and (ii) $Q$ is normal. It makes sense to test them in this order since most of the time condition (i) is already violated and it is much faster to test.
6793
\begin{Verbatim}
6794
bool exists_jump_over(Cone<Integer>& Polytope,
6795
const vector<vector<Integer> >& jump_cands){
6796
6797
vector<vector<Integer> > test_polytope=Polytope.getExtremeRays();
6798
test_polytope.resize(test_polytope.size()+1);
6799
for(size_t i=0;i<jump_cands.size();++i){
6800
test_polytope[test_polytope.size()-1]=jump_cands[i];
6801
Cone<Integer> TestCone(Type::cone,test_polytope);
6802
if(TestCone.getNrDeg1Elements()!=Polytope.getNrDeg1Elements()+1)
6803
continue;
6804
if(TestCone.isDeg1HilbertBasis())
6805
return true;
6806
}
6807
return false;
6808
}
6809
\end{Verbatim}
6810
6811
In order to make the (final) list of candidates $z$ as above we must compute the widths of $P$ over its support hyperplanes.
6812
\begin{Verbatim}
6813
vector<Integer> lattice_widths(Cone<Integer>& Polytope){
6814
6815
if(!Polytope.isDeg1ExtremeRays()){
6816
cerr<< "Cone in lattice_widths is not defined by lattice polytope"<< endl;
6817
exit(1);
6818
}
6819
vector<Integer> widths(Polytope.getNrExtremeRays(),0);
6820
for(size_t i=0;i<Polytope.getNrSupportHyperplanes();++i){
6821
for(size_t j=0;j<Polytope.getNrExtremeRays();++j){
6822
// v_scalar_product is a useful function from vector_operations.h
6823
Integer test=v_scalar_product(Polytope.getSupportHyperplanes()[i],
6824
Polytope.getExtremeRays()[j]);
6825
if(test>widths[i])
6826
widths[i]=test;
6827
}
6828
}
6829
return widths;
6830
}
6831
\end{Verbatim}
6832
6833
\begin{Verbatim}
6834
6835
int main(int argc, char* argv[]){
6836
6837
time_t ticks;
6838
srand(time(&ticks));
6839
cout << "Seed " <<ticks << endl; // we may want to reproduce the run
6840
6841
size_t polytope_dim=4;
6842
size_t cone_dim=polytope_dim+1;
6843
long bound=6;
6844
vector<Integer> grading(cone_dim,0);
6845
// at some points we need the explicit grading
6846
grading[polytope_dim]=1;
6847
6848
size_t nr_simplex=0; // for the progress report
6849
6850
\end{Verbatim}
6851
Since the computations are rather small, we suppress parallelization (except for one step below).
6852
\begin{Verbatim}
6853
while(true){
6854
6855
#ifdef _OPENMP
6856
omp_set_num_threads(1);
6857
#endif
6858
Cone<Integer> Candidate=rand_simplex(polytope_dim,bound);
6859
nr_simplex++;
6860
if(nr_simplex%1000 ==0)
6861
cout << "simplex " << nr_simplex << endl;
6862
\end{Verbatim}
6863
Maximality is tested in $3$ steps. Most often there exists a lattice point $z$ of height $1$ over $P$. If so, then $\operatorname{conv}(P,z)$ contains only $z$ as an extra lattice point and it is automatically normal. In order to find such a point we must move the support hyperplanes outward by lattice distance $1$.
6864
\begin{Verbatim}
6865
vector<vector<Integer> > supp_hyps_moved=Candidate.getSupportHyperplanes();
6866
for(size_t i=0;i<supp_hyps_moved.size();++i)
6867
supp_hyps_moved[i][polytope_dim]+=1;
6868
Cone<Integer> Candidate1(Type::inequalities,supp_hyps_moved,
6869
Type::grading,to_matrix(grading));
6870
if(Candidate1.getNrDeg1Elements()>Candidate.getNrDeg1Elements())
6871
continue; // there exists a point of height 1
6872
\end{Verbatim}
6873
Among the polytopes that have survived the height $1$ test, most nevertheless have suitable points $z$ close to them, and it makes sense not to use the maximum possible height immediately. Note that we must now test normality explicitly.
6874
\begin{Verbatim}
6875
cout << "No ht 1 jump"<< " #latt " << Candidate.getNrDeg1Elements() << endl;
6876
// move the hyperplanes further outward
6877
for(size_t i=0;i<supp_hyps_moved.size();++i)
6878
supp_hyps_moved[i][polytope_dim]+=polytope_dim;
6879
Cone<Integer> Candidate2(Type::inequalities,supp_hyps_moved,
6880
Type::grading,to_matrix(grading));
6881
cout << "Testing " << Candidate2.getNrDeg1Elements()
6882
<< " jump candidates" << endl; // including the lattice points in P
6883
if(exists_jump_over(Candidate,Candidate2.getDeg1Elements()))
6884
continue;
6885
\end{Verbatim}
6886
Now we can be optimistic that a maximal polytope $P$ has been found, and we test all candidates $z$ that satisfy the maximum possible bound on their lattice distance to $P$.
6887
\begin{Verbatim}
6888
cout << "No ht <= 1+dim jump" << endl;
6889
vector<Integer> widths=lattice_widths(Candidate);
6890
for(size_t i=0;i<supp_hyps_moved.size();++i)
6891
supp_hyps_moved[i][polytope_dim]+=
6892
-polytope_dim+(widths[i])*(polytope_dim-2);
6893
\end{Verbatim}
6894
The computation may become arithmetically critical at this point. Therefore we use \verb|mpz_class| for our cone. The conversion to and from \verb|mpz_class| is done by routines contained in \verb|convert.h|.
6895
\begin{Verbatim}
6896
vector<vector<mpz_class> > mpz_supp_hyps;
6897
convert(mpz_supp_hyps,supp_hyps_moved);
6898
vector<mpz_class> mpz_grading=convertTo<vector<mpz_class> >(grading);
6899
\end{Verbatim}
6900
The computation may need some time now. Therefore we allow a little bit of parallelization.
6901
\begin{Verbatim}
6902
#ifdef _OPENMP
6903
omp_set_num_threads(4);
6904
#endif
6905
\end{Verbatim}
6906
Since $P$ doesn't have many vertices (even if we use these routines for more general polytopes than simplices), we don't expect too many vertices for the enlarged polytope. In this situation it makes sense to set the algorithmic variant \verb|Approximate|.
6907
\begin{Verbatim}
6908
Cone<mpz_class> Candidate3(Type::inequalities,mpz_supp_hyps,
6909
Type::grading,to_matrix(mpz_grading));
6910
Candidate3.compute(ConeProperty::Deg1Elements,ConeProperty::Approximate);
6911
vector<vector<Integer> > jumps_cand; // for conversion from mpz_class
6912
convert(jumps_cand,Candidate3.getDeg1Elements());
6913
cout << "Testing " << jumps_cand.size() << " jump candidates" << endl;
6914
if(exists_jump_over(Candidate, jumps_cand))
6915
continue;
6916
\end{Verbatim}
6917
Success!
6918
\begin{Verbatim}
6919
cout << "Maximal simplex found" << endl;
6920
cout << "Vertices" << endl;
6921
Candidate.getExtremeRaysMatrix().pretty_print(cout); // a goody from matrix.h
6922
cout << "Number of lattice points = " << Candidate.getNrDeg1Elements();
6923
cout << " Multiplicity = " << Candidate.getMultiplicity() << endl;
6924
6925
} // end while
6926
} // end main
6927
\end{Verbatim}
6928
6929
For the compilation of \verb|maxsimplex.cpp| we have added a \verb|Makefile|. Running the program needs a little bit of patience. However, within a few hours a maximal simplex should have emerged. From a log file:
6930
\begin{Verbatim}
6931
simplex 143000
6932
No ht 1 jump #latt 9
6933
Testing 22 jump candidates
6934
No ht 1 jump #latt 10
6935
Testing 30 jump candidates
6936
No ht 1 jump #latt 29
6937
Testing 39 jump candidates
6938
No ht <= 1+dim jump
6939
Testing 173339 jump candidates
6940
Maximal simplex found
6941
Vertices
6942
1 3 5 3 1
6943
2 3 0 3 1
6944
3 0 5 5 1
6945
5 2 2 1 1
6946
6 5 6 2 1
6947
Number of lattice points = 29 Multiplicity = 275
6948
\end{Verbatim}
6949
6950
\end{small}
6951
6952
\section{PyNormaliz}
6953
6954
Before you can install PyNormaliz (written by Sebastian Gutsche), you must install Normaliz with shared libraries via \verb|make install| (using cmake or autotools). PyNormaliz expects the installed files in a standard location.
6955
6956
For PyNormaliz itself type
6957
\begin{Verbatim}
6958
python setup.py install
6959
\end{Verbatim}
6960
or
6961
\begin{Verbatim}
6962
pip install PyNormaliz
6963
\end{Verbatim}
6964
at a command prompt. Depending on your python version you might want to write \verb|python3| instead.
6965
6966
For a brief introduction please consult the PyNormaliz tutorial on the Normaliz web site.
6967
6968
\section{QNormaliz}\label{QNorm}
6969
6970
The variant QNormaliz of Normaliz can use coefficients from subfields of $\RR$, for example from real algebraic extensions of $\QQ$. It is clear that the computations are then restricted to those that do not depend on lattice data. At present QNormaliz is restricted to the computations of support hyperplanes/extreme rays and to triangulations. Instead of the template name \verb|Integer| (see Appendix \ref{libnorm}) it uses \verb|Number| to emphasize the difference.
6971
6972
The included implementation specializes \verb|Number| to \verb|mpq_class|. This only serves as a demonstration example. From the practical viewpoint it is superfluous since Normaliz can (now) process rational input data. Nevertheless have a look at the examples in \verb|Qexample|.
6973
6974
\subsection{Prerequisites}
6975
6976
In order to specialize \verb|Number| to your field of coefficients you must of course make it available via a suitable class library in the compilation process.
6977
6978
In the code of \verb|QNormaliz| the following adaptations are necessary:
6979
\begin{enumerate}
6980
\item In \verb|Qnormaliz.cpp| replace \verb|mpq_class| in the definition of \verb|Cone|.
6981
\item In \verb|libQnormaliz-templated.cpp| you must do the same replacement.
6982
\item If your number type has a convenient conversion to \verb|string|, modify the specialization of the templated function \verb|toString| in \verb|Qinteger.h|. Otherwise remove the specialization for \verb|mpq_class|.
6983
\item In \verb|Qvector_operations.cpp| you must replace the definition of \verb|v_simplify| by a version for your numbers.
6984
\end{enumerate}
6985
6986
The last point is extremely important since the coefficients of the linear forms representing the support hyperplanes or the vectors representing the extreme rays tend to explode rapidly in size if not kept under control. For \verb|mpq_class| this means that we multiply vectors by the least common denominator of the components and then extract the gcd of the numerators. (If you really need a demonstration of what happens otherwise, switch off the function by an immediate \verb|return|, and run QNormaliz on the example \verb|small| in \verb|example|.)
6987
6988
\subsection{Restrictions}
6989
6990
The following input types are NOT allowed in QNormaliz:
6991
\begin{center}
6992
\texttt{
6993
\begin{tabular}{lll}
6994
lattice& congruences&grading\\
6995
cone\_and\_lattice& inhom\_congruences& offset
6996
\end{tabular}
6997
}
6998
\end{center}
6999
Inhomogeneous types are allowed. Also \verb|saturation| is allowed. It must be interpreted as a generating set for a subspace that is intersected with all the objects defined by other input items.
7000
7001
The only computation goals and algorithmic variants allowed are:
7002
\begin{center}
7003
\texttt{
7004
\begin{tabular}{llll}
7005
Generators& ExtremeRays & VerticesOfPolyhedron & MaximalSubspace \\
7006
SupportHyperplanes& Equations & Triangulation & ConeDecomposition \\
7007
Dehomogenization& Rank& EmbeddingDim & Sublattice\\
7008
KeepOrder & IsPointed &IsInhomogeneous &DefaultMode
7009
\end{tabular}
7010
}
7011
\end{center}
7012
7013
It may seem paradoxical that \verb|Sublattice| appears here. As in the true lattice case, the \verb|Sublattice| \verb|Representation| is the coordinate transformation used by QNormaliz. Over a field $F$ there is no need for the annihilator $c$, and one simply has a pair of linear maps $F^r\to F^d \to F^r$ whose composition is the identity of $F^r$. Of course, congruences and external index make no sense anymore.
7014
7015
Moreover, the original monoid and any data referring to it are not defined.
7016
7017
Implicit or explicit \verb|DefaultMode| is interpreted as \verb|SupportHyperplanes|.
7018
7019
\subsection{Results}
7020
7021
In the output file we use the same terminology as in that of Normaliz, for example, ``basis elements of sublattice''. In QNormaliz these vectors form a basis of the vector subspace spanned by the cone that is computed. No reference to a sublattice is implied.
7022
7023
As far as possible, we apply \verb|v_simplify| to the computation results. Note that it is not necessarily applied to all results. For example the generators of the cone (contained in the file with suffix \verb|tgn| or returned by \verb|getGenerators|) are not necessarily simplified. (In particular, they need not be identical to those computed by Normaliz (if applicable to the input), but can differ from them by order and rational factors.)
7024
7025
As for Normaliz, for dehomogenization one must divide a vector by its value under the linear form that we call dehomogenization. Since fractions can be formed now, Normaliz could divide by the dehomogenization, but it does not do it in favor of the output in simplified form.
7026
7027
Note that there is no way for QNormaliz to define an invariant volume function on subspaces. (In Normaliz the situation is different since determinants of invertible matrices over $\ZZ$ are equal to $\pm 1$.) Therefore QNormaliz does not define multiplicities. Together with triangulations, it computes determinants of the vectors spanning the simplicial cones. They are defined relative to the choice of basis shown as ``basis of sublattice'' if the cone is not full-dimensional. In the full-dimensional case QNormaliz uses the basis of unit vectors.
7028
7029
\subsection{Installation}
7030
7031
The standard autotools build system automatically builds and installs
7032
QNormaliz as well.
7033
7034
As for Normaliz and QNormaliz one can use the ``classical'' Makefile. See \verb|INSTALL| in the directory \verb|source| (without Q!).
7035
7036
Moreover, QNormaliz can also be built by cmake. Simply follow the steps in Section \ref{cmake} after having made a directory \verb|BUILD_QNMZ| (for example) in the Normaliz directory. Then go to it and say \verb|cmake ../Qsource|, followed by \verb|make| and \verb|make install|. With the default settings, libQnormaliz will be installed in \verb|/usr/local|, subdirectories \verb|bin|, \verb|lib| and \verb|include/libQnormaliz|.
7037
7038
7039
7040
\begin{thebibliography}{15.}
7041
\small
7042
7043
\bibitem{CoCoA}
7044
J. Abbott, A.M. Bigatti and G. Lagorio,
7045
\emph{CoCoA-5: a system for doing {C}omputations in {C}ommutative {A}lgebra.}
7046
Available at \url{http://cocoa.dima.unige.it}.
7047
7048
\bibitem{Scip} T. Achterberg. {\em SCIP: Solving constraint integer programs}. Mathematical Programming Computation 1 (2009), 1--41. Available from \url{http://mpc.zib.de/index.php/MPC/article/view/4}
7049
7050
\bibitem{AI} V. Almendra and B. Ichim. {\em jNormaliz 1.7}.
7051
Available at \url{http://normaliz.uos.de}
7052
7053
\bibitem{LatInt} V. Baldoni, N. Berline, J.A. De Loera, B. Dutra, M. K\"oppe, S. Moreinis, G. Pinto, M. Vergne, J. Wu,
7054
\emph{A User's Guide for LattE integrale v1.7.2, 2013.} Software package
7055
LattE is available at \url{http://www.math.ucdavis.edu/~latte/}
7056
7057
\bibitem{BG} W. Bruns and J. Gubeladze. {\em Polytopes, rings, and K-theory}.
7058
Springer 2009.
7059
7060
\bibitem{BI} W. Bruns and B. Ichim. {\em Normaliz: algorithms for rational cones and affine monoids.}
7061
J. Algebra 324 (2010) 1098--1113.
7062
7063
\bibitem{BHIKS} W. Bruns, R. Hemmecke, B. Ichim, M. K\"oppe and C. S\"oger.
7064
{\em Challenging computations of Hilbert bases of cones associated with
7065
algebraic statistics}. Exp. Math. 20 (2011), 25--33.
7066
7067
\bibitem{BIS} W. Bruns, B. Ichim and C. S\"oger. {\em The power of
7068
pyramid decomposition in Normaliz}. J. Symb. Comp. 74 (2016), 513--536.
7069
7070
\bibitem{BK02} W. Bruns and R. Koch. {\em Computing the integral
7071
closure of an affine semigroup}. Univ. Iagell. Acta Math.
7072
39 (2001), 59--70.
7073
7074
\bibitem{BSS} W. Bruns, R. Sieg and C. S\"oger. {\em Normaliz 2013--2016.} To appear in the final report of the DFG SPP 1489. Preprint \url{arXiv:1611.07965}.
7075
7076
\bibitem{BS} W. Bruns and C. S\"oger. {\em The computation of weighted Ehrhart series in Normaliz.} J. Symb. Comp. 68 (2015), 75--86.
7077
7078
\bibitem{GAP-NmzInterface} S.~Gutsche, M.~Horn, C.~S\"oger,
7079
\emph{NormalizInterface for GAP}.
7080
Available at \url{https://github.com/gap-packages/NormalizInterface}.
7081
7082
\bibitem{KV} M. K�ppe and S. Verdoolaege. {\em Computing
7083
parametric rational generating functions with a primal
7084
Barvinok algorithm.} Electron. J. Comb.
7085
15, No. 1, Research Paper R16, 19 p. (2008).
7086
7087
\bibitem{Po} L. Pottier. {\em The Euclide algorithm in dimension
7088
$n$}. Research report, ISSAC 96, ACM Press 1996.
7089
7090
\bibitem{Sch} A. Sch\"{u}rmann, {\em Exploiting
7091
polyhedral symmetries in social choice.}
7092
Social Choice and Welfare \textbf{40} (2013), 1097--1110.
7093
\end{thebibliography}
7094
\end{document}
7095
7096