generated from nerf/texTemplate
Compare commits
2 commits
9bf50e4353
...
ac0ce99bb5
Author | SHA1 | Date
---|---|---
 | ac0ce99bb5 |
 | 0ef95e1735 |
33 changed files with 4262 additions and 11 deletions
6
.editorconfig
Normal file
|
@@ -0,0 +1,6 @@
|
|||
root = true
|
||||
[*]
|
||||
end_of_line = lf
|
||||
charset = utf-8
|
||||
indent_style = tab
|
||||
|
8
.gitignore
vendored
|
@@ -45,6 +45,11 @@
|
|||
# latexrun
|
||||
latex.out/
|
||||
|
||||
.cache/
|
||||
.local/
|
||||
.texlive2022/
|
||||
.lesshst
|
||||
|
||||
## Auxiliary and intermediate files from other packages:
|
||||
# algorithms
|
||||
*.alg
|
||||
|
@@ -303,3 +308,6 @@ TSWLatexianTemp*
|
|||
|
||||
# Nix build output
|
||||
/result
|
||||
|
||||
# language correction
|
||||
.aspell.*
|
||||
|
|
25
flake.lock
Normal file
|
@@ -0,0 +1,25 @@
|
|||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1694343207,
|
||||
"narHash": "sha256-jWi7OwFxU5Owi4k2JmiL1sa/OuBCQtpaAesuj5LXC8w=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "78058d810644f5ed276804ce7ea9e82d92bee293",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
24
flake.nix
|
@@ -30,19 +30,22 @@
|
|||
let
|
||||
pkgs = nixpkgsFor.${system};
|
||||
# LaTeX package dependencies
|
||||
latexdeps = {inherit (pkgs.texlive) scheme-small;};
|
||||
latexdeps = {inherit (pkgs.texlive) scheme-full;};
|
||||
latex = pkgs.texlive.combine latexdeps;
|
||||
latexrun = pkgs.latexrun;
|
||||
in {
|
||||
default = pkgs.stdenvNoCC.mkDerivation {
|
||||
name = "output.pdf";
|
||||
buildInputs = [latex latexrun pkgs.biber];
|
||||
buildInputs = [latex latexrun pkgs.biber pkgs.inkscape pkgs.stix-two];
|
||||
src = self;
|
||||
buildPhase = ''
|
||||
latexrun -o output.pdf src/Main.tex
|
||||
HOME=.
|
||||
cd src
|
||||
latexrun --latex-cmd lualatex --bibtex-cmd biber --latex-args '\-shell-escape' -o output.pdf Main.tex
|
||||
'';
|
||||
installPhase = ''
|
||||
mkdir -p $out
|
||||
env > $out/env
|
||||
cp output.pdf $out/output.pdf
|
||||
'';
|
||||
};
|
||||
|
@@ -57,7 +60,7 @@
|
|||
kakoune =
|
||||
let
|
||||
texlab = pkgs.texlab;
|
||||
latexTmpDir = "src/latex.out/";
|
||||
latexTmpDir = "latex.out/";
|
||||
myKakoune =
|
||||
let
|
||||
# this could also be done by generating toml with the
|
||||
|
@@ -67,20 +70,20 @@
|
|||
text = ''
|
||||
[language.latex]
|
||||
filetypes = ["latex"]
|
||||
roots = [".git"]
|
||||
roots = [".git", "Main.tex"]
|
||||
command = "texlab"
|
||||
# args = ["-vvvv", "--log-file", "/tmp/texlabLog"]
|
||||
settings_section = "texlab"
|
||||
[language.latex.settings.texlab.build]
|
||||
onSave = true
|
||||
auxDirectory = "${latexTmpDir}"
|
||||
executable = "latexrun"
|
||||
args = ["--bibtex-cmd", "biber", "--latex-args", "'-synctex=1'","%f"]
|
||||
args = ["--latex-cmd", "lualatex", "--bibtex-cmd", "biber", "--latex-args", "'-shell-escape -synctex=1'","%f"]
|
||||
# args = ["--latex-cmd", "lualatex", "--bibtex-cmd", "biber", "%f"]
|
||||
forwardSearchAfter = true
|
||||
[language.latex.settings.texlab.forwardSearch]
|
||||
executable = "zathura"
|
||||
args = ["--synctex-forward", "%l:1:%f", "%p"]
|
||||
[language.latex.settings.texlab]
|
||||
auxDirectory = "${latexTmpDir}"
|
||||
'';
|
||||
};
|
||||
config = pkgs.writeTextFile (rec {
|
||||
|
@@ -89,7 +92,7 @@
|
|||
text = ''
|
||||
colorscheme solarized-dark
|
||||
set global tabstop 2
|
||||
set global indentwidth 2
|
||||
set global indentwidth 0
|
||||
# eval %sh{kak-lsp --kakoune --session $kak_session -c ${kak-lsp-config} --log /tmp/kak-lspLog -vvvv}
|
||||
eval %sh{kak-lsp --kakoune --session $kak_session -c ${kak-lsp-config}}
|
||||
hook global WinSetOption filetype=(latex) %{
|
||||
|
@@ -111,7 +114,10 @@
|
|||
# TODO only try to start the kakoune session if no session with that
|
||||
# name exists
|
||||
shellHook = ''
|
||||
HOME=.
|
||||
# unset LANG # This is a very stupid idea but lualatex GRMBLFX!!! # argh this breaks kakoune unicode!!!
|
||||
alias ..="cd .."
|
||||
cd src
|
||||
mkdir -p ${latexTmpDir}
|
||||
export KAKOUNE_CONFIG_DIR="/dev/null/"
|
||||
kak -d -s ${name} &
|
||||
|
|
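The diff only shows fragments of flake.nix, so the exact output attribute names are not visible here. Assuming the derivation above is exposed as the flake's default package and the kakoune environment as its default dev shell, typical usage would be:

    # build the PDF (lualatex + biber via latexrun); the result lands in ./result/output.pdf
    nix build
    # enter the editing environment: the shellHook creates latex.out/, starts a
    # detached kakoune session and wires up texlab through kak-lsp
    nix develop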
187
src/Main.tex
|
@@ -1,4 +1,187 @@
|
|||
\documentclass{scrartcl}
|
||||
\begin{filecontents*}[overwrite]{\jobname.xmpdata}
|
||||
\Title{A uniform equivariant model for cubical type theory}
|
||||
\Author{Dennis Frieberg}
|
||||
\Language{en-US}
|
||||
\Subject{see title}
|
||||
\Keywords{keyword}
|
||||
\end{filecontents*}
|
||||
\documentclass[DIV=calc,fontsize=12pt,draft=true,parskip]{scrartcl}
|
||||
\usepackage[a-2b,mathxmp]{pdfx}[2018/12/22]
|
||||
%\usepackage{pdfmanagement-testphase}
|
||||
%\DeclareDocumentMetadata{
|
||||
% pdfversion=1.7
|
||||
%}
|
||||
\usepackage{microtype}
|
||||
\usepackage{amsthm} %theorems
|
||||
\usepackage{mathtools} %coloneqq and other stuff
|
||||
\usepackage{amssymb}
|
||||
%\usepackage[colon=literal]{unicode-math-luatex} %unicode in mathmode does conflict amssymb maybe sometimes use this line if you want
|
||||
% that : behaves like \colon
|
||||
\usepackage{fontspec} % for changing text fonts (we roll default right now, so no use)
|
||||
\usepackage{unicode-math} %also loads fontspec
|
||||
\setmathfont{STIXTwoMath-Regular.otf}
|
||||
\setmainfont{STIXTwoText}[Extension=.otf, UprightFont = *-Regular, BoldFont = *-SemiBold, ItalicFont = *-Italic, BoldItalicFont = *-SemiBoldItalic]
|
||||
\addtokomafont{disposition}{\rmfamily}
|
||||
% \usepackage{imakeidx} %make index possible
|
||||
% \makeindex[columns=3, title=Alphabetical Index]
|
||||
\usepackage{graphicx}
|
||||
\usepackage{dsfont}
|
||||
\usepackage{hyperref}
|
||||
\usepackage{tikz}
|
||||
\usetikzlibrary{cd}
|
||||
\usetikzlibrary{decorations.pathreplacing}
|
||||
\usepackage{faktor}
|
||||
\usepackage{polyglossia}
|
||||
\setdefaultlanguage[variant=us]{english}
|
||||
\usepackage[style=alphabetic]{biblatex}
|
||||
\usepackage{ebproof}
|
||||
\usepackage{xspace}
|
||||
\usepackage[obeyDraft]{todonotes}
|
||||
\usepackage{subfiles}
|
||||
\usepackage[plain]{fancyref}
|
||||
\usepackage[capitalise]{cleveref}
|
||||
% \usepackage{mathabx}
|
||||
|
||||
\usepackage{pdftexcmds} % pdftex primitves for lualatex
|
||||
\makeatletter % make the primitives available for svg
|
||||
\let\pdfstrcmp\pdf@strcmp
|
||||
\let\pdffilemoddate\pdf@filemoddate
|
||||
\makeatother
|
||||
\usepackage{svg}
|
||||
|
||||
\DeclareFontFamily{U}{min}{}
|
||||
\DeclareFontShape{U}{min}{m}{n}{<-> udmj30}{}
|
||||
\newcommand\Yo{\!\text{\usefont{U}{min}{m}{n}\symbol{'207}}\!}
|
||||
|
||||
\newcommand*{\printline}{\number\inputlineno\xspace}
|
||||
\NewCommandCopy\oldtodo\todo
|
||||
\renewcommand{\todo}[2][]{\oldtodo[#1]{\printline #2}}
|
||||
|
||||
\addbibresource{resource.bib}
|
||||
|
||||
\theoremstyle{definition}
|
||||
\swapnumbers
|
||||
\newtheorem{theorem}{Theorem}[subsection]
|
||||
\newtheorem{corollary}[theorem]{Corollary}
|
||||
\newtheorem{lemma}[theorem]{Lemma}
|
||||
\newtheorem{definition}[theorem]{Definition}
|
||||
\newtheorem{example}[theorem]{Example}
|
||||
\newtheorem{proposition}[theorem]{Proposition}
|
||||
\newtheorem{observation}[theorem]{Observation}
|
||||
\newtheorem{notation}[theorem]{Notation}
|
||||
\theoremstyle{remark}
|
||||
\newtheorem{remark}[theorem]{Remark}
|
||||
|
||||
\newcommand*{\fancyrefthmlabelprefix}{thm}
|
||||
\newcommand*{\fancyreflemlabelprefix}{lem}
|
||||
\newcommand*{\fancyrefcorlabelprefix}{cor}
|
||||
\newcommand*{\fancyrefproplabelprefix}{prop}
|
||||
\newcommand*{\fancyrefexlabelprefix}{ex}
|
||||
\newcommand*{\fancyrefremlabelprefix}{rem}
|
||||
\newcommand*{\fancyrefdeflabelprefix}{def}
|
||||
|
||||
% This is a dirty hack, we should go via \fref...name commands instead
|
||||
\frefformat{plain}{\fancyrefthmlabelprefix}{theorem #1}
|
||||
\Frefformat{plain}{\fancyrefthmlabelprefix}{Theorem #1}
|
||||
\frefformat{plain}{\fancyreflemlabelprefix}{lemma #1}
|
||||
\Frefformat{plain}{\fancyreflemlabelprefix}{Lemma #1}
|
||||
\frefformat{plain}{\fancyrefcorlabelprefix}{corollary #1}
|
||||
\Frefformat{plain}{\fancyrefcorlabelprefix}{Corollary #1}
|
||||
\frefformat{plain}{\fancyrefproplabelprefix}{proposition #1}
|
||||
\Frefformat{plain}{\fancyrefproplabelprefix}{Proposition #1}
|
||||
\frefformat{plain}{\fancyrefexlabelprefix}{example #1}
|
||||
\Frefformat{plain}{\fancyrefexlabelprefix}{Example #1}
|
||||
\frefformat{plain}{\fancyrefremlabelprefix}{remark #1}
|
||||
\Frefformat{plain}{\fancyrefremlabelprefix}{Remark #1}
|
||||
\frefformat{plain}{\fancyrefdeflabelprefix}{definition #1}
|
||||
\Frefformat{plain}{\fancyrefdeflabelprefix}{Definition #1}
|
||||
|
||||
|
||||
\newenvironment{eqcd*}[1][]{\begin{equation*}\begin{tikzcd}[#1]}{\end{tikzcd}\end{equation*}\ignorespacesafterend}
|
||||
\newenvironment{eqcd}[1][]{\begin{equation}\begin{tikzcd}[#1]}{\end{tikzcd}\end{equation}\ignorespacesafterend}
|
||||
\DeclareMathOperator{\Ty}{Ty}
|
||||
\DeclareMathOperator{\Tm}{Tm}
|
||||
\DeclareMathOperator{\Obj}{Obj}
|
||||
\DeclareMathOperator{\im}{im}
|
||||
\DeclareMathOperator{\TFib}{TFib}
|
||||
\newcommand*{\Fib}{\ensuremath{\mathrm{Fib}}\xspace}
|
||||
\newcommand*{\∂}{\mathop{∂}}
|
||||
%\DeclareMathOperator{\colim}{colim}
|
||||
\newcommand*{\colim}{\mathop{\mathrm{colim}}\limits}
|
||||
%\DeclareMathOperator{\Yo}{\mathbf{y}}
|
||||
\newcommand*{\Fam}{\mathcal{F}\mathrm{am}}
|
||||
\newcommand*{\Id}{\textsf{Id}}
|
||||
\newcommand*{\id}{\textsf{id}}
|
||||
\newcommand*{\Cat}[1]{\ensuremath{\mathcal{#1}}\xspace}
|
||||
\newcommand*{\A}{\Cat{A}}
|
||||
\newcommand*{\B}{\Cat{B}}
|
||||
\newcommand*{\C}{\Cat{C}}
|
||||
\newcommand*{\D}{\Cat{D}}
|
||||
\newcommand*{\E}{\Cat{E}}
|
||||
\newcommand*{\F}{\Cat{F}}
|
||||
\newcommand*{\I}{\ensuremath{\mathds{I}}\xspace}
|
||||
\newcommand*{\J}{\Cat{J}}
|
||||
\renewcommand*{\L}{\ensuremath{\mathcal{L}}\xspace} % wfs not cat
|
||||
\newcommand*{\R}{\ensuremath{\mathcal{R}}\xspace} % wfs not cat
|
||||
\newcommand*{\T}{\ensuremath{\mathrm{T}}\xspace}
|
||||
\newcommand*{\TCF}{\ensuremath{\mathrm{TC}}\xspace}
|
||||
\newcommand*{\FF}{\ensuremath{\mathrm{F}}\xspace}
|
||||
\newcommand*{\TFF}{\ensuremath{\mathrm{TF}}\xspace}
|
||||
\newcommand*{\CF}{\ensuremath{\mathrm{CF}}\xspace}
|
||||
\newcommand*{\Ct}{\ensuremath{\mathcal{C}_t}\xspace}
|
||||
|
||||
\newcommand*{\Hom}{\mathrm{Hom}}
|
||||
\newcommand*{\Ho}{\mathrm{Ho}}
|
||||
|
||||
\newcommand*{\type}{\, \mathsf{type}}
|
||||
\newcommand*{\cofib}{\, \mathsf{cofib}}
|
||||
\newcommand*{\formula}{\, \mathsf{formula}}
|
||||
\newcommand*{\face}{\, \mathsf{face}}
|
||||
\newcommand*{\mfill}{\mathsf{fill}}
|
||||
\newcommand*{\comp}{\mathsf{comp}}
|
||||
\newcommand*{\Path}[3]{\mathsf{Path}\,#1\,#2\,#3}
|
||||
\newcommand*{\p}{\textrm{p}}
|
||||
\newcommand*{\Set}{\ensuremath{\mathbf{Set}}\xspace}
|
||||
|
||||
\newcommand*{\skl}{\mathrm{skl}}
|
||||
\newcommand*{\op}{\mathsf{op}}
|
||||
\renewcommand*{\d}{\mathsf{d}}
|
||||
\renewcommand*{\i}{\textsf{i}}
|
||||
\renewcommand*{\t}{\mathsf{t}}
|
||||
\newcommand*{\⊗}{\mathbin{\hat{⊗}}}
|
||||
\newcommand*{\×}{\mathbin{\hat{×}}}
|
||||
\newcommand*{\at}{\mathbin{\hat{@}}}
|
||||
\newcommand*{\□}{\ensuremath{\widehat{□}}\xspace}
|
||||
\newcommand*{\Δ}{\ensuremath{\widehat{Δ}}\xspace}
|
||||
\newcommand*{\dCube}{\ensuremath{□_{∧∨}}\xspace}
|
||||
\newcommand*{\dcSet}{\ensuremath{\widehat{\dCube}}\xspace}
|
||||
\newcommand*{\FL}{\ensuremath{\textbf{FL}}\xspace}
|
||||
\newcommand*{\M}{\Cat{M}}
|
||||
\newcommand*{\arr}[1]{#1^{→}}
|
||||
\newcommand*{\FinSetOp}{\ensuremath{{\mathbf{FinSet}\rlap{\raisebox{0.75ex}{\rotatebox{90}{*}}}\raisebox{-0.3ex}{\rotatebox{90}{*}}}^\mathsf{op}}\xspace}
|
||||
\newcommand*{\FinSet}{\ensuremath{{\mathbf{FinSet}\rlap{\raisebox{0.75ex}{\rotatebox{90}{*}}}\raisebox{-0.3ex}{\rotatebox{90}{*}}}}\xspace}
|
||||
% \newcommand*{\Yo}{\mathbf{y}}
|
||||
%
|
||||
\newcommand*{\pb}{\arrow[dr,phantom,very near start, "\lrcorner"]}
|
||||
\newcommand*{\pu}{\arrow[dr,phantom,very near end, "\ulcorner"]}
|
||||
\newcommand*{\Ob}[1][0.66em]{\begin{tikzpicture}[y=#1,x=#1]
|
||||
\draw[line join=round] (0,1) -- (0,0) -- (1,0) -- (1,1);
|
||||
\end{tikzpicture}
|
||||
}
|
||||
|
||||
|
||||
% \AtBeginDocument{\DeclareMathSymbol{→}{\mathbin}{symbols}{"21}} % I don't know why this does not work
|
||||
\title{A uniform equivariant model for cubical Type Theory}
|
||||
\author{Dennis Frieberg}
|
||||
\date{\today}
|
||||
\begin{document}
|
||||
Hello World!
|
||||
\maketitle
|
||||
\tableofcontents
|
||||
\newpage
|
||||
\subfile{Preliminaries/Preliminaries}
|
||||
\subfile{main/Mainpart}
|
||||
\listoftodos
|
||||
% \printindex
|
||||
\printbibliography
|
||||
|
||||
\end{document}
|
||||
|
|
200
src/Preliminaries/Algebraic_Weak_Factorization_System.tex
Normal file
|
@@ -0,0 +1,200 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\begingroup
|
||||
\renewcommand*{\E}{\ensuremath{\mathrm{E}}\xspace}
|
||||
\newcommand*{\LF}{\ensuremath{\mathrm{L}}\xspace}
|
||||
\newcommand*{\RF}{\ensuremath{\mathrm{R}}\xspace}
|
||||
\renewcommand*{\d}{\ensuremath{\mathbf{d}}\xspace}
|
||||
\section{Algebraic Weak Factorization Systems}
|
||||
In this section, we will revisit the basic ideas of algebraic weak factorization systems (awfs).
|
||||
We won't use them much explicitly throughout this paper, but we need one major result about them.
|
||||
Also, while we don't talk about them explicitly, their ideas permeate through most of the arguments.
|
||||
We will only repeat the most basic definitions and ideas, which will be enough to understand
|
||||
this document. For a much more complete and in-depth discussion, see
|
||||
\cites{riehlAlgebraicModelStructures2011}{bourkeAlgebraicWeakFactorisation2016}{bourkeAlgebraicWeakFactorisation2016a}.
|
||||
This introduction follows the approach of \cite[Section 2]{riehlAlgebraicModelStructures2011}.
|
||||
If the reader is already familiar with this concept, they might safely skip this section.
|
||||
|
||||
We start this section with some observations about regular functorial weak factorization systems (wfs).
|
||||
For the remainder of this section we write \({\E : \A^→ → \A^𝟛}\) for the factorization functor of some functorial
|
||||
wfs \((\L,\R)\). We are going to write \(\d^0 , \d^2 : \A^𝟛 → \A^→\) for the functors induced by \(d_0, d_2 : 𝟚 → 𝟛\).
|
||||
We write \LF and \RF for the endofunctors \(\A^→ → \A^→\) that are given by
|
||||
\(\d^2\E\) and \(\d^0\E\), the projection to the left or right factor of the factorization.
|
||||
For a given \(f : X → Y\), we call the factoring object \(\E_f\).
|
||||
\begin{eqcd*}[column sep =small]
|
||||
& \E_f \arrow[rd,"\RF(f)"] & \\
|
||||
X \arrow[rr,"f"] \arrow[ru,"\LF(f)"] & & Y
|
||||
\end{eqcd*}
|
||||
|
||||
For now, we are interested in witnessing that some map is in the right class (or dually, the left class). Or in other words,
|
||||
attaching some kind of data to a right map from which we can deduce solutions to all the required lifting
|
||||
problems. This is indeed possible. Assume that \(f\) is a right map; then a retraction \(r_f\) of \(\LF(f)\) would suffice.
|
||||
Assume we had some left map \(f'\) and a lifting problem given by \((g,h)\). We can then factor this with the help of \E.
|
||||
\begin{eqcd*}[row sep=huge, column sep=huge]
|
||||
X' \arrow[dd,"f'"', bend right] \arrow[d,"\LF(f')"] \arrow[r,"g"] & X \arrow[d,"\LF(f)"] \arrow[dd,"f",bend left=50]\\
|
||||
\E_{f'} \arrow[d,"\RF(f')"] \arrow[r, "ϕ_{g,h}"] & \E_f \arrow[d,"\RF(f)"] \arrow[u, "r_f", bend left]\\
|
||||
Y' \arrow[r,"h"] \arrow[ur, dashed] & Y
|
||||
\end{eqcd*}
|
||||
And then compose the solution for the whole lifting problem from the lift of the problem \((\LF(f)g, h)\) with
|
||||
\(r_f\). That this is a solution is guaranteed by \(r_f\) being a retraction: \(r_f \LF(f) = \id\). Dually, we can
|
||||
witness \(f'\) being a left map by supplying a split \(s_{f'}\) of \(\RF(f')\). If we do both at the same time
|
||||
we automatically get a canonical choice of lifts.
|
||||
\begin{eqcd}[row sep=huge, column sep=huge] \label{eq:coalgebraliftsalgebra}
|
||||
X' \arrow[dd,"f'"', bend right=50] \arrow[d,"\LF(f')"] \arrow[r,"g"] & X \arrow[d,"\LF(f)"] \arrow[dd,"f",bend left=50]\\
|
||||
\E_{f'} \arrow[d,"\RF(f')"'] \arrow[r, "ϕ_{g,h}"] & \E_f \arrow[d,"\RF(f)"'] \arrow[u, "r_f", bend left]\\
|
||||
Y' \arrow[r,"h"] \arrow[u, bend right, "s_{f'}"'] & Y
|
||||
\end{eqcd}
|
||||
Namely, for a lifting problem \((g,h)\) the lift is the map \(r_f ϕ_{g,h} s_{f'}\); and if we make \(r_f\) and \(s_{f'}\) part of the
|
||||
data of a right (respectively left) map, the chosen lifts are even functorial.
|
||||
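For concreteness, the verification that \(r_f ϕ_{g,h} s_{f'}\) really solves the lifting problem \((g,h)\) is a two-line computation (a sketch, using only the naturality of \(\E\) and the defining equations \(r_f\LF(f) = \id\), \(fr_f = \RF(f)\), \(s_{f'}f' = \LF(f')\) and \(\RF(f')s_{f'} = \id\)):
\begin{align*}
  (r_f ϕ_{g,h} s_{f'}) f' &= r_f ϕ_{g,h} \LF(f') = r_f \LF(f) g = g,\\
  f (r_f ϕ_{g,h} s_{f'}) &= \RF(f) ϕ_{g,h} s_{f'} = h \RF(f') s_{f'} = h.
\end{align*}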
The next question one might rightfully ask is whether we can always find such a witness. Happily, the answer is yes.
|
||||
We just need to make up the right lifting problem.
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[ampersand replacement=\&,column sep = large, row sep = large]
|
||||
X' \arrow[d,"f'"'] \arrow[r,"\LF(f')"]\& \E_{f'} \arrow[d, "\RF(f')"] \& \& X \arrow[r,equal] \arrow[d,"\LF(f)"'] \& X \arrow[d,"f"] \\
|
||||
Y' \arrow[r,equal] \arrow[ru, "s_{f'}", dashed] \& Y' \& \& \E_f \arrow[r,"\RF(f)"] \arrow[ru,dashed,"r_f"] \& Y
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
We can also repack this information in a slightly different way: \(f\) is a right map exactly if \(f\) is a retract of
|
||||
\(\RF(f)\) in \(\faktor{\A}{Y}\). And \(f'\) is a left map precisely if \(f'\) is a retract of \(\LF(f')\) in \(\faktor{X'}{\A}\).
|
||||
\begin{eqcd*}
|
||||
X
|
||||
\arrow[d,"f"]
|
||||
\arrow[r, "\LF(f)"]
|
||||
& \E_f
|
||||
\arrow[d, "\RF(f)"]
|
||||
\arrow[r, "r_f", dashed]
|
||||
& X
|
||||
\arrow[d, "f"]
|
||||
&
|
||||
& X'
|
||||
\arrow[d,"f'"]
|
||||
\arrow[r, equal]
|
||||
& X'
|
||||
\arrow[r, equal]
|
||||
\arrow[d, "\LF(f)"]
|
||||
& X'
|
||||
\arrow[d, "f'"]
|
||||
\\
|
||||
Y
|
||||
\arrow[r,equal]
|
||||
& Y
|
||||
\arrow[r, equal]
|
||||
& Y
|
||||
&
|
||||
& Y'
|
||||
\arrow[r, "s_{f'}", dashed]
|
||||
& \E_{f'}
|
||||
\arrow[r, "\RF(f')"]
|
||||
& Y'
|
||||
\end{eqcd*}
|
||||
If we focus on the diagram at the left-hand side, we can also see it as a morphism \(η_f : f → \RF(f)\) in \(\A^→\), completely dictated
|
||||
by \(\E\) and thus natural in \(f\), and a morphism \(α : \RF(f) → f\), such that \(αη_f = \id\).
|
||||
If we reformulate what we have just observed, we get to the following.
|
||||
\begin{observation}
|
||||
In a functorial wfs \((\L,\R)\) on \A, \(\LF : \A^→ → \A^→ \) is a copointed endofunctor and \RF is a pointed endofunctor,
|
||||
where the counit \(ε : \LF → \Id\) is given by the squares \(ε_f ≔
|
||||
\begin{tikzcd}
|
||||
\cdot \arrow[d,"\LF(f)"] \arrow[r,equal] & \cdot \arrow[d,"f"]\\
|
||||
\cdot \arrow[r,"\RF(f)"] & \cdot
|
||||
\end{tikzcd}
|
||||
\)
|
||||
and the unit \(η : \Id → \RF\) by \( η_f ≔
|
||||
\begin{tikzcd}
|
||||
\cdot \arrow[d,"f"] \arrow[r,"\LF(f)"'] & \cdot \arrow[d,"\RF(f)"'] \\
|
||||
\cdot \arrow[r,equal] & \cdot
|
||||
\end{tikzcd}
|
||||
\). \L is precisely the class of \LF-coalgebras and \R the class of \RF-algebras.
|
||||
\end{observation}%
|
||||
One should think of these (co)algebras as morphisms with a choice of liftings.
|
||||
At this point, we might try to get rid of the wfs \((\L,\R)\) as input data and try to recover it from the
|
||||
factorization functor. And that works, via the methods described above, but only if we know that this factorization
|
||||
functor comes from a wfs. If we just start with an arbitrary factorization functor, we still get that all
|
||||
\RF-algebras lift on the right against all \LF-coalgebras and vice versa,
|
||||
but in general \(\RF(f)\) will not be an \RF-algebra.
|
||||
One way to solve this problem is by adding a second natural transformation \(\RF\RF → \RF\), such that
|
||||
the necessary data commute, making \RF a monad (and dually \LF a comonad).
|
||||
|
||||
\begin{definition}\label{def:awfs:awfs}
|
||||
An \emph{algebraic weak factorization system (awfs)} is given by a functor \(\E : \A^→ → \A^𝟛\) and two natural transformations
|
||||
\(δ\) and \(μ\). We write \((\LF,ε)\) where \(\LF ≔ \d^2\E\) and \((\RF,η)\) where \(\RF ≔ \d^0\E\) for the two induced
|
||||
(co)pointed endofunctors \(\A^→ → \A^→\). We require that \((\LF,ε,δ)\) is a comonad and \((\RF,η,μ)\) is a monad.\footnote{%
|
||||
Most modern definitions additionally require that a certain induced natural transformation be a distributive law of the comonad
|
||||
over the monad. While we recognise its technical importance, we feel that it would distract from our goal of getting the
|
||||
general ideas across.}
|
||||
\end{definition} \todo{this definition is terrible but i can't get it into nice sounding sentence}
|
||||
\begin{notation}
|
||||
If the rest of the data is clear from context, we will only specify the comonad and monad and call \((\LF,\RF)\) an
|
||||
algebraic weak factorization system.
|
||||
\end{notation}
|
||||
|
||||
\begin{remark}\label{rem:awfs:retractClosure}
|
||||
Neither the \LF-coalgebras nor the \RF-algebras are in general closed under retracts, if we think of them
|
||||
as coalgebras for the comonad and algebras for the monad. But we get a full wfs by taking the \LF-coalgebras and \RF-algebras if we only
|
||||
regard them as (co)algebras for the (co)pointed endofunctors, which is the same as the retract closure of the (co)algebras in the (co)monad sense.
|
||||
\end{remark}
|
||||
|
||||
In light of this remark we can pass back from any awfs to a wfs. We can think of this operation as forgetting the choice
|
||||
of liftings.
|
||||
\begin{definition}
|
||||
Let \((\LF,\RF)\) be an awfs. We will call \((\L,\R)\) the underlying wfs of \((\LF,\RF)\), where \L is the class
|
||||
of \LF-coalgebras (as a pointed endofunctor) and \R is the class of \RF-algebras (as a pointed endofunctor).
|
||||
\end{definition}
|
||||
\begin{remark}
|
||||
Dropping the algebraic structure is not a lossless operation. Even though the (co)pointed endofunctors and (co)algebras
|
||||
with respect to those endofunctors
|
||||
can be recovered (with enough classical principles), the unit and counit might not have been unique, and hence neither
|
||||
is the category of (co)algebras with respect to the (co)monad structure.
|
||||
\end{remark}
|
||||
|
||||
We will end this discussion with a few definitions and a theorem that we will later need. While we think the reader
|
||||
is now well prepared to understand the statements and their usefulness, we are aware that we didn't cover
|
||||
enough theory to explain their inner workings.\todo{is this wording ok? should I not do it? I don't want to scare
|
||||
people}
|
||||
|
||||
\begin{definition}\label{def:awfs:rightMaps}
|
||||
Let \J be a category and \(J : \J → \A^→\) be a functor. Then an object of the category \(J^⧄\) of \emph{right \(J\)-maps}
|
||||
is a pair \((f,j)\) with \(f\) in
|
||||
\(\A^→\) and \(j\) a function that assigns for every object \(i\) in \J, and for every
|
||||
lifting problem
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=huge,row sep=huge]
|
||||
L \arrow[r,"α"] \arrow[d, "J(i)"'] & X \arrow[d,"f"] \\
|
||||
M \arrow[r,"β"] \arrow[ur, dashed,"{j(i,α,β)}"] & Y
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
a specified lift \(j(i,α,β)\), such that for every \((a,b) : k → i\) in \J, the diagram
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=huge, row sep = huge]
|
||||
L' \arrow[r,"a"] \arrow[d,"J(k)"'] & L \arrow[r,"α"] \arrow[d,"J(i)", near end] & X \arrow[d,"f"] \\
|
||||
M' \arrow[urr, dashed, near start, "{j(k,αa,βb)}"] \arrow[r,"b"] & M \arrow[ur, dashed, "{j(i,α,β)}"'] \arrow[r,"β"] & Y
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
commutes. And morphisms are morphisms in \(\A^→\) that preserve these liftings.
|
||||
\end{definition}
|
||||
\begin{remark}
|
||||
This is even a functor \((−)^⧄ : \faktor{\mathbf{Cat}}{\A^→} → \left(\faktor{\mathbf{Cat}}{\A^→}\right)^\op\)
|
||||
\end{remark}
|
||||
\begin{remark}
|
||||
There is an adjoint notion of left lifting.
|
||||
\end{remark}
|
||||
\begin{remark}
|
||||
This is a strong generalization of the usual case, where one talks about sets (or classes) that lift against each other.
|
||||
If one believes in strong enough choice principles, then the usual case is equivalent
|
||||
to \(\J\) being a discrete category and \(J\) some monic functor.
|
||||
\end{remark}
|
||||
We will now turn to a theorem that provides us with an awfs whose right maps are the right \(J\)-maps, for a given functor \(J\).
|
||||
It is (for obvious reasons) known as Garner's small object argument.
|
||||
|
||||
\begin{theorem}[{Garner \cites{garnerUnderstandingSmallObject2009}[Theorem 2.28, Lemma 2.30]{riehlAlgebraicModelStructures2011}}]\label{awfs:smallObject}
|
||||
Let \(\A\) be a cocomplete category satisfying either of the following conditions.
|
||||
\begin{itemize}
|
||||
\item[(\(*\))] Every \(X ∈ \A\) is \(α_X\)-presentable for some regular cardinal \(α_X\).
|
||||
\item[(\(\dagger\))] Every \(X ∈ \A\) is \(α_X\)-bounded with respect to some proper, well-copowered
|
||||
orthogonal factorization system on \(\A\), for some regular cardinal \(α_X\).
|
||||
\end{itemize}
|
||||
Let \( J : \mathcal{J} → \A^→\) be a category over \(\A^→\), with \(\mathcal{J}\) small. Then the free awfs on \(\mathcal{J}\) exists,
|
||||
its category of \RF-algebras is isomorphic to \(J^⧄\), and the category of \RF-algebras is retract closed.
|
||||
\end{theorem}
|
||||
|
||||
\endgroup
|
||||
\end{document}
|
89
src/Preliminaries/Categorical_Semantics.tex
Normal file
|
@@ -0,0 +1,89 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\section{Categorical Semantics}
|
||||
We will need some basic knowledge of categorical semantics for type theory. A somewhat old, but still
|
||||
instructive read is \cite{jacobsCategoricalLogicType1999}. We will recap categories with
|
||||
families, as we will encounter them more often later.
|
||||
|
||||
\subsection{Category with Families}
|
||||
A category with families \cite{dybjerInternalTypeTheory2003, castellanCategoriesFamiliesUnityped2020a, hofmannSyntaxSemanticsDependent} consists of a category of contexts and context substitutions, together with an assignment
|
||||
to each context of its (semantical) types, which get interpreted as the possible types in that context, and a second assignment of the types in
|
||||
a context to their (semantical) terms. These have to fulfill some rules, so that they actually behave like contexts, types and terms.
|
||||
We follow Dybjer in the way we organize this data. First we define the category \(\Fam\).
|
||||
|
||||
\begin{definition}
|
||||
\(\Fam\) is the category which has as objects \((I,\{A_i\}_{i ∈ I})\), where \(I\) is a set and \(\{A_i\}_{i ∈ I}\) is
|
||||
a family of sets indexed by \(I\). A morphism of \(\Fam\), \(f \colon (I,\{A_i\}_{i ∈ I}) → (J, \{B_j\}_{j ∈ J})\)
|
||||
consists of a pair \((f,\{f_i\}_{i ∈ I})\), where \(f\) is a function \(I → J\) and \(f_i\) is a function
|
||||
\(A_i → B_{f(i)}\).
|
||||
\end{definition}
|
||||
|
||||
% TODO this typesets ugly and we want to make this the definition and the definition a remark
|
||||
% If we do this we need to change the technicallities in the next definition accordingly
|
||||
\begin{remark}
|
||||
Alternatively this is just the category \(\mathcal{S}et^→\)
|
||||
\end{remark}
|
||||
|
||||
|
||||
|
||||
% call elements of C contexts
|
||||
% pi_1 is not great
|
||||
\begin{definition}[Category with Families (CwF)\index{category!with families}\index{CwF}]
|
||||
A category with families consists of a category \(\C\) with a terminal object and a functor \(T \colon \C^{\op} → \Fam\).
|
||||
Before we continue the definition we will introduce some very helpful notation.
|
||||
Let \(Γ\) and \(Δ\) be objects of \(\C\) and \(σ \colon Δ → Γ \). We call
|
||||
\begin{itemize}
|
||||
\item \(\Ty(Γ) \coloneqq π_1 ( T (Γ))\), the types in context \(Γ\).
|
||||
\item whenever \(A ∈ \Ty(Γ)\), \(\Tm(Γ, A) \coloneqq (π_2 (T (Γ)))_A\), the terms of type \(A\) in context \(Γ\).
|
||||
\item \(A[σ] \coloneqq π_1 (T (σ))(A) ∈ \Ty(Δ)\) the substitution of \(A\) along \(σ\).
|
||||
\item whenever \(t ∈ \Tm(Γ, A)\), \(t[σ] \coloneqq (π_2 (T(σ)))_A(t) ∈ \Tm(Δ, A[σ])\) the substitution
|
||||
of \(t\) along \(σ\).
|
||||
\end{itemize}
|
||||
For a CwF we require a context comprehension operation, that assigns to every context \(Γ ∈ \C\) and type
|
||||
\(A ∈ \Ty(Γ)\), a context
|
||||
\(Γ.A ∈ \C\), a morphism \(\p^1_{Γ.A} \colon Γ.A → Γ\) and a term \(\p^2_{Γ.A} ∈ \Tm(Γ.A,A[\p^1_{Γ.A}])\), such that
|
||||
for every substitution \(γ \colon Δ → Γ \) and every term \(t ∈ \Tm(Δ,A[γ])\) there exists a unique substitution
|
||||
\(⟨ γ,t ⟩ \colon Δ → Γ.A \), which fulfills the conditions
|
||||
\(\p^1_{Γ.A} ∘ ⟨γ,t⟩ = γ\) and \(\p^2_{Γ.A}[⟨γ,t⟩] = t\).
|
||||
\end{definition}
|
||||
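A small worked consequence of this universal property (a sketch, using only the two defining equations and the functoriality of \(T\)): for a further substitution \(σ \colon Θ → Δ\) one has
\begin{equation*}
  ⟨γ,t⟩ ∘ σ = ⟨γ ∘ σ, t[σ]⟩,
\end{equation*}
since \(\p^1_{Γ.A} ∘ (⟨γ,t⟩ ∘ σ) = γ ∘ σ\) and \(\p^2_{Γ.A}[⟨γ,t⟩ ∘ σ] = (\p^2_{Γ.A}[⟨γ,t⟩])[σ] = t[σ]\), so \(⟨γ,t⟩ ∘ σ\) satisfies the two conditions characterizing \(⟨γ ∘ σ, t[σ]⟩\) and uniqueness applies.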
|
||||
\begin{remark}
|
||||
Some literature does not require the existence of a terminal object. We decided to require it, because for all
|
||||
our purposes we will need it anyway.
|
||||
\end{remark}
|
||||
|
||||
\begin{remark}
|
||||
For the initiated: in other ways of building models for dependent type theory that are based on some form of
|
||||
slice categories (e.g. categories with attributes), the morphism \(\p^1_{Γ.A}\) and the term \(\p^2_{Γ.A}\) correspond to the
|
||||
first and second projection from the
|
||||
dependent product \(Γ.A\), and the conditions imposed on them correspond to its universal property.
|
||||
\end{remark}
|
||||
|
||||
% TODO how do sections and Terms relate
|
||||
% TODO fix semantic bla naming conventions above
|
||||
|
||||
\subsubsection{Semantics}
|
||||
We assume a fixed CwF in the background.
|
||||
|
||||
As the above naming conventions suggest, we want to interpret contexts as semantic contexts, types as semantic
|
||||
types and terms as semantic terms. So we define a partial interpretation function \( ⟦-⟧ \) mapping contexts
|
||||
to semantic contexts, types in context \(Γ\) to \( \Ty(⟦Γ⟧) \) and terms of type \(A\) in context \( Γ \)
|
||||
to \( \Tm(⟦Γ⟧,⟦A⟧) \). We will continue this section by defining that function. We will see that for various
|
||||
type-theoretic constructions we need to impose additional conditions on our CwF. We will develop them along the way.
|
||||
|
||||
First the contexts:
|
||||
|
||||
\begin{itemize}
|
||||
\item \( ⟦\diamond⟧ ≔ ⊤\)
|
||||
\item \(⟦Γ,x : A⟧ ≔ ⟦Γ⟧.⟦Γ⊢A⟧ \)
|
||||
\end{itemize}
|
||||
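As an illustrative unfolding of these two clauses (with \(A\) a closed type and \(B\) a type in context \(x:A\)):
\begin{equation*}
  ⟦x:A, y:B⟧ = ⟦x:A⟧.⟦x:A ⊢ B⟧ = (⊤.⟦\diamond ⊢ A⟧).⟦x:A ⊢ B⟧
\end{equation*}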
|
||||
Now we talk about types and their terms. Before we jump into type constructors, we see how we deal with assumptions.
|
||||
|
||||
\begin{itemize}
|
||||
\item \( ⟦Γ,x:A⊢x:A⟧ ≔ \p^2_{⟦Γ,x:A⟧} \)
|
||||
\item \( ⟦Γ,x:A,Δ,y:B⊢x:A⟧ ≔ ⟦Γ,x:A,Δ⊢x:A⟧[\p^1_{⟦Γ,x:A,Δ,y:B⟧}] \)
|
||||
\end{itemize}
|
||||
|
||||
Now we are ready to dive into the semantics of various types.
|
||||
\end{document}
|
373
src/Preliminaries/Cubical_Set.tex
Normal file
|
@@ -0,0 +1,373 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\section{Cubical Sets}
|
||||
\subsection{Category of Cubes}
|
||||
%TODO auto correct
|
||||
%TODO proof read
|
||||
%TODO Dedekind cubes
|
||||
%TODO connections
|
||||
|
||||
The first question we need to answer is what site to consider. In the simplicial world there is more
|
||||
or less a consensus about what exactly the site should be. In the cubical world there are multiple models
|
||||
with slightly different sites. Our main protagonist will be the cartesian model, but we will also
|
||||
need to introduce the Dedekind model, as it will appear in an argument later. For a comparative
|
||||
analysis we direct the reader to \cite{cavalloUnifyingCubicalModels2016} and \cite{ABCHFL}.
|
||||
|
||||
\subsubsection{Cartesian cubes}
|
||||
|
||||
The most uniform description is probably in the form of Lawvere theories,
|
||||
but our site has a more direct description \cite{mathematicalsciencesresearchinstitutemsriEquivariantUniformKan2020}.
|
||||
|
||||
\begin{definition}
|
||||
Let \(\mathrm{FinSet}\) be the category whose objects are the subsets of \(ℕ\) of the form \(\{0,\dots,n\}\), for some
|
||||
\(n ∈ ℕ\), and as morphisms functions between them.
|
||||
\end{definition}
|
||||
|
||||
\begin{remark}
|
||||
Up to equivalence this is the category of finite sets. Choosing a skeleton will circumvent later size issues, but
|
||||
for convenience we will pretend that these are finite sets and not always reindex our sets to have an initial segment
|
||||
of \(ℕ\).
|
||||
\end{remark}
|
||||
|
||||
\begin{definition}\label{def:cartCubeSet}
|
||||
Let \( □ ≔ {\mathrm{FinSet}\rlap{\raisebox{0.75ex}{\rotatebox{90}{*}}}\raisebox{-0.3ex}{\rotatebox{90}{*}}}^\mathsf{op}\)
|
||||
be the opposite category of double-pointed finite sets.
|
||||
\end{definition}
|
||||
|
||||
An alternative definition would be
|
||||
\begin{definition}\label{def:cartCubeTheory}
|
||||
Let \(□\) be the Lawvere-theory of the type theory with a single type \I, and
|
||||
two constants \(⊥ : \I\) and \(⊤ : \I\).
|
||||
\end{definition}
|
||||
|
||||
\begin{remark}
|
||||
In light of \fref{def:cartCubeSet} and \fref{def:cartCubeTheory} we sometimes talk about morphisms
|
||||
as functions \(X + 2 → Y + 2\) preserving both points in \(2\) or equivalently ordinary functions \(X → Y+2\).
|
||||
\end{remark}
|
||||
|
||||
We will now spend some time exploring why this might be viewed as the cube category.
|
||||
We write \(\{⊥|a,b,c|⊤\}\) for double pointed finite sets, where \(⊥\) and \(⊤\) are the
|
||||
two distinguished points. A cube of dimension \(n\) is now interpreted as such a set
|
||||
with \(n+2\) (\(n\) without the distinguished points) many elements. We think of these
|
||||
as dimension variables. We could for example think about the zero dimensional cube
|
||||
(a point) \(\{⊥|⊤\}\) and the 2 dimensional cube (a square) \(\{⊥|a,b|⊤\}\).
|
||||
How many maps do we expect from the square into the point? Exactly one. And this is
|
||||
also exactly what we get: such a map in \( \FinSetOp \) corresponds to a function
|
||||
\(\{⊥|⊤\} → \{⊥|a,b|⊤\}\) preserving \(⊥\) and \(⊤\), this is already unique.
|
||||
In the other direction we have four maps. These encode the corners. For example the corner “\((⊥,⊥)\)”
|
||||
gets encoded by sending \(a\) and \(b\) to \(⊥\).
|
||||
|
||||
Let's increase the dimension a bit and look at the maps from a 1-dimensional cube (a line segment)
|
||||
\(\{⊥|x|⊤\}\) into the square. These correspond to functions \(\{a,b\} → \{⊥,x,⊤\}\). We
|
||||
can think of this as a dimensional description of a line in a square. We have 2 dimensions in which
|
||||
the line can move, \(a\) and \(b\). And in these two dimensions the line can either be constant (sending
|
||||
this dimension to \(⊥\) or \(⊤\)), or can advance along its dimension (sending the dimension to \(x\)).
|
||||
So we get the degenerate corner points (as lines), by sending both of \(a\) and \(b\) to \(⊥\) or \(⊤\).
|
||||
We get the four sides of the square by sending one of \(a\) or \(b\) to \(⊥\) or \(⊤\). The two sides
|
||||
that run along the dimension \(a\) send \(a\) to \(x\).
|
||||
One function is left, sending both \(a\) and \(b\) to \(x\). This is the line that advances in both
|
||||
dimensions. Or in other words the diagonal.
|
||||
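To summarize the count behind this discussion: there are \(3^2 = 9\) functions \(\{a,b\} → \{⊥,x,⊤\}\), and hence nine lines in the square,
\begin{equation*}
  \underbrace{4}_{\text{constant corners}} + \underbrace{4}_{\text{sides}} + \underbrace{1}_{\text{diagonal}} = 9 .
\end{equation*}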
|
||||
\begin{remark}
|
||||
Our cube category is closed under taking products. This differs from the simplicial case. This will turn out to be very handy and
|
||||
important later.
|
||||
\end{remark}
|
||||
|
||||
As we will need some of these elements throughout the document again, we will give them names.
|
||||
\begin{definition}[Notation for cubes]\label{def:cubeNotation}
|
||||
We call:
|
||||
\begin{itemize}
|
||||
\item \([0] ≔ \{⊥|⊤\}\)
|
||||
\item \([1] ≔ \{⊥|x|⊤\}\)
|
||||
\item \(\deg(x) ≔ |x| - 2\)
|
||||
\item \(\mathrm{d}_k : [0] → [1]\) with \(k ∈ \{⊤,⊥\}\), the map given by the function that sends \(x \mapsto k\)
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includesvg[inkscapearea=nocrop,inkscapedpi=500]{figures/square_example_picture.svg}
|
||||
\caption{A square with some lines mapped into it}
|
||||
\todo[inline]{fix 0 to \(⊥\) and same for 1}
|
||||
\end{figure}
|
||||
|
||||
|
||||
Also, while \(□\) is not a strict Reedy category (as \(Δ\) is), it almost is, namely an Eilenberg-Zilber category.
|
||||
So most of Reedy theory from the simplicial case can be salvaged. This will play an integral part in the
|
||||
proof that our model structure on \□ is equivalent to spaces.
|
||||
|
||||
\begin{definition}[generalized Reedy-category] %TODO add cite nlab
|
||||
A \emph{generalized Reedy category} is a category \C together with two
|
||||
wide subcategories \(\C_+\) and \(\C_-\) and a function \(d:\mathrm{ob}(\C) → α\) called \emph{degree},
|
||||
for some ordinal \(α\), such that
|
||||
\begin{enumerate}
|
||||
\item every non-isomorphism in \(\C_+\) raises degree,
|
||||
\item every non-isomorphism in \(\C_-\) lowers degree,
|
||||
\item every isomorphism in \C preserves degree,
|
||||
\item \(\C_+ ∩ \C_-\) is the core of \C
|
||||
\item every morphism factors as a map in \(\C_-\) followed by a map in \(\C_+\), uniquely up
|
||||
to isomorphism
|
||||
\item if \(f ∈ \C_-\) and \(θ\) is an isomorphism such that \(θf = f\), then \(θ = \id\)
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
|
||||
This will be an important concept later, but in our special case we can do even better.
|
||||
|
||||
\begin{definition}[Eilenberg-Zilber category] %TODO add cite nlab
|
||||
An \emph{Eilenberg-Zilber category} or short \emph{EZ-category} is a small category \C
|
||||
equipped with a function \(d:\mathrm{ob}(\C)→ℕ\) such that
|
||||
\begin{enumerate}
|
||||
\item For \(f : x → y\) a morphism of \C: \label{EZ:1}
|
||||
\begin{enumerate}
|
||||
\item If \(f\) is an isomorphism, then \(d(x) = d(y)\).
|
||||
\item If \(f\) is a noninvertible monomorphism, then \(d(x) < d(y)\).
|
||||
\item If \(f\) is a noninvertible split epimorphism, then \(d(x) > d(y)\).
|
||||
\end{enumerate}
|
||||
\item Every morphism factors as a split epimorphism followed by a monomorphism. \label{EZ:2}
|
||||
\item Every pair of split epimorphisms in \C has an absolute pushout. \label{EZ:3}
|
||||
\end{enumerate}
|
||||
\end{definition}
|
||||
|
||||
\begin{remark}
|
||||
Clearly every EZ-category is a generalized Reedy category.
|
||||
\end{remark}
|
||||
|
||||
|
||||
\begin{theorem}\label{thm:CisEZ}
|
||||
\(□\) together with \(\deg\) is an EZ-category
|
||||
\end{theorem}
|
||||
|
||||
Before we give a proof we will give a technical lemma about absolute pushouts of split epis.
|
||||
\begin{lemma}\label{absolutePushout} %TODO cite nlab absolute pushout
|
||||
Let
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
A \arrow[r, two heads, "b"'] \arrow[d, two heads, "c"] & B \arrow[l,bend right, dashed, "b'"'] \arrow[d, two heads, "d_1"'] \\
|
||||
C \arrow[r, "d_2"] \arrow[u,bend left, dashed,"c'"] & D \arrow[u, bend right, dashed,"d_1'"']
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
be a diagram in a category \C, where the two-headed arrows are split epis with the indicated sections, such that \(bc' = d_1'd_2\). Then it is already a pushout square.
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
We look at an arbitrary cocone
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
A \arrow[r, two heads, "b"] \arrow[d, two heads, "c"] & B \arrow[d, "e_1"] \\
|
||||
C \arrow[r, "e_2"] & E
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
We can compose this square with \((c',d_1')\) yielding
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
C \arrow[d,"c'"] \arrow[r, "d_2"] \arrow[dd,bend right, "\id"'] & D \arrow[d,"d_1'"] \\
|
||||
A \arrow[r, two heads, "b"] \arrow[d, two heads, "c"] & B \arrow[d, "e_1"] \\
|
||||
C \arrow[r, "e_2"] & E
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
Our candidate map is \(e_1d_1' : D → E\). We need to show that it commutes with \(e_1\) and \(d_1\) as well as
|
||||
\(e_2\) and \(d_2\). The second case \(e_1d_1'd_2 = e_2\) is directly obvious from the diagram.
|
||||
To check that \(e_1d_1'd_1 = e_1\) it is enough to check that \(e_1d_1'd_1b = e_1b\), as \(b\) is epic. This is a
|
||||
straightforward diagram chase, if one adds all the undrawn arrows to the diagram above. Since \(d_1\) is epic, \(e_1d_1'\) is unique in already fulfilling one of the two conditions.
|
||||
\end{proof}
|
||||
|
||||
\begin{remark}
|
||||
As we see in the proof, we don't need \(b\) to be split, but our application needs the split, as split epis are
|
||||
preserved by pushouts and general epis are not.
|
||||
\end{remark}
|
||||
\begin{remark}
|
||||
As split epis and commutativity are preserved by all functors, pushouts of this form are absolute pushouts.
|
||||
\end{remark}
|
||||
|
||||
As we will use the following statement multiple times in the proof we put it into its own lemma.
|
||||
\begin{lemma} \label{EZ:split}
|
||||
Every epi in \(□\) splits. Or dually every mono in \(\FinSet\) has a retraction.
|
||||
This can be constructed in such a way that for a mono \(f\) in \(\FinSet\) the corresponding retraction
|
||||
sends everything not in the image of \(f\) to \(⊥\).
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
Let \(f\) be an epi in \(□\). It is thus a monomorphism in \(\FinSet\). We construct a retraction
|
||||
\(g\) (that will be a split in \(□\)). Let \(g(x)\) be the unique preimage of \(x\) under \(f\) if the preimage is not empty. If it is empty we choose
|
||||
any element (for example \(⊥\)). It is trivial to verify that this is a retraction. And as \(f\) needs to
|
||||
preserve the distinguished points, \(g\) does too.
|
||||
\end{proof}
|
||||
|
||||
\begin{proof}[Proof of theorem \ref{thm:CisEZ}]
|
||||
Property \ref{EZ:1} follows from basic facts about surjectivity and injectivity between finite sets.
|
||||
Property \ref{EZ:2} requires us to find an (epi, mono) factorization in \(\FinSet\). This factorization
|
||||
can be obtained in the same way one would obtain it in \(\Set\), by factoring a morphism through its image.
|
||||
It is straightforward to verify that the distinguished points are preserved by those maps. And the fact
|
||||
that the epi (in \(□\)) splits follows from \ref{EZ:split}.
|
||||
|
||||
For property \ref{EZ:3}, we only need to find a pushout and splits of the form described in \ref{absolutePushout}.
|
||||
So let \(c : A → C\) and \(b : A → B\) be split epis. We will do this by
|
||||
constructing the dual diagram in \(\FinSet\). We will keep the names for the morphisms. As pullbacks over non-empty diagrams
|
||||
work in \(\FinSet\) like in \(\Set\) we can construct a pullback. Now we must construct the
|
||||
dotted arrows that satisfy the dual properties of lemma \ref{absolutePushout}.
|
||||
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
D \arrow[r, tail ,"b^*c"] \arrow[d,tail, "c^*b"] \pb & B \arrow[d,tail, "b"'] \\
|
||||
C \arrow[r,tail,"c"] \arrow[u,dashed, two heads, "(c^*b)'", bend left] & A \arrow[l, bend left,dashed, two heads, "c'"] \arrow[u,bend right,dashed,two heads ,"b'"']
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
Let \(c'\), \(b'\) and \((c^*b)'\) be retractions of \(c\), \(b\) and \(c^*b\) that send everything that is not in
|
||||
their image to \(⊥\).
|
||||
We will show the required commutativity by case distinction.
|
||||
Let \(x ∈ C\). Note that \(x ∈ \im c^*b\) if and only if \(c(x) ∈ \im b\) as this is a pullback diagram. Thus it is
|
||||
immediately clear that if \(x\) is not contained in \(\im c^*b\), then \((b^*c)(c^*b)'(x) = ⊥ = b'c(x)\). Let us turn to
|
||||
the case that \(x ∈ \im c^*b \). As the map \(c^*b\), restricted to those \(x\), is surjective, it suffices to check
|
||||
\( b'c(c^*b) = (b^*c)(c^*b)'(c^*b) \).
|
||||
\begin{equation*}
|
||||
(b^*c) = b'b(b^*c) = b'c(c^*b) = (b^*c)(c^*b)'(c^*b) = (b^*c)
|
||||
\end{equation*}
|
||||
|
||||
|
||||
Thus the corresponding diagram in \(□\) fulfills the requirements of lemma \ref{absolutePushout}, and is thus
|
||||
an absolute pushout square.
|
||||
\end{proof}
|
||||
|
||||
Next we start looking at the presheaf topos on our cube category \(\hat{□}\).
|
||||
We will recall some important concepts and give some constructions that we will
|
||||
use later. Also we introduce some notation.
|
||||
|
||||
\begin{definition}[Notation for Cubical Sets]\label{def:notationCubeSets}
|
||||
We fix the following notations for Cartesian cubical sets:
|
||||
\begin{itemize}
|
||||
\item \(* ≔ \Yo{[0]}\)
|
||||
\item \(\I ≔ \Yo{[1]}\)
|
||||
\item \(δ_k ≔ \Yo \mathrm{d}_k\)
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
|
||||
\subsubsection{Dedekind cubes}
|
||||
We will also encounter the so-called Dedekind cubes. There are also multiple ways to look at them. We will present
|
||||
multiple definitions, but skip the equivalence proofs as they don't give much insight into the subject.
|
||||
|
||||
\begin{definition}\leavevmode
|
||||
\begin{itemize}
|
||||
\item Let \FL be the category of finite lattices and monotone maps.
|
||||
\item Let \(𝟚\) be the finite lattice \(⊥ ≤ ⊤\).
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}\label{def:dedCubeLat}
|
||||
The category \(□_{∧∨}\) is the full subcategory of \FL, restricted to objects of the form \(𝟚^n\) with \(n ∈ ℕ\).
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}\label{def:dedCubeTheory}
|
||||
The category \(□_{∧∨}\) is the Lawvere-theory of the theory of lattices.
|
||||
\end{definition}
|
||||
|
||||
Unraveling this definition gives us a quite similar description to our description of Cartesian cubes.
|
||||
Let \(F\) be the functor that assigns to a set its free lattice. We have then as objects
|
||||
natural numbers (or up to equivalence finite sets). And again we will write elements of these as lower
|
||||
roman letters instead of natural numbers to keep the confusion between sets and their elements minimal.
|
||||
A morphism \(m → n\) is a function
|
||||
\(n → F(m)\). This adds a few more degeneracy maps, but the general geometric intuition stays the
|
||||
same. The elements of an object are dimensions, and maps give us for every dimension in the target
|
||||
a term that describes the behaviour of our domain in that particular direction. If we look at an
|
||||
example again, one might ask what the difference is between the new degeneracy maps from a square
|
||||
to the interval in comparison to the old one. We can view these as degenerate squares where we map the top
|
||||
and right (or bottom and left) side along the interval and the other two sides to \(⊥\) (or \(⊤\)).
|
||||
This wasn't possible before and will make a drastic difference once we start considering filling and
|
||||
composition problems.
|
||||
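As a concrete illustration in the description of morphisms given above (with \(F(\{a,b\})\) the free lattice on the two dimension variables of the square): the connection degeneracy from the square to the interval is the morphism \(2 → 1\) given by the function \(x \mapsto a ∧ b\). On the faces \(a = ⊤\) and \(b = ⊤\) it restricts to \(x \mapsto b\) and \(x \mapsto a\), so these two sides run along the interval, while the faces \(a = ⊥\) and \(b = ⊥\) give the constant \(x \mapsto ⊥\); dually, \(x \mapsto a ∨ b\) sends the other two sides to \(⊤\).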
|
||||
\begin{figure}
|
||||
\missingfigure{draw pictures for this}
|
||||
\caption{A square degenerated with a connection. The upper and right sides are equal, while the left and bottom sides are degenerated to a point.}
|
||||
\end{figure}
|
||||
|
||||
\todo{add figure for \(2→2\) degeneracy map}
|
||||
|
||||
\begin{proposition}
|
||||
The Dedekind cubes are an EZ-category
|
||||
\end{proposition}
|
||||
\begin{proof}[Proof sketch:]
|
||||
This can be proven by an argument analogous to the one for \fref{thm:CisEZ}.
|
||||
\end{proof}
|
||||
|
||||
If it is clear from the context we will use the notation from \fref{def:cubeNotation} and \fref{def:notationCubeSets} for
|
||||
the analogous objects of the Dedekind cubes.
|
||||
|
||||
\todo{think about if I want to write down the proof}
|
||||
|
||||
|
||||
% We want to recall universes though, as they will become important later again.
|
||||
% As a warm-up lets consider \(\Set\), as the presheaves over the singleton category.
|
||||
% We now want to classify all small sets indexed by some set. This is relatively easy, we take the set
|
||||
% of all small sets, lets call it \(U\) and the set of all elements of all small sets \(E ≔ ∐_{u ∈ U} u\)
|
||||
% together with the map \(p_U : E → U\) sending each element back to the set where it came from
|
||||
% \(p_U(s,x) ≔ s\). Another way to express \(E\) would be in term of pointed samll sets, so could
|
||||
% equivalently define \(E ≔ \Set_{κ}^{*}\). So how do we recover a collection of small sets from this? We just take
|
||||
% from our potential index set \(α : X → U\) to U. We can then get a pullback.
|
||||
% \begin{equation}
|
||||
% \begin{tikzcd}
|
||||
% A \arrow[d,"α^{*}p_u"] \arrow[r] \arrow[dr,phantom, "\lrcorner", very near start] & E \arrow[d,"p_U"] \\
|
||||
% X \arrow[r,"α"] & U
|
||||
% \end{tikzcd}
|
||||
% \end{equation}
|
||||
% And get small sets indexed by \(X\) by sending \(x \mapsto α^{*}p_u^{-1}(x)\). But can we get all those maps
|
||||
% as pullbacks? Indeed we can, given such a map \(p\) we define \(α(x) ≔ p^{-1}(x)\). Or in more categorical
|
||||
% terms we are \(α\) sends \(x\) to the collection of \(a\) such that the following diagram commutes.
|
||||
% \begin{equation}
|
||||
% \begin{tikzcd}
|
||||
% \Yo{*} \arrow[r,"a", dashed] \arrow[d,"\id"] & A \arrow[d,"p"] \\
|
||||
% \Yo{*} \arrow[r,"x"] & X
|
||||
% \end{tikzcd}
|
||||
% \end{equation}
|
||||
%
|
||||
% Now let us get back to an arbitrary presheaf category. Let \(\C\) be a category, that we will use as
|
||||
% our site from here on.
|
||||
% We first need to make percise what our analogon for
|
||||
% small set, and collection for small indexed sets is.
|
||||
%
|
||||
% \begin{definition} %TODO cite to awodeyKripkeJoyalForcingType2021 Definition 1.1 ? % TODO fix newline shit
|
||||
% \begin{enumerate}
|
||||
% \item A presheaf is called small if all its values are small
|
||||
% \item A map of presheaves \(p : A → X \) is small if for every \(x : \Yo c → X \) the
|
||||
% presheaf \(A_x\) obtained by the pullback
|
||||
% \begin{equation*}
|
||||
% \begin{tikzcd}
|
||||
% A_x \arrow[r] \arrow[d] \arrow[dr, phantom, "\lrcorner", very near start] & A \arrow[d,"p"] \\
|
||||
% \Yo c \arrow[r,"x"] & X
|
||||
% \end{tikzcd}
|
||||
% \end{equation*}
|
||||
% is small.
|
||||
% \end{enumerate}
|
||||
% \end{definition}
|
||||
%
|
||||
% We want now to mimic the set case, so let us first look at, how \(α\) would look, if we just do
|
||||
% the straight forward generalization.
|
||||
% \begin{equation*}
|
||||
% \begin{tikzcd}
|
||||
% \Yo d \arrow[r,dashed, "a"] \arrow[d,"\Yo f"] & A \arrow[d,"p"] \\
|
||||
% \Yo c \arrow[r,"x"] & X
|
||||
% \end{tikzcd}
|
||||
% \end{equation*}
|
||||
% Notice that we gained an additional dependency, that this diagram depends on, namely \(f\).
|
||||
% This makes \(α(x)\) a (small) presheaf on \(\C / c \). And gives rise to the follwing definition:
|
||||
%
|
||||
% \begin{equation}
|
||||
% U(c) ≔ \Obj [ (\C / c)^{\op},\Set_{κ}]
|
||||
% \end{equation}
|
||||
% The action on \(U(f)\) for \(f : d → c \) in \(\C\) is given by precomposition with the induced functor
|
||||
% \( \C / d → \C / c \). And similiar to the \(\Set\) case we can define:
|
||||
% \begin{equation}
|
||||
% E(c) ≔ \Obj [ (\C / c)^{\op},\Set^{*}_{κ}]
|
||||
% \end{equation}
|
||||
% and let the forgetfull functor \(\Set^{*} → \Set\) induce \(p_U : E → U\).
|
||||
%
|
||||
%
|
||||
% There are lots of equivalent descriptions of the suboject classifier \(Ω\), we want to recall one quite similiar to our universe
|
||||
% definition. We call \(\mathbf{2}\) the category of the well-ordering with two elements \( 0 ≤ 1 \).
|
||||
%
|
||||
% \begin{definition}
|
||||
% We define
|
||||
% \begin{equation*}
|
||||
% Ω(c) ≔ \Obj[(\C / c)^{\op},\mathbf{2}]
|
||||
% \end{equation*}
|
||||
% The natural transformation \(\t: * → Ω\) is given by the presheaves that have constant value 1. We call
|
||||
% \(\t\) a \emph{suboject classifier} ttt
|
||||
% \end{definition}
|
||||
\end{document}
|
395
src/Preliminaries/Cubical_Type_Theory.tex
Normal file
|
@@ -0,0 +1,395 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\section{Cubical Type Theory}
|
||||
This section mainly serves to give the reader who is not acquainted with cubical type theory
|
||||
some guidance and intuition. The reader who is already well-versed in this regard might
|
||||
skip this section. We still expect the reader to be familiar with some form of univalent
|
||||
type theory. For a more technical and complete presentation we point to \cite{ABCHFL} and \cite{cavalloUnifyingCubicalModels2020}.
|
||||
We think about this in terms of categorical semantics, so we expect the reader to have
|
||||
a basic idea about this. For the basic ideas there is the somewhat old \cite{jacobsCategoricalLogicType1999}. % TODO find categorical semantics references
|
||||
We also recall some translations between the intuition and the technical treatment of types, their elements, and similar things.
|
||||
We also want to mention that cubical type theory isn't one type theory, but a zoo of related variants of the same idea.
|
||||
Usually there is already a model in mind, and we allow our type theory to talk about some features of that model.
|
||||
We will briefly talk about this in this section; for a more technical treatment take a look at \cite{cavalloUnifyingCubicalModels2020}.
|
||||
Our presentation follows \cite{ABCHFL}. We state formation rules and describe the intended interpretation and leave
|
||||
out the syntactical rules that capture the behavior of the interpretation, as we think they would
|
||||
not add further intuition. For the Cartesian case these can be found in \cite{ABCHFL} and for the de Morgan case in \cite{CCHM}.
|
||||
|
||||
|
||||
\subsection{types and universes}
|
||||
As our type theory features a universe of small types, we get a technically very nice
|
||||
way of interpreting types. We will interpret our universe as a certain fibrant
|
||||
map \(p_U: E → U\). We can then define a type in context \(Γ\) as a map
|
||||
\(Γ → U\). From a topological intuition we want to think about types as
|
||||
fibrant small maps into \(Γ\). The way to translate between these views is by taking
|
||||
a pullback.
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
{Γ.A} \arrow[r] \arrow[d] \pb & E \arrow[d,"p_U"] \\
|
||||
Γ \arrow[r,"A"] & U
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
Of course this means that there have to exist the right amount of morphisms
|
||||
from \(Γ → U\), but this is the property that makes our universe a universe.
|
||||
Of course this translation can't be expected to be one-to-one, as pullbacks
|
||||
are only defined up to isomorphism. One way to deal with this is to make a
|
||||
choice of pullbacks (calling these display maps) and require that every small fibrant map is isomorphic
|
||||
to one of those display maps. This has further problems though. For example,
|
||||
substitution is functorial on the nose, but if we were to go for display maps this
|
||||
corresponds to building another pullback along a map between contexts. So we
|
||||
would need to make our choice of display maps such that this choice of pullbacks
|
||||
becomes functorial on the nose. On the other hand, working with maps into \(U\)
|
||||
has technical benefits. Substitution becomes precomposition, which
|
||||
is functorial on the nose and we get smallness for free. This does
|
||||
not directly reflect our topological intuition though. So when we talk about
|
||||
intuition we talk about fibrant maps into the context, but on the technical side
|
||||
we work with maps into the universe.
|
||||
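Spelled out, the on-the-nose functoriality is simply composition of maps: if a type in context \(Γ\) is a map \(A \colon Γ → U\) and \(σ \colon Δ → Γ\) is a substitution, then
\begin{equation*}
  A[σ] ≔ A ∘ σ, \qquad\text{and hence}\qquad (A[σ])[τ] = A ∘ σ ∘ τ = A[σ ∘ τ]
\end{equation*}
holds as an equality of maps, not merely up to isomorphism of chosen pullbacks.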
|
||||
We briefly recall how terms of types are treated in this way.
|
||||
A term \(a\) of type \(A\) in context \(Γ\), becomes a map
|
||||
which we also call \(a\), from \(Γ\) to \(E\), such that the following
|
||||
triangle commutes.
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
& E \arrow[d,"p_U"] \\
|
||||
Γ \arrow[ru,"a"] \arrow[r,"A"] & U
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
This translates directly to the expected splits of the display map
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
Γ \arrow[rrd, bend left,"a"] \arrow[ddr, bend right,"\id"] \arrow [rd,dashed] & & \\
|
||||
& Γ.A \arrow[r] \arrow[d] \pb & E \arrow[d,"p_U"] \\
|
||||
& Γ \arrow[r, "A"] & U
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
|
||||
\subsection{Liftings}
|
||||
As with HoTT, in cubical type theory we want a type theory that satisfies the univalence axiom. But unlike
|
||||
HoTT, cubical type theory is allowed to talk rather directly about liftings of trivial cofibrations along fibrations.
|
||||
|
||||
|
||||
\subsubsection{interval, dimensions and cubes}
|
||||
Cubical type theory has a rather direct way of talking about paths. We allow the type theory to talk about the
underlying interval object. We will use the symbol \(\I\) here. This might be a bit unintuitive because
the interval is not necessarily Kan. To not get tangled up in this problem we split our context in two, the first half only containing
variables of the interval and the latter half types as we are used to. In other words, every context has the form
\(Φ;Γ\), where \(Φ\) has the form \(i:\I,j:\I,\dots\), and \(Γ\) is allowed to depend on \(Φ\). We often
think of the elements of \(Φ\) as dimensions.
|
||||
As in cubical type theory the interval object is usually representable and the underlying category is closed under products, the
interpretation of \(Φ\) is some higher-dimensional representable cube. To see that this makes sense we can think about the case
where \(Γ\) is empty and we have a closed type \(\cdot ⊢ A\). Then a term
\(Φ ⊢ a:A\) corresponds to a section of the display map
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
Φ.A \arrow[d] \arrow[r] \pb & A \arrow[d] \arrow[r] \pb & E \arrow[d,"p_U"]\\
|
||||
Φ \arrow[r] \arrow[u,dashed,"a", bend left] & * \arrow[r,"A"] & U
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
and thus also to a map \(Φ → A\). As \(Φ\) is some representable cube it is isomorphic to some \(\Yo c\),
and so this map corresponds by the Yoneda lemma to an element of \(A(c)\).
In other words we might think about the elements of \(Φ\) as intervals (with start- and endpoint) in different
directions.
One might express this by including the following new kind of judgment.
|
||||
\[
|
||||
Φ ⊢ r:\I \face
|
||||
\]
|
||||
and the following rules
|
||||
\[
|
||||
\begin{prooftree}
|
||||
\infer0{Φ ⊢ 0 : \I \face}
|
||||
\end{prooftree}
|
||||
\quad \quad
|
||||
\begin{prooftree}
|
||||
\infer0{Φ ⊢ 1 : \I \face}
|
||||
\end{prooftree}
|
||||
\quad \quad
|
||||
\begin{prooftree}
|
||||
\infer0[$r ∈ Φ$]{Φ ⊢ r : \I \face}
|
||||
\end{prooftree}
|
||||
\]
|
||||
|
||||
\subsubsection{cofibrations}
|
||||
Models of cubical type theory are usually cofibrantly generated, and these generating maps have representable
codomain. This is done by restricting our first context half by some form of dimension formulas. So our context gets
a third part \(Φ;ϕ;Γ\). The exact details depend on the model in question. Following the presentation
in \cite{ABCHFL} we introduce an additional kind of judgment, of the form
|
||||
\begin{equation*}
|
||||
Φ ⊢ α \cofib \quad \textrm{and} \quad Φ ⊢ ϕ \formula
|
||||
\end{equation*}
|
||||
Our \(ϕ\) will consist of a list of such \(α\), and we think of this list as a single formula, namely the conjunction of its entries.
This is expressed by the following rules
|
||||
%
|
||||
%TODO alignment
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{\vphantom{Φ ϕ \formula}}
|
||||
\infer1{Φ ⊢ \cdot \formula \vphantom{ϕ,}}
|
||||
\end{prooftree}
|
||||
\quad \quad
|
||||
\begin{prooftree}
|
||||
\hypo{Φ ⊢ ϕ \formula}
|
||||
\hypo{Φ ⊢ α \cofib}
|
||||
\infer2{Φ ⊢ ϕ,α \formula}
|
||||
\end{prooftree}
|
||||
\end{equation*} %
|
||||
%
|
||||
But what kind of subobjects do we want to talk about? The first that come to mind are faces, which
are syntactically represented by the following rule.
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ ⊢ r:\I \face}
|
||||
\hypo{Φ ⊢ r':\I \face}
|
||||
\infer2{Φ ⊢ r = r' \cofib}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
The interpretations will naturally be objects of \(\mathbf{Sub}(Φ)\).
For example \(Φ ⊢ r = 1\) means that \(r ∈ Φ\), so we can write \(Φ\) as \(Ψ,r:\I\), and the judgment gets
interpreted as the face inclusion \(⟨\id,1/r⟩:Ψ → Φ\). If we have \(Φ ⊢ r = r'\), we can write \(Φ\) as
\(Ψ,r':\I,r:\I\) and our interpretation becomes the diagonal \(⟨\id,r'/r⟩\). \(Φ⊢ r = r\) gets interpreted
as the identity and \(Φ⊢0=1\) as the inclusion of the empty subobject. Cubical variants disagree on which
face inclusions should be allowed. The \cite{CCHM} model does not include the diagonal case, while in
the Cartesian model \cite{ABCHFL} the inclusion of the diagonals is important to get fillings from compositions.
|
||||
|
||||
We are also allowed to glue these together. For example we might want the right and the top face of a square.
|
||||
We can do this using the \(∨\) operation.
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ⊢α \cofib}
|
||||
\hypo{Φ⊢β \cofib}
|
||||
\infer2{Φ⊢ α∨β \cofib}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
We will interpret this as the coproduct in \(\mathbf{Sub}(Φ)\), or, spelled out, as the pushout over their pullback.
The geometric intuition is that we glue the two subobjects together
along their intersection. Or as a diagram
|
||||
\begin{equation}
|
||||
\begin{tikzcd}
|
||||
P \arrow[r] \arrow[d] \pb \pu & A \arrow[d] \arrow[rdd,"α", hook , bend left] & \\
|
||||
B \arrow[rrd, "β", hook, bend right ] \arrow[r] & A∨B \arrow[rd, "α∨β", dashed, hook] & \\
|
||||
& & Φ
|
||||
\end{tikzcd}
|
||||
\end{equation}
|
||||
where the outer square is a pullback and the inner square is a pushout.
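For example, the union of the right and the top face of a square mentioned above can be written in the syntax just introduced (a small illustrative instance, not an additional rule):
\begin{equation*}
	i:\I,j:\I ⊢ (i = 1) ∨ (j = 1) \cofib
\end{equation*}
and its interpretation is the union of the two face inclusions in \(\mathbf{Sub}(\I^2)\), glued along the common corner.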
|
||||
|
||||
We also have a third operation, which will play an important role in the development of
a constructive model for univalence, namely \(∀\). As the name suggests,
it will be interpreted as the right adjoint of the pullback functor between
subobject categories. We add the following rule
|
||||
\begin{equation}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ,i:\I ⊢ α \cofib }
|
||||
\infer1{Φ⊢∀i:\I.α \cofib}
|
||||
\end{prooftree}
|
||||
\end{equation}
|
||||
Geometrically this might be envisioned by only keeping faces that are defined across the
|
||||
entirety of that dimension, and then projecting the dimension away.
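To get a feeling for \(∀\), here are two instances of its intended interpretation that one can compute directly (a sketch, not additional rules): a cofibration not mentioning \(i\) is left unchanged, while a single face in the direction of \(i\) does not survive,
\begin{equation*}
	∀i:\I.\,(j = 1) = (j = 1) \quad \textrm{and} \quad ∀i:\I.\,(i = 0) = (0 = 1) \textrm{.}
\end{equation*}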
|
||||
|
||||
|
||||
\subsubsection{path types}
|
||||
As one might expect from usual topology, cubical type theory treats path types like
maps from the interval. They are also used to model identity types. And as they
are maps from the interval, we can actually evaluate them.
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ;ϕ;Γ ⊢ A}
|
||||
\hypo{Φ;ϕ;Γ ⊢ t : A}
|
||||
\hypo{Φ;ϕ;Γ ⊢ u : A}
|
||||
\infer3{Φ;ϕ;Γ ⊢ \Path{A}{t}{u}}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
One can think of this as the type of paths from \(t\) to \(u\) in \(A\), or as the
ways in which \(t\) and \(u\) are identifiable. As we want to think about paths as maps
from the interval, we get rules similar to those of function types.
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ;ϕ;Γ ⊢A}
|
||||
\hypo{Φ,i:\I;ϕ;Γ ⊢t:A}
|
||||
\infer2{Φ;ϕ;Γ ⊢⟨i⟩ t : \Path{A}{t(i/0)}{t(i/1)}}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
Of course we can also evaluate them
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ;ϕ;Γ ⊢p : \Path{A}{t}{u}}
|
||||
\hypo{Φ ⊢ r : \I}
|
||||
\infer2{Φ;ϕ;Γ ⊢ p\,r:A}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
with the usual (\(η\)) and (\(β\)) rules as well as \(p\,0 \equiv t\) and \(p\,1 \equiv u\).
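As a first example of how these rules interact, a term that does not mention the abstracted dimension yields a constant path (the name \(\mathrm{refl}\) below is our own, chosen only for illustration):
\begin{equation*}
\begin{prooftree}
\hypo{Φ;ϕ;Γ ⊢ t : A}
\infer1{Φ;ϕ;Γ ⊢ \mathrm{refl}_t ≔ ⟨i⟩\, t : \Path{A}{t}{t}}
\end{prooftree}
\end{equation*}
which is justified since \(t(i/0) \equiv t \equiv t(i/1)\).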
|
||||
|
||||
\subsubsection{terms of intervals}
|
||||
In general there are more terms of the interval than we allowed in the construction of \(ϕ\) above.
Which terms are allowed is usually closely tied to the site of the underlying model. For example in the
\cite{CCHM} model the underlying site is the Lawvere theory of deMorgan algebras, and the terms
of the interval are exactly the terms of a deMorgan algebra. We want to briefly study them
and give them some names and intuition. We will mostly talk about the \cite{CCHM} model, as the
Cartesian case has far fewer morphisms in its site.
|
||||
|
||||
Unraveling the definition of a Lawvere theory, we get that the site of the \cite{CCHM} model consists of
a skeleton of finite sets, and a morphism from \(A\) to \(B\) is a function from \(B\) to the free
deMorgan algebra generated by \(A\). As the Yoneda embedding is fully faithful this also holds
for the subcategory of representables. A term of type \(\I\) in some dimensional context \(Φ\) becomes
a map \(Φ → \I\), and as such a map \(\I^n → \I\). Such a map corresponds to a term of a deMorgan
algebra in \(n\) variables. We will write for this judgments of the form
|
||||
\begin{equation*}
|
||||
Φ ⊢ t : \I
|
||||
\end{equation*}
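For instance, in the deMorgan case the following judgments are well formed (writing \(1-i\) for the reversal of \(i\), as in \cite{CCHM}):
\begin{equation*}
	i:\I ⊢ 1-i : \I \quad \textrm{and} \quad i:\I,j:\I ⊢ i ∧ (1-j) : \I \textrm{,}
\end{equation*}
while in the Cartesian case the only terms of \(\I\) are the variables and the constants \(0\) and \(1\).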
|
||||
We can think of a type in a context with \(n\) dimension variables as an \(n\)-cube
of types, and of a term as an \(n\)-cube in the total space of the type, going across the fibers.
Depending on the map in the site, or equivalently the map between the representables, we get terms
of cubes that we can plug into our types.
|
||||
|
||||
For example if we have a line of types, we can look
at it as a degenerate square of types. This can be done in multiple ways. The obvious one is
through weakening, or syntactically going from \(r:\I;; ⊢ A\) to \(r:\I,r':\I;; ⊢ A\), which gives
us a square of types that is constant in the direction of \(r'\). In usual topology there are more
ways to get a degenerate square from a line; for example we might want that the right and the
top face are the given line while the left and bottom face are constant. These kinds of degeneracies are
usually called connections and exist in some models. In the \cite{CCHM} model this specific
degeneracy corresponds to the meet (\(∧\)). Or, syntactically more explicitly, if we have
a line of types \(r:\I⊢A\) we can get a degenerate square of types \(r:\I,r':\I⊢A(r∧r'/r)\).
In point set topology we could think about this as taking the infimum of \(r\) and \(r'\).
Of course there is also the dual connection, which is also present in \cite{CCHM}. There
is also a model with only one of the two connections, studied in \cite{cavalloRelativeEleganceCartesian2022}.
These degeneracies allow us to reconstruct fillings from compositions. The Cartesian model
does not have connections; there we will use diagonal cofibrations to get fillings from
compositions.
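To make the connection degeneracy concrete, consider a path instead of a line of types: given \(p : \Path{A}{t}{u}\), the square \(p\,(i ∧ j)\) (a sketch using the evaluation rule for paths from the previous subsection) has the faces
\begin{equation*}
	p\,(0 ∧ j) \equiv t, \qquad p\,(i ∧ 0) \equiv t, \qquad p\,(1 ∧ j) \equiv p\,j, \qquad p\,(i ∧ 1) \equiv p\,i \textrm{,}
\end{equation*}
so two of its faces are the given path and the other two are constant.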
|
||||
% TODO image of connections.
|
||||
|
||||
In the \cite{CCHM} model we are also allowed to reverse dimensions. This corresponds
to the inversion/negation of the deMorgan algebra. These kinds of maps are often called reversals.
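For example, reversals let us invert paths: if \(p : \Path{A}{t}{u}\), then \(⟨i⟩\, p\,(1-i) : \Path{A}{u}{t}\), since \(p\,(1-0) \equiv p\,1 \equiv u\) and \(p\,(1-1) \equiv p\,0 \equiv t\) (again only a sketch in the notation of the previous subsection).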
|
||||
|
||||
The last maps we want to mention here are diagonals. These correspond syntactically to judgments like
\(r:\I;; ⊢ (r,r) : \I^2\). The reason to mention them is that the \cite{BCH} model does not feature them
in the site. It gets away with this by using a different monoidal structure, so the judgment above still has
an interpretation, it is just a different one than one might initially think.
|
||||
|
||||
\subsubsection{compositions and fillings}
|
||||
The first thing we need in order to talk about fillings is a filling problem.
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
\Ob \arrow[d,hook] \arrow[r] & □;;Γ.A \arrow[d] \\
|
||||
□ \arrow[r] \arrow[ur, dashed, "\mfill"] & □;;Γ
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
The figuratively drawn problem above might be represented syntactically by
witnessing the following rule.
|
||||
\todo{should I also include the judgmental equalities that say that a filling is actually a filling of that problem?}
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{z: \I ,z':\I ;;Γ ⊢ A \type }
|
||||
\hypo{z: \I ,z': \I ;z{=}0 ∨ z{=}1 ∨ z'{=}0;Γ⊢ t : A}
|
||||
\infer2{z: \I ,z': \I ;;Γ⊢ \mfill (t) : A}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
|
||||
Allowing this in its obvious generalization runs directly into problems, as it would allow
liftings against arbitrary cofibrations, and not only trivial ones. To get around this problem
we reformulate our syntactic representation. To do this we split the problem data into two
parts. The first part is the base, which is not allowed to talk about the dimension we want to
fill in, but is completely defined in all other dimensions; the second part is allowed to talk about the dimension we want to fill in,
but must be attached to the base and agree with it.
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{z:\I,z':\I;;Γ &⊢ A \type}
|
||||
\infer[no rule]1{z:\I;;Γ &⊢ b: A(0/z')}
|
||||
\infer[no rule]1{z:\I &⊢ z{=}0 ∨ z{=}1 \formula}
|
||||
\infer[no rule]1{z:\I;z':\I;z{=}0 ∨ z{=}1;Γ &⊢ t : A}
|
||||
\infer[no rule]1{z:\I;z{=}0 ∨ z{=}1;Γ &⊢ t(0/z') \equiv b : A(0/z')}
|
||||
\infer1{z:\I;z':\I;;&⊢ \mfill^{z':0}(b,t) : A}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
That these describe trivial cofibrations is justified by the fact that our model structures are cylindrical.
In other words, the pushout product with the inclusions of the cylinder object sends cofibrations to
trivial cofibrations. The term \(t\) is often called the tube of the lifting problem and \(b\) either the
cap or the base.
If we abstract away from our example we get
|
||||
\todo{get a better variable convention for dimension going}
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ,z:\I;;Γ &⊢ A \type}
|
||||
\infer[no rule]1{Φ&⊢r: \I}
|
||||
\infer[no rule]1{Φ;;Γ &⊢ b: A(r/z)}
|
||||
\infer[no rule]1{Φ &⊢ ϕ \formula}
|
||||
\infer[no rule]1{Φ;z:\I;ϕ;Γ &⊢ t : A}
|
||||
\infer[no rule]1{Φ;ϕ;Γ &⊢ t(r/z) \equiv b : A(r/z)}
|
||||
\infer1{Φ;z:\I;;Γ&⊢ \mfill^{r}(b,t) : A}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
Or written as the intended commuting diagram
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
Φ,z:\I;ϕ ∨ Φ\arrow[r,"{[b,t]}"] \arrow[d, hook, "{(ϕ,z) ∨ (r/z)}"'] & Φ,z:\I;;Γ.A \arrow[d] \\
|
||||
Φ,z:\I \arrow[r] \arrow[ur,"{\mfill(b,t)}", dashed] & Φ,z:\I;;Γ
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
From a syntactical point of view that rule is not so great, as in the
conclusion there is an explicit free dimension variable \(z\), and it is not
at all clear how one would substitute a term \(r'\) for \(z\) in our filling term.
The usual way of dealing with this problem is by making the substituted term \(r'\)
part of the premises of the rule.
|
||||
\begin{equation*}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ,z:\I;;Γ &⊢ A \type}
|
||||
\infer[no rule]1{Φ&⊢r: \I}
|
||||
\infer[no rule]1{Φ&⊢r':\I}
|
||||
\infer[no rule]1{Φ;;Γ &⊢ b: A(r/z)}
|
||||
\infer[no rule]1{Φ &⊢ ϕ \formula}
|
||||
\infer[no rule]1{Φ;z:\I;ϕ;Γ &⊢ t : A}
|
||||
\infer[no rule]1{Φ;ϕ;Γ &⊢ t(r/z) \equiv b : A(r/z)}
|
||||
\infer1{Φ;;Γ&⊢ \comp_A^{z:r→r'}(b,t) : A(r'/z)}
|
||||
\end{prooftree}
|
||||
\end{equation*}
|
||||
\todo{include the coherence rules here? because they are less obvious}
|
||||
These are usually called compositions, and correspond to a face of the filling.
|
||||
If we translate this back to the world of commutative diagrams we get\footcite[p.431]{ABCHFL}:
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
Φ;ϕ ∨ Φ;r{=}r' \arrow[d,"{ϕ ∨ r{=}r'}"'] \arrow[r] & Φ,z:\I;ϕ ∨ Φ\arrow[r,"{[t,b]}"] \arrow[d, hook, "{(ϕ,z) ∨ (r/z)}"] & Φ,z:\I;;Γ.A \arrow[d] \\
|
||||
Φ \arrow[r,"{(\id,r'/z)}"'] \arrow[rru, dashed,"{\comp_A^{z:r→r}(b,t)}"] & Φ,z:\I \arrow[r] & Φ,z:\I;;Γ
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
Different variants allow different terms for \(r\) and \(r'\). On one extreme, the
\cite{CCHM} model only allows composition from 0 to 1, which, in the presence of
reversals and connections, is not really a strong restriction. In the Cartesian model
we need to be less restrictive.
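As an illustration of how compositions are used in practice, the concatenation of paths can be defined with a single composition from 0 to 1: given \(p : \Path{A}{x}{y}\) and \(q : \Path{A}{y}{z}\), a sketch of the definition is
\begin{equation*}
	p \cdot q ≔ ⟨i⟩\, \comp_A^{z':0→1}(p\,i, t) : \Path{A}{x}{z} \textrm{,}
\end{equation*}
where the tube \(t\) is defined on the formula \(i{=}0 ∨ i{=}1\) by \(x\) on \(i{=}0\) and by \(q\,z'\) on \(i{=}1\); we have not introduced official syntax for such case splits, so this is only meant to convey the intuition.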
|
||||
|
||||
In general it is not clear that we can recover fillings from compositions. So it
is our job to make that possible. In the presence of at least one connection this
is not too difficult, and we can define
|
||||
\begin{equation*}
|
||||
Φ,i:\I;;Γ⊢\mfill_A^{i:0}(b,t) ≔ \comp_{A(i∧j/i)}^{j:0→1}(b,t(i∧j/i)) : A
|
||||
\end{equation*}\todo{make this technical correct}
|
||||
\todo{agree on a way to write substitution, why is this not uniform across literature?}
|
||||
\todo{make hand drawn picture for this}
|
||||
|
||||
This doesn't work in the Cartesian setting though, as we don't have any connections. Here
we can use the fact that we allowed diagonals in our filling problems and define
|
||||
\begin{equation*}
|
||||
Φ,z:\I;;Γ ⊢ \mfill_A^{z:r}(b,t) ≔ \comp_A^{z':r→z}(b,t) : A
|
||||
\end{equation*}
|
||||
where \(z'\) does not appear in \(Φ\).
|
||||
\todo{make hand drawn picture for this}
|
||||
|
||||
\begin{equation}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ ⊢ Γ \mathbf{ctx}}
|
||||
\hypo{Φ,i:\I;Γ⊢ A \mathbf{type}}
|
||||
\hypo{Φ;Γ⊢A}
|
||||
\infer3{b}
|
||||
\end{prooftree}
|
||||
\end{equation}
|
||||
|
||||
We also allow a special kind of lambda abstraction
for these kinds of variables (note that real lambda abstraction is only allowed for types).
|
||||
\begin{equation}
|
||||
\begin{prooftree}
|
||||
\hypo{Φ,i:\I;Γ⊢t:A}
|
||||
\infer1{Φ;Γ⊢⟨i⟩t : A^{\I}}
|
||||
\end{prooftree}
|
||||
\end{equation}
|
||||
|
||||
|
||||
|
||||
\end{document}
|
% src/Preliminaries/Dependent_Type_Theory.tex
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
|
||||
% TODO add references
|
||||
% TODO assume that we have seen lambda calculus
|
||||
\section{Dependent Type Theory}
|
||||
To talk about Cubical Type Theory we must first talk about what dependent type theory is.
|
||||
We will first talk about syntax and in the next section about the formal semantics. Of course
|
||||
we will discuss what we want the syntax to mean in this section, but the technical encodings of
|
||||
those are for the next section.
|
||||
|
||||
What do we mean when we say \emph{dependent} type theory? We want to allow types to depend on
terms of other types, for example a type of lists of a fixed length \(n\), where \(n\) is a term of a type of natural numbers.

%TODO good example what dependent types are
% 1.) Numbers from 1 to x (with x : N)
% 2.) lists of length x
% 3.) ask Lukas
|
||||
%
|
||||
|
||||
We will go forward having the following kinds of judgements
|
||||
\begin{itemize}
|
||||
\item \(Γ ctx\), for \(Γ\) is a well-formed context
|
||||
\item \(Γ ⊢ A type \) , for A is a well-formed type in context \(Γ\)
|
||||
\item \(Γ ⊢ t : A \), for \( t \) is a well-formed term of type \(A\) in context \(Γ\)
|
||||
\end{itemize}
|
||||
|
||||
We also assume a countable set of variable \(V\) and a bijection \(f \colon ℕ → V\). We write
|
||||
\(v_n \coloneqq f(n)\). For better readability we will take the freedom to use \(x,y,z,\dots\)
|
||||
for variables.
|
||||
|
||||
\subsubsection{Contexts}
|
||||
As in some non-dependent typed logic (like the typed lambda calculus),
|
||||
we need contexts to track what type free variables have. But here types might depend on the previous
|
||||
declared variables. We define a context inductively. So first the empty context is a context.
|
||||
|
||||
\begin{itemize}
|
||||
\item \begin{prooftree}
|
||||
\infer0{\diamond ctx}
|
||||
\end{prooftree}
|
||||
\end{itemize}
|
||||
And we can extend already valid contexts, by types in that context.
|
||||
\begin{itemize}
|
||||
\item \begin{prooftree}
|
||||
\hypo{Γ ctx}
|
||||
\hypo{Γ ⊢ A type}
|
||||
\infer2{Γ,x : A ctx}
|
||||
\end{prooftree}
|
||||
\end{itemize}
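As a small worked example, assume a base type \(ℕ\) of natural numbers and, for every \(n : ℕ\), a type \(\mathrm{Vec}(n)\) of lists of length \(n\) (both names are hypothetical and only used for illustration). The two rules above then let us derive
\begin{equation*}
\begin{prooftree}
\hypo{\diamond ctx}
\hypo{\diamond ⊢ ℕ type}
\infer2{\diamond, x : ℕ ctx}
\hypo{\diamond, x : ℕ ⊢ \mathrm{Vec}(x) type}
\infer2{\diamond, x : ℕ, v : \mathrm{Vec}(x) ctx}
\end{prooftree}
\end{equation*}
where the type of \(v\) depends on the previously declared variable \(x\).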
|
||||
|
||||
\subsubsection{Types}
|
||||
Here we will now go through the types that we will use in our type theory, except for equality types.
Those will be handled later in the section about Cubical Type Theory. Let us begin with (dependent) function types.
These work almost the same as in the normal lambda calculus; the only difference is that we allow the output type
to depend on the value of the input.
|
||||
|
||||
\begin{itemize}
|
||||
\item \begin{prooftree}
% formation rule for dependent function types; the notation Π is a placeholder choice
\hypo{Γ ⊢ A type}
\hypo{Γ, x : A ⊢ B type}
\infer2{Γ ⊢ Π_{x : A}\, B type}
\end{prooftree}
|
||||
\end{itemize}
|
||||
|
||||
\end{document}
|
% src/Preliminaries/Leibniz_Construction.tex
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\section{Leibniz Construction}
|
||||
We will use a well-known construction from homotopy theory, the Leibniz construction, to
elegantly construct a lot of interesting objects.
This section will mostly give a definition and some examples to get familiar with this construction.
If the reader is already familiar with it, they might skip this section without any problems.

We start by giving the definition.
|
||||
\begin{definition}[Leibniz Construction]
|
||||
Let \(\A\), \(\B\) and \(\C\) be categories such that \(\C\) has pushouts, and let \(⊗ : \A × \B → \C \) be a bifunctor.
Then we define the \emph{Leibniz Construction} \(\hat{⊗} : \arr{\A} × \arr{\B} → \arr{\C}\) to be the functor
that sends \(f : A → A' \) in \(\A\) and
\( g : B → B' \) in \(\B\) to \( f \hat{⊗} g \), defined as in the following diagram.
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
A ⊗ B \arrow[r, "f ⊗ \id"] \arrow[d, "\id ⊗ g"] \arrow[dr, phantom, "\ulcorner", very near end] & A' ⊗ \mathrlap{B} \phantom{A'} \arrow[ddr, bend left, "\id ⊗ g"] \arrow[d] & \\
|
||||
\phantom{B'} \mathllap{A} ⊗ B' \arrow[drr, bend right, "f ⊗ \id"] \arrow[r] & A ⊗ B' ∐\limits_{A ⊗ B} A' ⊗ B \arrow[dr, dashed ,"f \hat{⊗} g"] & \\
|
||||
& & A' ⊗ B'
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
If \(⊗\) is the tensor functor of a monoidal category we also call it the \emph{Leibniz product} or \emph{pushout-product}. If \(⊗\) is
|
||||
the functor application functor we also call it the \emph{Leibniz application}.
|
||||
\end{definition}
|
||||
|
||||
The following examples are true for any category of nice spaces; we will state them for simplicial sets, as the reader is probably
familiar with them.
|
||||
|
||||
\begin{example}\label{ex:leib:prism}
|
||||
Let \(δ_k : * → Δ^1\) for \(k ∈ \{0,1\}\) be one of the endpoint inclusions into the interval and \(i : \∂ Δ^n → Δ^n\) the boundary
inclusion of the \(n\)-simplex. Then \(δ_k \hat{×} i\) is the inclusion of a prism without its interior and one cap into the prism.
|
||||
\end{example}
|
||||
This gives us a description of a possible set of generating trivial cofibrations. This will be an ongoing theme; for cubical sets we
will define trivial cofibrations in this fashion. We can even observe something stronger: if we replace the boundary inclusion
of the \(n\)-simplex with the boundary inclusion of an arbitrary space \(X\), we get the inclusion of the cylinder object of \(X\)
without a cap and its interior into the cylinder.
|
||||
\begin{figure}
|
||||
\missingfigure{cylinder inclusion as pushout product}
|
||||
\caption{cylinder inclusion as Leibniz product}
|
||||
\end{figure}
|
||||
|
||||
\begin{observation}
|
||||
If \(f\) and \(g\) are cofibrations then \(f \hat{×} g\) is too. If one of them is a trivial cofibration then so is \(f \hat{×} g\).
|
||||
\end{observation}
|
||||
This has far-reaching consequences, and one can define a monoidal model category where this is one of the axioms
for the tensor functor. For more
detail see \cite{hoveyModelCategories2007}. We are not going to need much of this theory explicitly. But it is worth noting
that all examples of model categories that we are going to encounter are of this form.
|
||||
|
||||
We will also encounter basically the same construction as \cref{ex:leib:prism} in another way. We can get the cylinder object
functorially in \Δ, such that the cap inclusions are natural transformations.
We can then obtain the inclusion the Leibniz product produces in a natural manner via the Leibniz application.
|
||||
|
||||
\begin{example}\label{ex:leib:functorialCylinder}
|
||||
Let \( @ : \C^{\C} × \C → \C \), defined by
\( F @ x ≔ F(x)\), be the functor application functor. Let \(\I × (−)\) be the functor that sends \(X\) to its cylinder object (also known as the
product with the interval).
Let \( δ_k : \Id → \I × (−) \) be one of the cap inclusions, that is, the natural transformation that embeds a space as one of the cylinder caps.
Let \( \∂ X → X \) be the boundary inclusion of \(X\).
Then \( δ_k \hat{@} (∂X → X) \) is again the inclusion of the cylinder over \(X\) without its interior and one cap into the full cylinder.
|
||||
\end{example}
|
||||
This kind of construction will later play an important role in the construction of our desired model categories.
|
||||
|
||||
We will also encounter another form of Leibniz application, which at first glance does not have a lot to do with homotopy. On
the other hand, one might read it as saying that taking the component of a natural transformation is some sort of homotopical construction.
|
||||
\begin{lemma}
|
||||
Let \(F,G : \A → \B \) be functors and \(η : F ⇒ G\) a natural transformation, and let \(\A\) have an initial object \(∅\) that
\(F\) and \(G\) preserve.
We write \(∅ → X\) for the unique map out of the initial object. Then we get \(η \at (∅ → X) = η_X\).
|
||||
\end{lemma}
|
||||
\begin{proof}\todo{this is contravariant and I'm too tired}
|
||||
We will draw the diagram from the definition
|
||||
\begin{eqcd*}
|
||||
F (∅) \arrow[r, "F(∅ → X)"] \arrow[d, "η_∅"] \arrow[dr, phantom, "\ulcorner", very near end] & F(X) \arrow[ddr, bend left, "η_X"] \arrow[d] & \\
|
||||
G(∅) \arrow[drr, bend right, "G(∅ → X)"] \arrow[r] & G(∅) ∐\limits_{F(∅)} \mathrlap{F(X)} \phantom{G(∅)} \arrow[dr, dashed ,"f \hat{⊗} g"] & \\
|
||||
& & G(X)
|
||||
\end{eqcd*}
|
||||
If we substitute \(F(∅)\) and \(G(∅)\) with \(∅\) the claim directly follows.
|
||||
\end{proof}
|
||||
% TODO Left adjoint
|
||||
|
||||
\end{document}
|
% src/Preliminaries/Model_Categories.tex
\documentclass[Preliminaries.tex]{subfiles}
|
||||
\begin{document}
|
||||
|
||||
\section{Model Categories}
|
||||
We will need some basic knowledge about model categories. A nice introduction can be found on the
nLab \cite{IntroductionHomotopyTheory}. A more comprehensive reference is the book by Hirschhorn \cite{hirschhornModelCategoriesTheir2009}.
|
||||
|
||||
\end{document}
|
% src/Preliminaries/Model_Structure.tex
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
|
||||
\section{Model structure}\label{modelStructure}
|
||||
|
||||
% TODO restructure lifting with respect to category
|
||||
% awfs
|
||||
% Then cubical sets
|
||||
|
||||
\subsection{Equivariant Cartesian Cubes}
|
||||
We now present our main star, the equivariant model structure on cubical sets.
We will give a description of the two involved factorization systems and then follow
an argument from \cite{cavalloRelativeEleganceCartesian2022} to show that it is indeed
a model structure. The model structure as well as the presentation is taken from
\cite{riehlVid}. This
equivariant model structure is also known as the ACCRS model structure.
|
||||
|
||||
% reference for uniform fibrations CCHM and
|
||||
The “usual” model structure on this cube category is not equivalent to spaces, as shown
|
||||
by Buchholtz. %TODO find citation
|
||||
If we build the quotient by swapping the dimensions of a square, the resulting space is not
contractible, while in spaces it is. The idea is to correct this defect by making the embedding
of the point a trivial cofibration. This is achieved by forcing an extra property onto
the fibrations.
|
||||
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=large, row sep=large]
|
||||
* \arrow[r,"\id"] \arrow[d,tail,"f"] & * \arrow[d,tail,"σf"'] \arrow[r] & * \arrow[d,tail] \arrow[r,"β"] & X \arrow[d,"f"] \\
|
||||
\I^2 \arrow[r,"σ"', shift right=0.7ex] \arrow[r,"\id", shift left=0.7ex] \arrow[urrr, dashed, ,near end, "{j(αe,β)}"'] & \I^2 \arrow[urr, dashed, "{j(αeσ,β)}"] \arrow[r, "e"] & Q \arrow[r,"α"] & Y
|
||||
\end{tikzcd}
|
||||
\end{equation*} % Fix twist of suboject
|
||||
|
||||
Here \(σ\) is the map that swaps the dimensions and \(e\) is the coequalizer map. The hope would be that
the coequalizer now provides a lift for the rightmost lifting problem. But in general that
does not hold; the lifts need to commute neither with \(σ\) nor with \(\id\). The path forward
will be to restrict the fibrations to have the desired property.
|
||||
%
|
||||
|
||||
% \subsubsection{Weak factorization systems}
|
||||
% If we force additional properties on our lifting problems, one might rightfully
|
||||
% ask if these indeed induce a weak factorization system (wfs). It is also unclear if
|
||||
% there are induced factorizations at all. The good news is, there is a refined version
|
||||
% of the small object argument \cref{smallObject}, that will help us out. This works on
|
||||
% the back of algebraic weak factorization systems. We don't need the whole theory and
|
||||
% get by with the following definitiont.
|
||||
%
|
||||
% \begin{definition}\label{def:rightMaps}
|
||||
% Let \J be category and \(J : \J → \M^→\) be a functor. Then an object \(f\) in
|
||||
% \(\M^→\) is a \emph{right \(J\)-map} if for every object \(i\) in \J, and for every
|
||||
% lifting problem
|
||||
% \begin{equation*}
|
||||
% \begin{tikzcd}[column sep=large,row sep=large]
|
||||
% L \arrow[r,"α"] \arrow[d, "Ji"'] & X \arrow[d,"f"] \\
|
||||
% M \arrow[r,"β"] \arrow[ur, dashed,"{j(α,β)}"] & Y
|
||||
% \end{tikzcd}
|
||||
% \end{equation*}
|
||||
% there is a specified lift \(j(α,β)\), such that for every \((a,b) : k → i\) in \J, the diagram
|
||||
% \begin{equation*}
|
||||
% \begin{tikzcd}[column sep=huge, row sep = large]
|
||||
% L' \arrow[r,"a"] \arrow[d,"Jk"'] & L \arrow[r,"α"] \arrow[d,"Ji"] & X \arrow[d,"f"] \\
|
||||
% M' \arrow[urr, dashed, near start, "{j(αa,βb)}"] \arrow[r,"b"] & M \arrow[ur, dashed, "{j(α,β)}"'] \arrow[r,"β"] & Y
|
||||
% \end{tikzcd}
|
||||
% \end{equation*}
|
||||
% commutes.
|
||||
% \end{definition}
|
||||
%
|
||||
% \begin{remark}
|
||||
% If we start with a set \J and make it into a discret category, then we will get our old definition
|
||||
% of right lifting map, with an additional choice function for lifts. Which exists anyway if we
|
||||
% are ready to believe in strong enough choice principles.
|
||||
% \end{remark}
|
||||
%
|
||||
% After we defined a class of right maps by those lifting problems, it is not clear that these
|
||||
% will yield a wfs.
|
||||
% To get those we invoke a refinment of Quillens small object argument, namely
|
||||
% Garners small object argument \cite{garnerUnderstandingSmallObject2009}. This has the additional benefit of producing
|
||||
% algebraic weak factorization systems (awfs). For a quick reference see
|
||||
% \cite{riehlMadetoOrderWeakFactorization2015a}, and for an extensive tour \cite{riehlAlgebraicModelStructures2011}, or
|
||||
% \cite{bourkeAlgebraicWeakFactorisation2016,bourkeAlgebraicWeakFactorisation2016a}. The main takeaway for
|
||||
% us is the following theorem \cite[][Theorem 2.28]{riehlAlgebraicModelStructures2011}
|
||||
%
|
||||
% \begin{theorem}[Garner]\label{smallObject}
|
||||
% Let \(\M\) be a cocomplete category satisfying either of the following conditions.
|
||||
% \begin{itemize}
|
||||
% \item[(\(*\))] Every \(X ∈ \M\) is \(α_X\)-presentable for some regular cardinal \(α_X\).
|
||||
% \item[(\dagger)] Every \(X ∈ \M\) is \(α_X\)-bounded with respect to some proper, well-copowered
|
||||
% orthogonal factorization system on \(\M\), for some regular cardinal \(α_X\).
|
||||
% \end{itemize}
|
||||
% Let \( J : \mathcal{J} → \M^→\) be a category over \(\M^→\), with \(\mathcal{J}\) small. Then the free awfs on \(\mathcal{J}\) exists
|
||||
% and is algebraically-free on \(\mathcal{J}\).
|
||||
% \end{theorem}
|
||||
% Without going to much into the size issues, every locally presentable categoy satisfies \((\ast)\). And as \(\□\) is a presheaf
|
||||
% category over a countable category, it is locally presentable. Also by beeing a presheaf category makes it automatically cocomplete.
|
||||
%
|
||||
% We also won't clarify all the consequences that getting an awfs instead of a wfs, brings with it. For us the important
|
||||
% thing is, that every awfs has an underlying wfs. The free part of that statement unsures that this construction preserves
|
||||
% the right lifting class.
|
||||
%
|
||||
% Now we need to formulate our definitions in a way that agrees with this conditions of this theorem.
|
||||
%
|
||||
|
||||
|
||||
\subsubsection{Cofibrations and trivial fibrations}
|
||||
The short way of specifying this weak factorization system is by saying
the cofibrations are the monomorphisms. Another, longer, but for the further development more
enlightening way is to formulate this via uniform lifting properties.
It is also not equivalent from the viewpoint of an awfs, as this definition has extra conditions on the
chosen lifts.
The condition to have uniform lifts comes from \cite{CCHM} and was generalized in \cite{gambinoFrobeniusConditionRight2017}. This requirement is motivated by the wish for a
constructive interpretation of cubical type theory\todo{see ctt intro chapter when it is done}. As we focus not on the type-theoretic interpretation but on the model structure, we won't
explore this further.
|
||||
|
||||
\begin{definition}
|
||||
Let \(J : \mathcal{M} ↣ \A^→\) be a subcategory whose objects are a pullback-stable class of morphisms in \A and whose morphisms are
the cartesian squares between them. The category \(J^⧄\) is called the category of \emph{uniform right lifting morphisms} with respect
to \(\mathcal{M}\). Objects of \(J^⧄\) are said to have the \emph{uniform right lifting property}.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
The category of \emph{uniform generating cofibrations} has as objects monomorphisms into a cube \(C ↣ \I^n\) of arbitrary dimension, and
as morphisms cartesian squares between those.
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A \emph{uniform trivial fibration} is a right map with respect to the inclusion functor from the category of uniform generating cofibrations
into \(\□^→\).
|
||||
\end{definition}
|
||||
|
||||
\todo{show that the cofib are all monos}
|
||||
|
||||
|
||||
\subsubsection{Trivial cofibrations and fibrations}
|
||||
|
||||
As we sketched above, the strategy will be to make more maps trivial cofibrations. This is done
by making it harder to be a fibration. Before we give the definition in full generality, we
need to address precisely which coequalizer we are talking about.
|
||||
|
||||
Of course doing this only in the 2-dimensional case is not enough. For this we need to say what
the “swap” maps are. What these should do is permute the dimensions. So let \(Σ_k\) be the symmetric group
on \(k\) elements. For every \(σ' ∈ Σ_k\) we get a map \( σ : \I^k → \I^k \) given by \((π_{σ'(1)}, … , π_{σ'(k)})\),
where \(π_l : \I^k → \I \) is the \(l\)-th projection. Also if we have a subcube \(f : \I^n \rightarrowtail \I^k\),
we get a pullback square of the following form
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
\I^n \arrow[r,"\id"] \arrow[d,tail,"f"] \pb & \I^n \arrow[d,tail, "σf"] \\
|
||||
\I^k \arrow[r,"σ"] & \I^k
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
whose right arrow we call \(σf\).
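For instance, for \(k = 2\) and the nontrivial permutation \(σ' ∈ Σ_2\) this yields
\begin{equation*}
	σ = (π_2, π_1) : \I^2 → \I^2 \textrm{,}
\end{equation*}
which is exactly the swap map \(σ\) from the motivating diagram above.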
|
||||
%
|
||||
Now we describe our generating trivial cofibrations. We recall example \cref{Leibniz:Ex:OpenBox}. We would
like to do the same here, but a bit more generally.

Instead we are going for box fillings in the context of a cube. The definition will be very similar;
we will just work in the slice over a cube \(\I^k\). We take the pushout-product in the slice and
forget the slice to get a map in \(\□\).
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
\left(\faktor{\□}{\I^k} \right) ^{→} × \left(\faktor{\□}{\I^k}\right)^{→} \arrow[r,"\hat{×}_{\I^k}"] & \left(\faktor{\□}{\I^k}\right)^→ \arrow[r,"\textrm{forget}"] & \□^→
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
|
||||
What do we change by working in the slice?
For intuition let us first look at the case
of a one-dimensional context. The point in this context is the terminal object in \(\faktor{\□}{\I}\).
The interval in this cube category can be described as the pullback of the interval in \(\□ = \faktor{\□}{\I^0}\)
along the unique map \(\I → \I^0\), or in simpler words it is the left projection \(π_l : \I × \I → \I\).
Let us now look at what maps we have from the point into the interval \(\id_\I → π_l \). We get the two
endpoint maps we expected, as \(⟨\id, 0⟩ \) and \(⟨\id, 1⟩\), but in this context we get an additional map,
namely \(⟨\id, \id⟩\). If we forget the context again (i.e. leave the slice), this is the diagonal of the square. From
a type-theoretic perspective this is really not surprising. Here it amounts to the question how many different terms
of type \(\I\) can be produced. In an empty context the answer is two.
|
||||
\begin{align*}
|
||||
⊢ 0 : \I && ⊢ 1 : \I
|
||||
\end{align*}
|
||||
But in the context of \(\I\), the answer is three.
|
||||
\begin{align*}
|
||||
i:\I ⊢ 0 : \I && i:\I ⊢ 1 : \I && i:\I ⊢ i : \I
|
||||
\end{align*}
|
||||
As the type theory suggests, we can think of this as a generic element of the interval. That this
element is different from both closed points is what differentiates the interval from the
coproduct of two points. In the type-theoretic world we want something called a composition structure, and these
should also work
along those diagonals; this amounts to having a cube-filling property along those diagonals,
or in other words we want them to be trivial cofibrations too. The earliest mention of this idea that I am aware of is
the short note \cite{coquandVariationCubicalSets2014}; for a discussion see for example \cite{angiuliSyntaxModelsCartesian2021}.
|
||||
|
||||
The generalization of the boundary inclusion is straightforward. We just take any cube over \(\I^k\),
say \(ζ : \I^n → \I^k\), and a monomorphism \(c : C → \I^n\); by composition this also induces an object in the slice.
Notice that we did not require that \( n ≥ k\).
If we pack all of this into a definition we get the following.
|
||||
\begin{definition}
|
||||
Let \(ζ : \I^n → \I^k\) be a cube over \(\I^k\) and \(c : C → \I^n\) monic. A map of the form
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
C \arrow[r,"c", tail] \arrow[dr] & \I^n \arrow[d,"ζ"] \arrow[dr,phantom, "\hat{×}_{\I^k}"] & \I^k \arrow[d,"\id"] \arrow[r,"{⟨\id,l⟩}"] & \I^k × \I^m \arrow[dl,"π_l"] \\
|
||||
& \I^k & \I^k &
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
is called a \emph{generating trivial cofibration}, considered as a map in \(\□\) with signature \(B_{c,ζ,l} \rightarrowtail \I^m × \I^n\).
|
||||
\end{definition}
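As a sanity check (a sketch, following our reading of the indexing in the definition above): for the empty context \(k = 0\) and a one-dimensional second factor, the map \(⟨\id,l⟩\) is just an endpoint inclusion \(* → \I\), the pushout-product is taken over the terminal object, and the construction specializes to the open-box inclusions of \cref{Leibniz:Ex:OpenBox}.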
|
||||
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includesvg[inkscapearea=nocrop,inkscapedpi=500,width=15cm]{figures/trivial_cofibration.svg}
|
||||
\caption{An example construction of a trivial cofibration}
|
||||
\end{figure}
|
||||
|
||||
We now want to define our fibrations as the uniform equivariant right lifting maps. To do this we need to define what this
|
||||
actually means. Uniformity wants a choice of lifts that agree with pullback squares between (trivial) cofibrations. %TODO move the uniformity section upwards
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=large]
|
||||
A \arrow[r, "a"] \arrow[d, tail] \arrow[dr, phantom, very near start, "\lrcorner"] & B \arrow[d, tail] \arrow[r,"x"] & X \arrow[d, two heads] \\
|
||||
\I^k \arrow[r,"b"] \arrow[rru, dashed, "{j(xa,yb)}"] & \I^n \arrow[r,"y"] \arrow[ru, dashed, "{j(x,y)}"'] & Y
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
The uniformity condition demands that the triangle containing both chosen lifts commutes, or as an equation \(j(x,y)\,b = j(xa,yb)\).
|
||||
We now need to combine the uniformity and the equivariance condition. As the pushout-product is functorial it is enough to
|
||||
describe these conditions between the building blocks of our trivial cofibration and apply the pushout-product again.
|
||||
Again we will do this in a slice and forget the slice afterwards.
|
||||
\begin{equation*}
|
||||
\left(
|
||||
\begin{tikzcd}[column sep = normal]
|
||||
D \arrow[r,"a"] \arrow[d, tail, "∂"] \pb & C \arrow[d,tail,"c"] &[-20pt] \\
|
||||
\I^m \arrow[r,"α"] & \I^n \arrow[dr, "ζ"] & \\[-15pt]
|
||||
& & \I^k
|
||||
\end{tikzcd}
|
||||
\right)
|
||||
\hat{×}_{\I^k}
|
||||
\left(
|
||||
\begin{tikzcd}
|
||||
\I^k \arrow[d,"{⟨\id,l⟩}"] \arrow[r,"\id"] & \I^k \arrow[d,"{⟨\id,σl⟩}"] &[-30pt] \\
|
||||
\I^k × \I^l \arrow[r,"\id × σ"] & \I^k × \I^l \arrow[dr,"π_l"] & \\[-15pt]
|
||||
& & \I^k
|
||||
\end{tikzcd}
|
||||
\right)
|
||||
\end{equation*}
|
||||
Notice that we understand the vertical morphisms as objects of \(\□^→\) and the horizontal ones as morphisms in \(\□^→\).
Also notice that the left diagram is a square of the kind we use in our uniformity condition, while the right one captures
equivariance. And notice that the pushout-products of the columns yield trivial cofibrations. We will denote the resulting
commutative square as
|
||||
\begin{equation}\label{devilSquare}
|
||||
\begin{tikzcd}[column sep=large]
|
||||
B_{∂,ζα,l} \arrow[r,"{⟨α,a×σ⟩}"] \arrow[d, tail] & B_{c,ζ,σl} \arrow[d, tail] \\
|
||||
\I^m × \I^l \arrow[r, "α×σ"] & \I^n × \I^l
|
||||
\end{tikzcd}
|
||||
\end{equation}
|
||||
Now we can finally give the definition of uniform equivariant fibrations.
|
||||
|
||||
\begin{definition}
|
||||
The category of \emph{generating uniform equivariant trivial cofibrations} has as objects
the generating trivial cofibrations and as morphisms squares of the shape defined in \cref{devilSquare}
above.
|
||||
\end{definition}
|
||||
\begin{definition}
|
||||
A \emph{uniform equivariant fibration} is a right map with respect to the inclusion functor of the category of generating
|
||||
uniform equivariant trivial cofibrations into \(\□^→\).
|
||||
\end{definition}
|
||||
|
||||
\begin{notation}
|
||||
By \cref{awfs:smallObject} these categories of maps form an awfs. We call it \((\TCF,\FF)\) and the underlying wfs
\((\Ct,\F)\).
|
||||
\end{notation}
|
||||
|
||||
We will show that this is a model structure by using the theory of premodel structures
\cites{bartonModel2CategoryEnriched2019}[section 3]{cavalloRelativeEleganceCartesian2022}[section 3]{solution}{sattlerCYLINDRICALMODELSTRUCTURES}.
\todo{if there is time (there probably won't be) include the premodel theory and explain}
This machinery gives us a nice condition to check whether we are actually dealing with a model structure. But first we need to
introduce the notion of a premodel structure.
|
||||
|
||||
\begin{definition}[{\cites{bartonModel2CategoryEnriched2019}[def. 3.1.1]{cavalloRelativeEleganceCartesian2022}}]\label{def:preModelStructure}
|
||||
A \emph{premodel structure} on a finitely cocomplete and complete category \M consists of two weak factorization systems
|
||||
\((C,F_t)\) (the \emph{cofibrations} and \emph{trivial fibrations}) and \((C_t, F)\) (the \emph{trivial cofibrations} and \emph{fibrations})
|
||||
on \M such that \(C_t ⊆ C\) (or equivalently \(F_t ⊆ F\) ).
|
||||
\end{definition}
|
||||
|
||||
We also need the following property of such a structure.
|
||||
\begin{definition}[{\cite[Definition 3.3.3]{cavalloRelativeEleganceCartesian2022}}]
|
||||
We say a premodel category \M has the fibration extension property when for any fibration
|
||||
\(f : Y → X\) and trivial cofibration \(m : X → X′\), there exists a fibration \(f' : Y' → X'\) whose base change along \(m\) is
|
||||
\(f\):
|
||||
\begin{eqcd*}[row sep=large, column sep=large]
|
||||
Y \arrow[r,dashed] \arrow[d,"f"] \pb & Y' \arrow[d,dashed, "f'"] \\
|
||||
X \arrow[r,"m"] & X'
|
||||
\end{eqcd*}
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}[{\cite[Definition 3.2.5]{cavalloRelativeEleganceCartesian2022}}]
|
||||
A \emph{cylindrical} premodel structure on a category E consists of a premodel structure
|
||||
and adjoint functorial cylinder on E that are compatible in the following sense:
|
||||
\begin{itemize}
|
||||
\item \(∂ \⊗ (−)\) preserves cofibrations and trivial cofibrations;
|
||||
\item \(δ_k \⊗ (−)\) sends cofibrations to trivial cofibrations for \(k ∈ \{0, 1\}\).
|
||||
\end{itemize}
|
||||
\end{definition}\todo{clean up and explain}
|
||||
|
||||
|
||||
We get this characterizing theorem.
|
||||
|
||||
\begin{theorem}[{\cite[Theorem 3.3.5]{cavalloRelativeEleganceCartesian2022}}]\label{thm:premodelIsModel} \todo{if this gets longer expanded give it its own section}
|
||||
Let \(\M\) be a cylindrical premodel category in which
|
||||
\begin{enumerate}
|
||||
\item all objects are cofibrant;
|
||||
\item the fibration extension property is satisfied.
|
||||
\end{enumerate}
|
||||
Then the premodel structure on \(\M\) defines a model structure.
|
||||
\end{theorem}
|
||||
|
||||
The first item is quickly verified, as all monomorphisms are cofibrations.\todo{make a small lemma to prove it (closure properties got you covered)}
The second condition we won't show directly. We will get it for free once we construct fibrant universes, which we want to do anyway.
|
||||
|
||||
\subsubsection{Fibrant Universes}
|
||||
In this section we will switch between commutative diagrams and the internal language of our presheaf topos.
As these tools can get quite technical and tedious to verify, we will only present the idea of how these constructions
work and refer to the source material for the technical details.
|
||||
|
||||
The general construction of these universes follows mostly the same idea. We lift a Grothendieck universe from \Set to
the desired presheaf category, obtaining a Hofmann-Streicher universe \cite{hofmannLiftingGrothendieckUniverses1997},
classifying small maps.
This works in general for any presheaf topos over a small site. Afterwards we restrict this universe to small fibrations only.
We are then faced with a challenge: we need to prove that the resulting classifier is itself a fibration. In some cases
this can be made easier or avoided by stating the construction in a specific way.
|
||||
|
||||
For example by the arguments in \cite[Section 7]{awodeyHofmannStreicherUniverses2023} it suffices to construct \emph{classifying types}.
|
||||
\begin{definition}\label{def:modelStructure:classifyingType}
|
||||
A \emph{classifying type} for an awfs is a construction \(\Fib\) such that for every object \(X\)
and every morphism \(f : A → X\) there is a map \({\Fib(f) : \Fib(A) → X}\) whose sections are in correspondence with
\(\mathrm{R}\)-algebra structures on \(f\).
Moreover, given a map \(g : Y → X\), \(\Fib\) is stable under
pullback along \(g\).
|
||||
\begin{eqcd*}[row sep=large]
|
||||
g^*A
|
||||
\arrow[r]
|
||||
\arrow[d, "g^*f"']
|
||||
\pb
|
||||
& A
|
||||
\arrow[d, "f"]
|
||||
\\
|
||||
Y
|
||||
\arrow[r,"g"]
|
||||
& X
|
||||
\\
|
||||
\Fib(g^*A)
|
||||
\arrow[u, "\Fib(g^*f)"]
|
||||
\arrow[r]
|
||||
\arrow[ur,"\urcorner",phantom, very near start]
|
||||
& \Fib(A)
|
||||
\arrow[u, "\Fib(f)"']
|
||||
\end{eqcd*}
|
||||
\end{definition}
|
||||
If the wfs whose right maps we are trying to classify is functorial, we might think of this as an internalization
of the \(\R\)-algebra structures on \(f\).
|
||||
|
||||
We will first construct classifying types; after that we construct universes from them.
|
||||
|
||||
In preparation for the fibrant case we will first classify the trivial fibrations.
We don't have any equivariance condition on them, and thus they are the same as in \cite[Section 2]{awodeyCartesianCubicalModel2023},
when we assume \(Φ = Ω\).
|
||||
|
||||
Let us first think about the easiest case, where the codomain of a trivial fibration is the terminal object.
We should ask ourselves what the \TFF-algebra structures are\todo{introduce notion above}. These are intuitively
choice functions of uniform liftings against cofibrations. So let us think about those first. If we picture
a lifting problem,
|
||||
\begin{eqcd}\label{diag:liftProbTerminal}
|
||||
A \arrow[d, tail] \arrow[r] & X \arrow[d] \\
|
||||
C \arrow[r] & *
|
||||
\end{eqcd}
|
||||
we might think of this as a problem of filling some shape \(A\) in \(X\) to some shape \(C\) in \(X\).
Or in other words as a completion of a partial map\footnote{Recall that a partial map \(C → X\) is a span \(C \leftarrow A → X\),
where the left map is a monomorphism.} \(C → X\). We can classify these by the partial map classifier,
given by \(X^+ ≔ Σ_{φ:Ω}X^{[φ]}\)\todo{give a reference}. This gives us an object \(X^+\) (technically equipped with a map into the terminal)
and a monomorphism \( η_X : X → X^+\) such that for every partial map \(C → X\) there is a unique map \(a: C → X^+\) completing
the following pullback square.
|
||||
\begin{eqcd*}
|
||||
A \arrow[d, tail] \arrow[r] \pb & X \arrow[d,"η_X"] \\
|
||||
C \arrow[r, "a"] & X^+
|
||||
\end{eqcd*}
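As a small sanity check: for \(X = *\) the formula gives \({*}^+ ≔ Σ_{φ:Ω}{*}^{[φ]} ≅ Ω\) and \(η_*\) is the point \(\top : * → Ω\), so the partial map classifier of the terminal object recovers the subobject classifier, the completion \(a\) of a partial map \(C \leftarrow A → *\) being the characteristic map of \(A ↣ C\).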
|
||||
This makes \((−)^+\) into a pointed endofunctor, and unsurprisingly the \(+\)-algebras are in correspondence with
the \TFF-algebras.
|
||||
|
||||
\begin{proposition}[{\cite[Proposition 15]{awodeyCartesianCubicalModel2023}}]
|
||||
The algebras of the pointed endofunctor \((−)^+\) are in correspondence with the algebras of the pointed endofunctor \TFF
whose codomain is \(*\).
|
||||
\end{proposition}
|
||||
\begin{proof}[Proof sketch]
A \(+\)-algebra is an object \(X\) together with a morphism \(α : X^+ → X\) such that \(α η_X = \id\).
Take some lifting problem as in \cref{diag:liftProbTerminal} and define the chosen lift to be \(αa\). If we
are given a chosen lift, we compose it with \(η_X\) to retrieve \(a\).
|
||||
\end{proof}
|
||||
|
||||
The benefit of \((−)^+\)-algebras is that they are given by morphisms in \□ rather than in \(\□^→\).
We can internalize the condition to be a \((−)^+\)-algebra by means of the internal \(\Hom\).
|
||||
\begin{eqcd*}
|
||||
+\textrm{-Alg}(X)
|
||||
\arrow[d]
|
||||
\arrow[r]
|
||||
\pb
|
||||
& {[X^+,X]}
|
||||
\arrow[d,"{[η_X,\id_X]}"]
|
||||
\\
|
||||
*
|
||||
\arrow[r,"λ\id_X"]
|
||||
& {[X,X]}
|
||||
\end{eqcd*}
|
||||
It is immediately clear that sections of the left map correspond to the desired \(+\)-algebra structures.
|
||||
|
||||
If we want to achieve the same for a trivial fibration with arbitrary codomain \(Y\), we might just move
to the slice topos \(\faktor{\□}{Y}\), where such a map again becomes a map into the terminal object.
To avoid confusion we will call the partial map classification functor in
the slice over \(Y\) by the name \((−)^{+Y}\). Its algebras will be called relative \(+\)-algebras.
|
||||
We are even more lucky: this functor commutes with the pullback
functor \(\□ → \faktor{\□}{Y}\),
|
||||
\begin{eqcd*}
|
||||
\faktor{\□}{Y}
|
||||
\arrow[r, "(−)^{+Y}"]
|
||||
& \faktor{\□}{Y}
|
||||
\\
|
||||
\□
|
||||
\arrow[u,"Y^*"]
|
||||
\arrow[r,"(−)^+"]
|
||||
& \□
|
||||
\arrow[u,"Y^*"]
|
||||
\end{eqcd*}
|
||||
which allows us to compute it fiberwise \cite[Proposition 12]{awodeyCartesianCubicalModel2023}. This allows us to write
\((A)^{+Y} = Σ_{y:Y}A_y^+\), where \(A_y\) is the fiber of \(A\) at \(y\). We will still continue to write \(+\)-Alg when
there is no danger of confusion.
As all the involved functors (\((−)^+\), pullbacks and exponentials) are pullback stable, so is \(+\)-Alg, which lets us conclude
the following.
|
||||
\begin{proposition}
|
||||
\(+\)-Alg is a classifying type for trivial fibrations.
|
||||
\end{proposition}
|
||||
|
||||
|
||||
We will move down a very similar path as \cite{awodeyCartesianCubicalModel2023}, but need to keep track of the group
|
||||
actions of the symmetric groups on representables.
|
||||
For this we will pass to a category that \cite{solution} calls cubical species. The argument in essence will be very similar
|
||||
to the argument there, though in a different language.
|
||||
|
||||
\begin{definition}[compare {\cite[Section 4]{solution}}]
|
||||
By abuse of notation we will write \(Σ_k\) for the one-object groupoid induced by the symmetric group \(Σ_k\).
|
||||
Let \(Σ ≔ ∐_{k ∈ ℕ} Σ_k\), we define \(\□^Σ\) to be the category of \emph{cubical species}. For a cubical
|
||||
species \(A\) we write \(A_k\) for the cubical set in the image of \(Σ_k\).
|
||||
\end{definition}
|
||||
\begin{remark}
|
||||
\(\□^Σ\) is a presheaf topos.
|
||||
\end{remark}
|
||||
\begin{remark}
|
||||
\cite{solution} excludes the case \(k = 0\), because they want to exhibit a model category on cubical species.
For the construction on cubical sets this makes no difference, compare \cite[Remark 4.3.17]{solution}.
|
||||
\end{remark}
|
||||
\begin{remark}
|
||||
Going forward we keep the names trivial fibrations, cofibrations, fibration structures, \dots, even though we have not
defined any of these notions on \(\□^Σ\). We mean the objects that are equivalently classified in this
topos, as in the procedure above. This can actually be made into a model structure where these are the corresponding
fibrations, cofibrations, \dots; for details see \cite[Section 4]{solution}.
|
||||
\end{remark}
|
||||
|
||||
We need to define our equivalent of an interval, and the generic point inclusion. Recall that a functor from a group
|
||||
into a category is the same as choosing an object of that category together with a group action on it.
|
||||
\begin{definition}[{\cite[Example 4.3.5]{solution}}]
|
||||
Let \(\mathrm{I} : Σ → \□\) be the functor that sends \(Σ_k\) to \(\I^k\) together with the group action that freely
|
||||
permutes the dimensions.
|
||||
\end{definition}
|
||||
\begin{remark}[{\cite[Lemma 4.2.7]{solution}}]
|
||||
The interval \(\mathrm{I}\) is tiny.
|
||||
\end{remark}
|
||||
|
||||
\begin{definition}[{\cite[Definition 4.3.3]{solution}}]
|
||||
The diagonal \(δ : \mathrm{I} → \mathrm{I} × \mathrm{I}\) as a map in the slice \(\faktor{\□^Σ}{\mathrm{I}}\) is called
|
||||
the generic point.
|
||||
\end{definition}
|
||||
|
||||
% We will classify what in \cite{awodeyCartesianCubicalModel2023} would be called biased fibrations with regard to \(δ_0\) in \(\□^Σ\).
|
||||
% These are not the kind of fibrations we are interested in, as we want lifts against the generic point. We will sort that out later.
|
||||
% As the monic maps in \(\□^Σ\) are precisely those that are componentwise monic, we are just going to characterize
|
||||
% componentwise fibrations. But because we passed to \(\□^Σ\), the classifying type will have group actions on
|
||||
% every level, and we need to restrict to those, that are invariant under those group actions.
|
||||
|
||||
The main idea to continue classifying uniform fibrations from here on out is quite simple. As being a uniform fibration is
|
||||
equivalent by being send to a trivial fibration by the right adjoint of the leibniz applicationfunctor of the inteval,
|
||||
we get that a \(+\)-algebra structure on \(δ ⇒ f\)\todo{write the right thing here}
|
||||
is equivalent to a \FF-algebra structure on \(f\). This must then
|
||||
be reformulated, to get the desired map. This reformulation is quite tedious and so we will refer
|
||||
for the details to
|
||||
\cite[The classifying types of unbiased fibration structures]{awodeyCartesianCubicalModel2023}.
|
||||
In the source this takes place in \□, but it only makes use of the features we already established for \(\□^Σ\).
|
||||
An important detail
|
||||
to this procedure is, that in order to get the resulting classifying type pullback stable, this argument uses that
|
||||
the interval is tiny.
|
||||
|
||||
We get a pullback stable construction \(\Fib^Σ(f)\)\footnote{this can be made into a classifying type for some
|
||||
awfs compare \cite[Section 4.]{solution}}\todo{this footnote 2 is bad, but how to make different}
|
||||
whose sections now correspond componentwise to \FF-algebra structures, that are equivariant with respect
|
||||
to the group action of \(Σ^k\).
|
||||
|
||||
We now going to extract the classifying type for equivariant cubical fibrations. For this we need to relate cubical sets
|
||||
and cubical species. There exists the constant diagram functor \(Δ\) that sends a cubical set to the constant cubical species.
|
||||
|
||||
|
||||
Since \□ is complete this functor has a right adjoint\footnote{and also a left adjoint as \□ is cocomplete}
|
||||
\(Γ\) compare \cite[Section 5.1]{solution}, which is given by
|
||||
|
||||
\begin{equation*}
|
||||
Γ(A) ≔ ∏\limits_{k∈ℕ}(A_k)^{Σ_k}
|
||||
\end{equation*}
|
||||
|
||||
where \((A_k)^{Σ_k}\) is the cubical set of \(Σ_k\)-fixed points. It is not hard to see that this is indeed
|
||||
the right adjoint functor.
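Unfolded levelwise (a routine description; \(c\) ranges over the objects of the cube category and the \(Σ_k\)-action on \(A_k\) is taken levelwise):
\begin{equation*}
((A_k)^{Σ_k})(c) ≔ \{a ∈ A_k(c) \mid σ · a = a \text{ for all } σ ∈ Σ_k\}\text{.}
\end{equation*}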
|
||||
|
||||
\begin{proposition}
|
||||
We have a pair of adjoints \(Δ ⊣ Γ\)
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
We need to show
|
||||
\begin{equation*}
|
||||
\Hom_{\□^Σ}(Δ(A),B) = \Hom_{\□}\left(A,∏\limits_{k∈ℕ}(B_k)^{Σ_k}\right) \text{.}
|
||||
\end{equation*}
|
||||
But this is immediately clear: on the left-hand side we have componentwise equivariant natural transformations
|
||||
from \(Δ(A)_k = A\) to \(B_k\). They can only be valued in the fixed points in \(B_k\) as \(Δ(A)_k\) carries the
|
||||
trivial group action. A map on the right side is also a collection of natural transformations from \(A\) to
|
||||
the fixed points of \(B_k\).
|
||||
\end{proof}
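In particular (an unfolding we will use when pulling back along the unit below): since \(Δ(B)_k = B\) carries the trivial \(Σ_k\)-action, every element is fixed, so
\begin{equation*}
ΓΔ(B) = ∏\limits_{k∈ℕ}(Δ(B)_k)^{Σ_k} = ∏\limits_{k∈ℕ} B\text{,}
\end{equation*}
and the unit \(η_B : B → ΓΔ(B)\) is simply the diagonal.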
|
||||
|
||||
We now get that an \FF-algebra structure on \(f : A → B\) corresponds to a section
|
||||
\begin{eqcd*}
|
||||
{} & \mathrm{Fib}^{Σ_k}(Δ(A))
|
||||
\arrow[d,"\mathrm{Fib}^{Σ_k}(Δ(f))"]
|
||||
\\ Δ(B) \arrow[r,equal]
|
||||
\arrow[ru, dashed]
|
||||
& Δ(B)
|
||||
\end{eqcd*}
|
||||
By adjointness we can transpose this lift to a lift in cubical sets, and by pulling back
|
||||
along the adjunction unit to a split of a map over \(B\) (compare \cite[Lemmas 5.3.3 and 2.1.16]{solution}).
|
||||
\begin{eqcd*}[row sep=large]
|
||||
\mathrm{Fib}(A)
|
||||
\arrow[d,"\mathrm{Fib}(f)"]
|
||||
\arrow[r]
|
||||
\pb
|
||||
& Γ(\mathrm{Fib}^{Σ_k}(Δ(A)))
|
||||
\arrow[d,"Γ(\mathrm{Fib}^{Σ_k}(Δ(f)))"]
|
||||
\\ B \arrow[r,"η_B"]
|
||||
\arrow[ru, dashed]
|
||||
\arrow[u,bend left, dashed]
|
||||
& ΓΔ(B)
|
||||
\end{eqcd*}
|
||||
|
||||
We will now present the main idea of how to get fibrant universes from such classifying types.
|
||||
The technical details can be found in \cite[Section 7]{awodeyHofmannStreicherUniverses2023}.
|
||||
One might hope that we can construct a universal fibration, in the sense that every fibration
|
||||
is a pullback of this universal fibration. This cannot happen for size reasons. We will do
|
||||
the next best thing and construct a universe for big enough cardinals \(κ\). Because
|
||||
our set theory has Grothendieck universes, this will give us universes for \(κ\)-small fibrations.
|
||||
|
||||
To start, we will call our Hofmann-Streicher universe, i.e.\ the \(κ\)-small map classifier, \({p : \dot{\mathcal{V}}_κ → \mathcal{V}_κ}\).
|
||||
|
||||
\begin{proposition}[{\cite[Proposition 10]{awodeyHofmannStreicherUniverses2023}}]
|
||||
For a large enough \(κ\), there is a universe for \(κ\)-small fibrant maps, in the sense that there is
|
||||
a small fibration \(π : \dot{\mathcal{U}}_κ → \mathcal{U}_κ\) such that every small fibration \(f : A → X\), is
|
||||
a pullback of it along a canonical classifying map \(a\)
|
||||
\begin{eqcd*}
|
||||
A
|
||||
\pb
|
||||
\arrow[d,"f"]
|
||||
\arrow[r]
|
||||
& \dot{\mathcal{U}}_{κ}
|
||||
\arrow[d,"π_κ"]
|
||||
\\
|
||||
X
|
||||
\arrow[r,"a"]
|
||||
& \mathcal{U}
|
||||
\end{eqcd*}
|
||||
\end{proposition}
|
||||
|
||||
\begin{proof}
|
||||
We can then construct our universe that classifies \(κ\)-small fibrations by setting \(\mathcal{U}_κ ≔ \mathrm{Fib}(\dot{\mathcal{V}}_κ)\)
|
||||
and building the following pullback.
|
||||
\begin{eqcd*}[row sep=large, column sep = large]
|
||||
\dot{\mathcal{U}}_κ
|
||||
\pb
|
||||
\arrow[d,"π_κ"]
|
||||
\arrow[r]
|
||||
& \dot{\mathcal{V}}_{κ}
|
||||
\arrow[d,"p"]
|
||||
\\
|
||||
\mathcal{U}
|
||||
\arrow[r,"\mathrm{Fib}(p)"]
|
||||
& \mathcal{V}
|
||||
\end{eqcd*}
|
||||
To see that this is a fibration we will exhibit a split of the classifying type of \(π_κ\).
|
||||
As the classifying type is pullback stable we get the following diagram
|
||||
\begin{eqcd*}[row sep=large, column sep = large]
|
||||
\dot{\mathcal{U}}_κ
|
||||
\pb
|
||||
\arrow[d,"π_κ"]
|
||||
\arrow[r]
|
||||
& \dot{\mathcal{V}}_{κ}
|
||||
\arrow[d,"p"]
|
||||
\\
|
||||
\mathcal{U}_κ
|
||||
\arrow[r,"\mathrm{Fib}(p)"]
|
||||
& \mathcal{V}_κ
|
||||
\\
|
||||
\mathrm{Fib}(\dot{\mathcal{U}}_κ)
|
||||
\arrow[u]
|
||||
\arrow[r]
|
||||
\arrow[ur,phantom,very near start,"\urcorner"]
|
||||
& \mathrm{Fib}(\dot{\mathcal{V}}_κ)
|
||||
\arrow[u,"\mathrm{Fib}(p)"]
|
||||
\end{eqcd*}
|
||||
The lower pullback square is a pullback of \(\Fib(p)\) along itself. This map has a split, namely the diagonal.
|
||||
To show that this classifies small fibrations, let us consider a small fibration \(f : A → X\).
|
||||
Because it is a small map, we get it as a pullback along a canonical map \(a : X → \mathcal{V}\).
|
||||
\begin{eqcd*}
|
||||
{}
|
||||
& {}
|
||||
& \dot{\mathcal{U}}
|
||||
\arrow[dr]
|
||||
\arrow[dd]
|
||||
&
|
||||
\\
|
||||
{}
|
||||
& |[alias=A]| A
|
||||
\arrow[rr,crossing over]
|
||||
\arrow[ru,dashed]
|
||||
& & \dot{\mathcal{V}}
|
||||
\arrow[dd]
|
||||
\\
|
||||
\Fib(A)
|
||||
\arrow[rr]
|
||||
\arrow[dr]
|
||||
& & \mathcal{U}
|
||||
\arrow[dr]
|
||||
&
|
||||
\\
|
||||
{}
|
||||
& |[alias=X]| X
|
||||
\arrow[rr,"a"]
|
||||
\arrow[lu,dashed,"s",bend left]
|
||||
\arrow[ur,dashed,"s'"]
|
||||
& & \mathcal{V}
|
||||
\arrow[from=A,to=X,crossing over]
|
||||
\end{eqcd*}
|
||||
We get the lower square as a pullback as \Fib is pullback stable and \(\mathcal{U} = \Fib(\dot{\mathcal{V}})\).
|
||||
As \(f\) is a fibration, we get a split \(s\) of the fibration structure. And so we get a unique corresponding map
|
||||
\(s'\), which induces the required pullback square.
|
||||
|
||||
\end{proof}
|
||||
|
||||
\todo{introduce Leibniz pullback exponential}
|
||||
\todo{introduced (unbiased) uniform fibrations and how they behave with leibniz constructions}
|
||||
\todo{hate my life}
|
||||
|
||||
\subsubsection{HS-Universe}
|
||||
Assume this is given for now; if we have time we can write this out. \todo{delete it or write it out, and move it to some preliminary place}
|
||||
|
||||
|
||||
|
||||
\subsection{Dedekind Cubes}
|
||||
We will also need a description of the model structure on the Dedekind cubes. It is similar in construction; we just
|
||||
don't need explicit equivariance. It turns out that in the presence of connections we get equivariance for free
|
||||
\cite[Remark 4.2.26]{cavalloRelativeEleganceCartesian2022}. The argument that this indeed is a
|
||||
model structure, and the construction of univalent universes, are somewhat easier, as we can extract them from an internal description
|
||||
in the presheaf topos using strategies from \cite{LOPS}. As we only need this model structure for a later argument
|
||||
we will not present all the arguments here.
|
||||
|
||||
Characterizing cofibrations and trivial fibrations will be done as above.
|
||||
|
||||
\begin{definition}
|
||||
A \emph{generating cofibration} is a monomorphism into a cube \(c : C ↣ \I^n\).
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}
|
||||
A \emph{uniform trivial fibration} is a map with the uniform right lifting property against the generating cofibrations.
|
||||
\end{definition}
|
||||
|
||||
It will be useful for our later development to give a characterization in the internal language of the presheaf topos
|
||||
\(\□_{∧∨}\). For more details on the interpretation of the internal semantics of cubical sets see \cite{awodeyKripkeJoyalForcingType2021}.
|
||||
For this style of modelling cubical type theory inside of topoi see \cite{ortonAxiomsModellingCubical2018}.
|
||||
We will first give the characterization, then try to give some intuition why it makes sense, and afterwards give a proof.
|
||||
|
||||
|
||||
\begin{equation}\label{eq:TFib}
|
||||
\TFib A ≔ Π_{φ:Ω}Π_{f:[φ] → A} Σ_{a:A} Π_{α: [φ]} f(α) = a
|
||||
\end{equation}
|
||||
|
||||
\missingfigure{draw a tfib explanation}
|
||||
|
||||
So assume we have a type \(A\) in some context \(Γ\). For the discussion we will identify types with their
|
||||
respective display maps, to keep the notational overhead low.
|
||||
In this context the display map of \([φ]\) is a monomorphism into \(Γ\), or
|
||||
in other words a subterminal in \(\faktor{\□}{Γ}\). The term \(f : [φ] → A\) corresponds to a map from \([φ]\)
|
||||
to \(A\). As this is a subterminal we could view it as a partial element of \(A\), or under the analogy to spaces we can
|
||||
think of \(A\)
|
||||
like a bundle over \(Γ\), we specify an element in only some fibers of \(A\). The \(a\) now corresponds to a full element
|
||||
of \(A\) that agrees with the partial element, where it is defined. So giving an element of the type
|
||||
\(\TFib A\) is like giving an operation that can fill partial elements. Or formulated as a lifting problem:
|
||||
\begin{equation}
|
||||
\begin{tikzcd}
|
||||
{[φ]} \arrow[r,"f"] \arrow[d,tail] & A \arrow[d] \\
|
||||
Γ \arrow[r,"\id"] \arrow[ru,dashed,"a"'] & Γ
|
||||
\end{tikzcd}
|
||||
\end{equation}
|
||||
%
|
||||
It is an operation that, given \([φ]\) and \(f\), returns an \(a\) and a witness that the upper triangle commutes.
|
||||
The lower one commutes automatically by virtue of \(a\) being a term of type \(A\). Notice that uniformity
|
||||
does not appear explicitly in this description, but happens automatically as pullback squares are the
|
||||
interpretation of substitutions, and the type theoretic rules around substitution already imply a uniform
|
||||
choice of our lifts.
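Schematically (with \(\mathsf{fill}\) a hypothetical name for a term of type \eqref{eq:TFib} and \(σ\) a substitution), this stability under substitution reads
\begin{equation*}
(\mathsf{fill}\;φ\;f)[σ] = \mathsf{fill}\;(φ[σ])\;(f[σ])\text{,}
\end{equation*}
which is exactly the compatibility of the chosen fillers with the pullback squares interpreting \(σ\).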
|
||||
|
||||
\todo{give a proof or get rid of the claim that we do}
|
||||
|
||||
We can also give an analogous description of fibrations. As we do not need to think about equivariance, the description
|
||||
gets a bit less involved but stays basically the same.
|
||||
|
||||
\begin{definition}
|
||||
Let \(c : C ↣ \I^n\) be monic. A \emph{generating trivial cofibration} is a map of the form
|
||||
\( c \× δ_k\)
|
||||
\end{definition}
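For instance (a standard special case, reading \(\×\) as the Leibniz, i.e.\ pushout, product and writing \(∂\I^n ↣ \I^n\) for the boundary inclusion): taking \(c\) to be the boundary inclusion, the map \(c \× δ_k\) is the familiar open box inclusion
\begin{equation*}
(\I^n × \{k\}) ∪ (∂\I^n × \I) ↣ \I^n × \I\text{.}
\end{equation*}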
|
||||
|
||||
\begin{definition}
|
||||
A \emph{uniform fibration} is a map with the uniform right lifting property against the generating trivial cofibrations.
|
||||
\end{definition}
|
||||
|
||||
\begin{proposition}[{\cite[Remark 4.2.26]{cavalloRelativeEleganceCartesian2022}}]\label{prop:dedekindEquivariant}
|
||||
The uniform fibrations of \(□_{∧∨}\) are equivariant.
|
||||
\end{proposition}
|
||||
|
||||
|
||||
|
||||
|
||||
% % \subsubsubsection{Internal description}
|
||||
% For our further development we also want an internal description of our fibrations. This turns out to be more tricky then
|
||||
% the trivial fibration variant for multiple reasons. The first is, that trivial cofibrations are harder to describe then
|
||||
% cofibrations. We also have an additional condition namely equivariance.
|
||||
% We also want a universe in the cubical type theory, that corresponds to a fibrant univers of fibrations in \(\□\).
|
||||
%
|
||||
% First we need to discuss, how type theory talks about lifting problems.
|
||||
%
|
||||
% We will do this the other way round. We first describe our fibrant universe and then get the rest for free. We will
|
||||
% do this in a way following \cite{LOPS}. The first important obesrvation will be that our functorial cylinder not only
|
||||
% has a right adjoint, but an even further right adjoint. Or in other words our interval is tiny.
|
||||
% This can be seen with ease, we can describe the cylinder path space adjunction by a functor on \(□\). Namely the
|
||||
% product with \([1]\). One checks easaly that \(((−) × [1])_!\) is isomporhic to the product with \(\I\) in \(\□\).
|
||||
% Like in any presheaf topos we get an induced adjoint triple. This situation is very common to variants of cubical
|
||||
% sets. In contrast to simplicial sets where this does not happen.
|
||||
% \begin{equation}
|
||||
% ((−) × [1])_! ⊣ ((−) × [1])^* ⊣ ((−) × [1])_*
|
||||
% \end{equation}
|
||||
% \begin{proposition}
|
||||
% We have that
|
||||
% \begin{equation*}
|
||||
% (−)^\I \cong ((−) × [1])^*
|
||||
% \end{equation*}
|
||||
% \end{proposition}
|
||||
% \begin{proof}
|
||||
% Let \(A\) be a cubical Set. Because \(□\) is closed under products we get
|
||||
% by yoneda and the definition of exponentials the following natural isomorphism.
|
||||
% \begin{equation*}
|
||||
% A^\I_n \cong \□(\I^n,A^\I) \cong \□(\I^n × \I, A) \cong A([1]^n × [1]) = ((−) × [1])^*(A)_n
|
||||
% \end{equation*}
|
||||
% And thus an isomorphism between the given functors.
|
||||
% \end{proof}
|
||||
%
|
||||
% \begin{definition}
|
||||
% We write \(\sqrt{\phantom{−}} ≔ ((−)×[1])_* \) for the rightmost adjoint of this adjunction
|
||||
% triple.
|
||||
% \end{definition}
|
||||
|
||||
\end{document}
|
19
src/Preliminaries/Preliminaries.tex
Normal file
19
src/Preliminaries/Preliminaries.tex
Normal file
|
@ -0,0 +1,19 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
% \chapter{Preliminaries}
|
||||
% \section{Category Theory}
|
||||
% We already assume some basic notions of category theory. For the completly unitiated there are good
|
||||
% references for example \cite{maclaneCategoriesWorkingMathematician1978} or \cite{awodeyCategoryTheory2006}.
|
||||
% We will also assume some familarities with presheave categories \cite{maclaneSheavesGeometryLogic1994}.
|
||||
% But we will revisit some branches of category theory that we will encouter later.
|
||||
|
||||
|
||||
% \subfile{Model_Categories}
|
||||
\subfile{Leibniz_Construction}
|
||||
\subfile{Cubical_Set}
|
||||
\subfile{Model_Structure}
|
||||
% \subfile{Reedy_Categories}
|
||||
% \subfile{Dependent_Type_Theory}
|
||||
% \subfile{Categorical_Semantics}
|
||||
|
||||
\end{document}
|
4
src/Preliminaries/Reedy_Categories.tex
Normal file
4
src/Preliminaries/Reedy_Categories.tex
Normal file
|
@ -0,0 +1,4 @@
|
|||
\documentclass[Preliminaries.tex]{subfiles}
|
||||
\begin{document}
|
||||
|
||||
\end{document}
|
4
src/Preliminaries/figures/square_example_picture.svg
Normal file
4
src/Preliminaries/figures/square_example_picture.svg
Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 1.2 MiB |
4
src/Preliminaries/figures/trivial_cofibration.svg
Normal file
4
src/Preliminaries/figures/trivial_cofibration.svg
Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 3 MiB |
1
src/Preliminaries/resource.bib
Symbolic link
1
src/Preliminaries/resource.bib
Symbolic link
|
@ -0,0 +1 @@
|
|||
../resource.bib
|
775
src/main/Equivalence.tex
Normal file
775
src/main/Equivalence.tex
Normal file
|
@ -0,0 +1,775 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\section{Equivalence to spaces}\todo{\printline do I need to explain idempotent completions?}
|
||||
\todo{give all natural transformations \(⇒\) instead of \(→\)}
|
||||
|
||||
For this section we fix the following notation. We write \(n\) for a set with cardinality \(n\),
|
||||
and identify \(2\) with the set \(\{⊥,⊤\}\). We write \([n]\) for the well ordered sets
|
||||
\(\{0,…,n\}\).
|
||||
|
||||
We would love to compare the model categories on \(\□\) and \(\Δ\). For this we want at least
|
||||
an adjoint pair between those. If we somehow could get a functor on the site level, we could
|
||||
get an adjoint triple. But there is no immediately obvious functor to do the job.
|
||||
The usual trick would be to go look at the idempotent completions of these categories. These
|
||||
usually have
|
||||
more objects and are sometimes more suitable as the codomain of a functor, while
|
||||
the presheaf category stays the same (up to equivalence).
|
||||
For example, the idempotent completion of \dCube is the category \FL of finite lattices and
|
||||
monotone maps \cites{streicherSimplicialSetsCubical2021}{sattlerIdempotentCompletionCubes2019}.
|
||||
So we have automatically \(\widehat{\FL} = \dcSet\),
|
||||
and there is an obvious inclusion \(i: Δ → \FL\). Sadly for us \(□\) is already idempotent complete.
|
||||
To get around this issue we can embed \(j: □ → \dCube → \FL\), and get into the following situation.
|
||||
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=large]
|
||||
\□ \arrow[from=r,"j^*"{description,name=i}] & \widehat{\FL} %
|
||||
\arrow[from=l,bend left=50,"j_!"{above,name=ish}] \arrow[from=l,bend right=50, "j_*"{below,name=ist}] %
|
||||
\arrow[from=r,bend right=50, "i_!"{above,name=jsh}] \arrow[from=r,bend left=50,"i_*"{below,name=jst}] %
|
||||
& \Δ \arrow[from=l,"i^*"{description,name=j}]
|
||||
\arrow[phantom,from=i,to=ish,"{\rotatebox{270}{\(⊣\)}}"]
|
||||
\arrow[phantom,from=ist,to=i,"{\rotatebox{270}{\(⊣\)}}"]
|
||||
\arrow[phantom,from=j,to=jsh,"{\rotatebox{270}{\(⊣\)}}"]
|
||||
\arrow[phantom,from=jst,to=j,"{\rotatebox{270}{\(⊣\)}}"]
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
|
||||
We can also define \(j\) explicitly like this:
|
||||
\begin{definition}
|
||||
Let \(𝟚\) be the finite order \(⊥ ≤ ⊤\). If it is convenient we will identify this with the order \(0 ≤ 1\).
|
||||
\end{definition} \todo{\printline move this somewhere sane}
|
||||
|
||||
\begin{definition}\label{def:j}
|
||||
Let \(j : □ → \FL\) the functor given by the following data:
|
||||
\begin{itemize}
|
||||
\item On objects: \(j(\{⊥|x_1…x_n|⊤\}) ≔ 𝟚^n\) \todo{\printline use \(⊥⊤\) notation in introduction chapter}
|
||||
\item On morphisms: \(j(f)(g)(x) ≔ \begin{cases}g(f(x)) & f(x) \notin 𝟚 \\ f(x) & f(x) ∈ 𝟚\end{cases}\)
|
||||
\end{itemize}
|
||||
\end{definition}
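As a small sanity check (our own unfolding, using the identification of a morphism into the \(m\)-cube with a function \(m → n + 2\), as also used in the proof of \cref{prop:j!preservesMonos}): the endpoint inclusion of the \(1\)-cube, given by the function \(1 → 0 + 2\) sending the single output variable to \(⊥\), is mapped by \(j\) to the map
\begin{equation*}
𝟚^0 → 𝟚^1\text{,} \qquad * ↦ ⊥\text{,}
\end{equation*}
picking out the point \(⊥\) of \(𝟚\).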
|
||||
|
||||
In \cite{riehlVid} it is claimed that \(i^*j_!\) is the triangulation functor and \(j^*i_!\) a
|
||||
left Quillen homotopy inverse. In the meantime Reid Barton observed, in a yet unpublished paper,
|
||||
\todo{\printline how to cite this? claimed in \cite{solution}}
|
||||
that this triangulation can be described by a single functor on the level of sites \(Δ → □\). One
|
||||
could now establish Quillen equivalence using this functor, for details see \cite[sec. 6]{solution}.
|
||||
We are going to verify the original argument. To give an overview of the necessary steps:
|
||||
\begin{enumerate}
|
||||
\item Show that \(i_!\) and \(i^*\) are left Quillen. \label{step1}
|
||||
\item Show that \(j_!\) and \(j^*\) are left Quillen. \label{step2}
|
||||
\item[*] Conclude that all 4 preserve weak equivalences.
|
||||
\item Show that \(i^* j_!\) and \(j^* i_!\) descend to inverses on the homotopy categories.
|
||||
\end{enumerate}
|
||||
|
||||
Before we jump into the formal proofs we would like to get some intuition for the four functors of interest.
|
||||
The category \FL has the luxury of containing
|
||||
both \(Δ\), as the well-orderings, and \dCube, as the objects \(𝟚^n\). As \FL is the idempotent completion of \dCube,
|
||||
we also know that a presheaf on \FL is completely determined by its behavior on \dCube. The two functors
|
||||
\(i^*\) and \(j^*\) are just the restrictions, and we can also understand \(i_!\) and \(j_!\) geometrically.
|
||||
Let \(F ∈ \Δ\), then \(F(x) = i_!(F)(i(x))\). The interesting question is what \(i_!\) yields
|
||||
on cubes (i.e. \(\Yo 𝟚^n\)) and \(j_!\) on simplices (i.e. \(\Yo [n]\)). \(j_!\) will basically triangulate the cube (see \cref{prop:triangulation}),
|
||||
while \(i_!\) will just add all possible cubes that are degenerated to simplices.
|
||||
|
||||
To understand \(i^*\) a bit better we might take a look at how things that are defined on \(𝟚^n\) extend to simplices.
|
||||
For this we need to exhibit \([n]\) as a retract of \(𝟚^n\). For this we define two maps.
|
||||
\begin{definition}
|
||||
Let
|
||||
\begin{itemize}
|
||||
\item \(r: 𝟚^n → [n]\) be the monotone function \(r(f) = Σ_{x ∈ n}f(x)\)
|
||||
\item \(i: [n] → 𝟚^n\) be the monotone function \(i(x)(y) = \begin{cases} ⊤ & y < x \\ ⊥ & \text{otherwise} \end{cases}\)
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
We can see directly that this is a split retract pair and thus gives rise to an idempotent \(e = ir : 𝟚^n → 𝟚^n\).
|
||||
And by general abstract nonsense we get that \(r\) is an absolute coequalizer map of \(e\) and \(\id\). Similarly \(i\)
|
||||
is an absolute equalizer of \(e\) and \(\id\). So for a presheaf
|
||||
\(F\) on \FL, we can compute \(F([n])\) as the equalizer of \(F(e)\) and \(F(\id)\), or the coequalizer of \(F(e)\) and \(F(\id)\)
|
||||
in \Set.
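As a quick sanity check (a worked case for \(n = 2\), our own computation, writing elements of \(𝟚^2\) as pairs \((f(0),f(1))\)):
\begin{equation*}
i(0) = (⊥,⊥)\text{,} \qquad i(1) = (⊤,⊥)\text{,} \qquad i(2) = (⊤,⊤)\text{,}
\end{equation*}
so \(r(i(x)) = x\) for all \(x ∈ [2]\), while the idempotent \(e = ir\) sends, for example, \((⊥,⊤)\) to \(i(1) = (⊤,⊥)\).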
|
||||
|
||||
\begin{proposition}\label{prop:i_!Nothing}
|
||||
\(i_!Δ^n\) agrees with \(\Yo [n]\).
|
||||
\end{proposition}
|
||||
\begin{proof} We have
|
||||
\begin{equation*}
|
||||
\Hom_{\FL}(i_!Δ^n,B) = \Hom_{\Δ}(Δ^n,i^*B) = B(i([n])) = B([n]) = \Hom_{\FL}(\Yo [n], B) \text{,}
|
||||
\end{equation*}
|
||||
and by uniqueness of adjoints, the claim follows.
|
||||
\end{proof}
|
||||
|
||||
\begin{proposition}\label{prop:j_!Nothing}
|
||||
\(j_!(\I^n) = \Yo 𝟚^n\)
|
||||
\end{proposition} \todo{\printline move to a sane place}
|
||||
\begin{proof}
|
||||
We have that \(\Hom(j_!(\I^n),B) = \Hom(\I^n,j^*(B))\). And we get
|
||||
\begin{equation*}
|
||||
\Hom(\Yo 𝟚^n, B) = B(𝟚^n) = B(j(n)) = \Hom(\I^n,j^*B)\text{.}
|
||||
\end{equation*}
|
||||
Thus, by uniqueness of adjoints, we have that \(j_!(\I^n) = \Yo 𝟚^n\).
|
||||
\end{proof}
|
||||
|
||||
\subsection{\(i_!\) and \(i^*\) are left Quillen functors}
|
||||
We need to show that \(i_!\) and \(i^*\) preserve cofibrations (or
|
||||
in other words monomorphisms) and trivial cofibrations. The hard
|
||||
part will be to show that \(i_!\) preserves monomorphisms and we will do that last.
|
||||
|
||||
\begin{lemma}\label{lem:restrictionPreserveMonos}
|
||||
Let \(F: \A → \B\) be a functor, then the restriction functor \(F^* : \hat{\B} → \hat{\A}\) preserves monomorphisms.
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
Let \(f\) be a monomorphism in \(\hat{\B}\). As being monic can be tested componentwise, we need
|
||||
to show that \((i^*f)_x\) is monic for an arbitrary \(x∈\A\). This is just \(f_{(ix)}\) which is monic by assumption.
|
||||
\end{proof}
|
||||
|
||||
\begin{proposition}\label{prop:i*preservesMonos}
|
||||
\(i^*\) preserves monomorphisms.
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
Directly by \cref{lem:restrictionPreserveMonos}.
|
||||
\end{proof}
|
||||
|
||||
\begin{proposition}\label{prop:i*preservesTrivialCofib}
|
||||
\(i^*\) preserves trivial cofibrations.
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
We are in the comfortable position of being left and right adjoint.
|
||||
\(i^*\) preserves the interval inclusion. As pushout products with the interval inclusion sends cofibrations
|
||||
to trivial cofibrations and \(i^*\) preserves monomorphisms by \cref{prop:i*preservesMonos}, \(i^*\) sends trivial cofibrations
|
||||
to trivial cofibrations.
|
||||
\end{proof}
|
||||
|
||||
As the next proof is mostly a straightforward induction, we don't repeat the full argument and only give a proof sketch.
|
||||
\begin{proposition}[{\cite[prop. 3.6]{sattlerIdempotentCompletionCubes2019}}]\label{prop:i!preservesTrivialCofib}
|
||||
\(i_!\) preserves trivial cofibrations.
|
||||
\end{proposition} \todo{\printline maybe give the full proof but low prio}
|
||||
\begin{proof}[Proof sketch.]
|
||||
It is fairly easy to see that \(i_!(\Yo -)\) on morphisms is valued in weak equivalences, by checking that representables in \dcSet are
|
||||
weakly contractible.
|
||||
Then one shows inductively that one can build the horn inclusions as pushouts of horn inclusions of dimension \(n\) and an
|
||||
inclusion of the \(n\)-dimensional horn into an \(n+1\)-dimensional horn. As \(i_!\) is cocontinuous and the trivial cofibrations
|
||||
have as a left lifting class the necessary closure properties, the claim follows.
|
||||
\end{proof}
|
||||
|
||||
We now get to the difficult part of step \ref{step1}. The argument we are going to follow is due to Sattler \cite{sattlerIdempotentCompletionCubes2019}
|
||||
and involves the Reedy Structure of \(Δ\). We are going through the whole argument here, as it is a good preparation
|
||||
for \(j_!\) in step \ref{step2}. We also use this to add a lot of detail and improve the criterion a bit, see the remark at \cref{lem:idemLemma3.5}.
|
||||
\begin{proposition}[{\cite[prop. 3.3]{sattlerIdempotentCompletionCubes2019}}]\label{prop:i!preservesMonos}
|
||||
\(i_!\) preserves monomorphisms.
|
||||
\end{proposition}
|
||||
Following \cite{sattlerIdempotentCompletionCubes2019} we are first going to prove two technical lemmas.
|
||||
Note that a repetition of this argument is also good to check if we actually need an elegant Reedy structure
|
||||
or if we can do with just an Eilenberg-Zilber category.
|
||||
|
||||
\begin{lemma}[{\cite[lem 3.4]{sattlerIdempotentCompletionCubes2019}}]\label{lem:idemLemma3.4}
|
||||
Let \C be an Eilenberg-Zilber category. Assume \C has pullbacks along face maps.
|
||||
The functor \(\colim : \hat{\C} → \Set\) preserves monomorphisms.
|
||||
\end{lemma}
|
||||
\begin{remark}
|
||||
The original source has as an extra condition that these pullbacks preserve face and degeneracy maps. We
|
||||
do not need this by making use of the fact that degeneracies split.
|
||||
\end{remark}
|
||||
|
||||
For this proof we will make some use of the equivalence between presheaves and discrete Grothendieck fibrations.
|
||||
This has the benefit of exhibiting our monomorphism as a Grothendieck fibration. We also can lift the
|
||||
Eilenberg-Zilber structure from our site, a notion that is hard to express with presheaves.
|
||||
|
||||
\begin{lemma}\label{lem:liftEZFib}
|
||||
Let \(\C\) be an Eilenberg-Zilber category, and \(\p : Q → \C\) a discrete Grothendieck fibration.
|
||||
Then we can lift the Eilenberg-Zilber structure to \(Q\).
|
||||
\end{lemma}
|
||||
The hard part of the proof will be the lifting of absolute pushouts. In general pushouts don't need to
|
||||
lift, as the fiber above the pushout in the base category might be empty. If we had a split pushout like in
|
||||
\cref{absolutePushout}, we could lift the splits to solve that problem. Sadly, not all absolute pushouts
|
||||
are of this form, but something a bit weaker is true. The first appearance of this observation seems to be
|
||||
in \cite{pareAbsoluteColimits1971}.
|
||||
\begin{lemma}\label{lem:absPushChar}
|
||||
A commutative square
|
||||
\begin{eqcd*}
|
||||
A \arrow[r, "p" ] \arrow[d, "q" ] & B \arrow[d, "m"] \\
|
||||
C \arrow[r, "n"] & P
|
||||
\end{eqcd*}
|
||||
is an absolute pushout diagram if and only if there exists
|
||||
\begin{enumerate}
|
||||
\item A section \(u : P → B\) of \(m\).
|
||||
\item Morphisms \(r_1,…,r_k : B → A \) and \(s_1,…,s_k : B → A\) for some \( k ≥ 1\), such that \(ps_1 = \id_B\),
|
||||
\(qs_i = qr_i\) for all \(i\), \(pr_i = ps_{i+1}\) for all \(i < k\), and \(pr_k = um\).
|
||||
\item Morphisms \(t_1,…,t_{l+1} : C → A\) and \(v_1,…,v_l : C → A\) for some \(l ≥ 0\), such that \(qt_1 = \id_C\),
|
||||
\(pt_i = pv_i\) for all \(i ≤ l\), \(qv_i = qt_{i+1}\) for all \(i ≤ l\), and \(pt_{l+1} = un\).
|
||||
\end{enumerate}
|
||||
or the symmetric situation where \(B\) and \(C\) are interchanged.
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
Let us first assume we are given all the data above (and WLOG \(B\) and \(C\) are not interchanged), and let
|
||||
\begin{eqcd*}
|
||||
A \arrow[r, "p" ] \arrow[d, "q" ] & B \arrow[d, "b"] \\
|
||||
C \arrow[r, "c"] & X
|
||||
\end{eqcd*}
|
||||
be a commutative square. As all this data gets preserved by any functor we only need to prove that such a diagram
|
||||
is already a pushout. We now need to find a \(x : P → X\) that witnesses \(P\) as a pushout.
|
||||
Since \(m\) splits, \(x\) will be unique automatically. We define \(x ≔ bu\). We now need to check
|
||||
that it makes both evident triangles commute. We do this by chasing through all the data we have been given.
|
||||
\begin{equation*}
|
||||
xm = bum = bpr_k = cqr_k = cqs_k = bps_k = bpr_{k-1} = … = bps_1 = b
|
||||
\end{equation*}
|
||||
and
|
||||
\begin{equation*}
|
||||
xn = bun = bpt_{l+1} = cqt_{l+1} = cqv_l = bpv_l = bpt_l = … = cqt_1 = c \text{.}
|
||||
\end{equation*}
|
||||
|
||||
Now let us assume the given square is an absolute pushout. We need to construct all the data from above.
|
||||
To get the desired split \(u\), we will apply the \(\Hom(P,−)\) functor and get a pushout square in \(\Set\).
|
||||
\begin{eqcd*}
|
||||
\Hom(P,A) \arrow[r, "p"] \arrow[d, "q"] \pu & \Hom(P,B) \arrow[d, "m"] \\
|
||||
\Hom(P,C) \arrow[r, "n"] & \Hom(P,P)
|
||||
\end{eqcd*}
|
||||
That means we have a surjection from \(\Hom(P,B) + \Hom(P,C) → \Hom(P,P)\).
|
||||
A preimage of \(\id_P\) under this surjection is a morphism \(u\) for which either \(mu = \id_P\)
|
||||
or \(nu = \id_P\) holds. We assume WLOG the first case.
|
||||
|
||||
To get \(r_1,…,r_k\) and \(s_1,…,s_k\) that relate \(\id_B\) and \(um\), we look at the image under the
|
||||
\(\Hom(B,−)\) functor.
|
||||
\begin{eqcd*}
|
||||
\Hom(B,A) \arrow[r, "p"] \arrow[d, "q"] \pu & \Hom(B,B) \arrow[d, "m"] \\
|
||||
\Hom(B,C) \arrow[r, "n"] & \Hom(B,P)
|
||||
\end{eqcd*}
|
||||
We know that \(um\) and \(\id_B\), are getting sent to the same map in \(\hom(B,P)\), because
|
||||
\(mum = m = m\id_B\).
|
||||
We can now construct a sequence of maps in \(\hom(B,A)\) if we alternate between taking preimages of images
|
||||
under \(p\) and \(q\). Lets call the preimages along \(p\) by \(s_i\), and the preimages along \(q\) by \(r_i\).
|
||||
This procedure immediately guarantees the equations
|
||||
\begin{align*}
|
||||
qs_i = qr_i & & pr_i = ps_{i+1} \text{.}
|
||||
\end{align*}
|
||||
If we start the procedure with \(\id_B\) in \(\hom(B,B)\), we also get \(ps_1 = \id_B\). Because \(\id_B\) and
|
||||
\(um\) get sent to the same element along \(m\) there exists a \(k\) and a choice of \(r_i\) and \(s_i\) such that
|
||||
\(pr_k = um\).
|
||||
|
||||
The maps for condition three can be constructed with the same technique after applying the \(\hom(C,−)\) functor.
|
||||
The index shift comes from the fact that \(\id_C ∈ \hom(C,C)\) and \(un ∈ \hom(C,B)\) are in opposite corners
|
||||
of the resulting diagram.
|
||||
\end{proof}
|
||||
|
||||
To be able to lift this structure, we make sure we can lift commuting triangles first.
|
||||
\begin{lemma}\label{lem:gfibliftcommute}
|
||||
Let \(F : Q → \C\) be a discrete Grothendieck fibration, \(f: A → B\) be a morphism in \(Q\), and
|
||||
\(g : C → F(B)\) and \(h : F(A) → C\) such that \(gh = F(f)\). We can lift \(g\) and \(h\) to form
|
||||
a commutative diagram in \(Q\).
|
||||
\begin{eqcd*}[column sep=small]
|
||||
A \arrow[rr,"f"] \arrow[dr, dashed, "h'"'] & & B \\[-10pt]
|
||||
& C' \arrow[ur,"g'"', dashed]& \\
|
||||
F(A) \arrow[rr,"F(f)"] \arrow[dr, "h"'] & & F(B) \\[-10pt]
|
||||
& C \arrow[ur, "g"'] &
|
||||
\end{eqcd*}
|
||||
\end{lemma}
|
||||
\begin{remark}
|
||||
It also suffices to just give an object \(B\) and a commutative diagram in \C.
|
||||
\end{remark}
|
||||
\begin{proof}
|
||||
That we can get lifts \(g'\) and \(h'\) follows directly from the definition of discrete Grothendieck fibrations.
|
||||
We only need to argue that the domain of \(h'\) is indeed \(A\) and that the diagram commutes. As \(f\) and \(g'h'\) are lifts
|
||||
of \(F(f) = gh\), we get \(f = g'h'\) by the uniqueness of lifts.
|
||||
\end{proof}
|
||||
|
||||
\begin{proof}[Proof of Lemma \ref{lem:liftEZFib}]
|
||||
First we need to define a degree function \(\d : \Obj Q → ℕ\). Let \(\d'\) be the degree function from
|
||||
\C, we then define \(\d ≔ \d' \p\). The next thing to do is to show that isomorphisms preserve degree, split epis
|
||||
lower degree and noninvertible monomorphisms increase degree. For that it suffices to show that \(p\) preserves
|
||||
these. Isomorphisms and split epis are preserved by every functor, so only noninvertible monomorphisms remain.
|
||||
We will establish this in two steps, first showing mono preservation and then reflection of inverses.
|
||||
|
||||
Given some monomorphism \(f : A ↣B\) in \(Q\) and two morphisms \(g,h : C → \p(A)\) in \C, such that \(\p(f)h = \p(f)g \),
|
||||
we can lift \(g\) and \(h\) to \(g' : C' → A\) and \(h' : C' → A\). By \cref{lem:gfibliftcommute}, we have \(fg' = fh'\) and
|
||||
thus \(g' = h'\). We get directly \( g = \p(g') = \p(h') = h\) and thus \(\p(f)\) is monic.
|
||||
|
||||
Assume we have a map \(f\) in \(Q\) such that \(\p(f)\) has a (one sided) inverse. By \cref{lem:gfibliftcommute}, the lift
|
||||
of that (one sided) inverse is also a (one sided) inverse of \(f\).
|
||||
|
||||
This fact also helps us directly in the next step. We need to show that every morphism factors into a split epi followed by
|
||||
a monomorphism. We can lift a factorization by \cref{lem:gfibliftcommute}, and as one sided inverses are preserved by lifting,
|
||||
\(\p\) reflects split epis. So we only need to show that monos are preserved by liftings. Let \(f : A → B\) be a morphism
|
||||
in \(Q\), such that \(\p(f)\) is monic and let \(g,h : C → A\) such that \(fg = fh\). Because \(p(f)\) is monic, \(\p(g) = \p(h)\),
|
||||
and thus \(g\) and \(h\) are liftings of the same morphism. As liftings are unique \(g = h\) and \(f\) is monic.
|
||||
|
||||
As a last step we must establish that a span of split epis has an absolute pushout. So let \(f: A → B \) and \(g : A → C\) be split
|
||||
epimorphisms in \(Q\). As these are preserved by functors, \(\p(f)\) and \(\p(g)\) are split epis too. So there is an absolute pushout
|
||||
of \(\p(f)\) and \(\p(g)\). By \cref{lem:absPushChar}, we can extend this square, by all the extra morphisms that characterize
|
||||
the absolute pushout. We first lift the split established by 1. in \cref{lem:absPushChar}, to get our candidate pushout.
|
||||
Lifting the rest of the data and checking that the required equalities still hold is a tedious but straight forward repeated
|
||||
application of \cref{lem:gfibliftcommute}. Afterwards we can apply \cref{lem:absPushChar} again to verify that we indeed got
|
||||
an absolute pushout in \(Q\).
|
||||
\end{proof}
|
||||
\begin{remark}
|
||||
With the exact same argument we can lift elegant Reedy structures. We don't even need to check the mono preservation,
|
||||
we can just define face maps to be lifts of face maps. Even though not explicit in the definition, degeneracies in elegant
|
||||
Reedy categories are split epimorphisms. We took the extra effort here to see if this step would be a problem in the cubical
|
||||
case.
|
||||
\end{remark}
|
||||
|
||||
\begin{lemma}\label{lem:liftPullbacks}
|
||||
Let \(F : Q → \C\) be a discrete Grothendieck fibration. Given a cospan in \(Q\) and a pullback of the image of the
|
||||
cospan in \C, there exists a lift of that pullback completing the original cospan to a pullback square.
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
Let \(f : A → C\) and \(g : B → C\) be a cospan in \(Q\), such that its image under \(F\) completes to a pullback
|
||||
square. We can lift this square to a commuting square in \(Q\) by \cref{lem:gfibliftcommute}.
|
||||
We now need to check that this again a pullback square.
|
||||
\begin{eqcd*}[column sep=tiny, row sep=small]
|
||||
X \arrow[dd, bend right, "x"' ] \arrow[rrrd, bend left=15, "y"] \arrow[dr, dashed] & & & \\
|
||||
& P' \arrow[rr,"p'"] \arrow[dl,"q'"'] & & A \arrow[dl,"f"] \\
|
||||
B \arrow[rr,"g"'] & & C & \\[30pt]
|
||||
F(X) \arrow[dd, bend right, "F(x)"'] \arrow[rrrd, bend left=15, "F(y)"] \arrow[dr, dashed] & & & \\
|
||||
& P \arrow[rr,"p"] \arrow[dl,"q"'] \pb & & F(A) \arrow[dl,"F(f)"] \\
|
||||
F(B) \arrow[rr, "F(g)"'] & & F(C) &
|
||||
\end{eqcd*}
|
||||
So let us take any span \((x,y)\) that completes the cospan \((f,g)\) to a commuting square in \(Q\).
|
||||
We can map this to \C via \(F\) and get a universal map witnessing that the square in \C is indeed a pullback.
|
||||
We can lift this map uniquely to \(P'\) along \(F\), and by lemma \ref{lem:gfibliftcommute} this lift makes the
|
||||
diagram commute.
|
||||
\end{proof}
|
||||
|
||||
% We will also need another technical property of discrete Grothendieck fibrations and Eilenberg-Zilber categories.
|
||||
% \begin{lemma}\label{lem:bifib}
|
||||
% Let \(F : P ↣ \C\) be a a monic discrete Grothendieck fibration, and \C be an Eilenberg-Zilber (or elegant Reedy) category. Then
|
||||
% \(F\) as opcartesian lifts of degeneracy maps. \todo{\printline check if we introduced “degeneracy” and “face” as words in the definition}
|
||||
% \end{lemma}
|
||||
% \begin{proof}
|
||||
% So let \(A\) be an object in P and \(f : F(A) → B\) be a degeneracy in \C i.e. a split epi.
|
||||
% We can then lift the split to \(A\), and get a morphism \(g : B' → A\) where \(B'\) is the unique object
|
||||
% above \(B\). We can now lift \(f\), to \(B'\) and get a morphism \(f': A → B'\) above \(f\). Verifying that
|
||||
% \(f'\) has the required universal property is a straight forward repeated application of lemma \ref{lem:gfibliftcommute}.
|
||||
% \end{proof}
|
||||
|
||||
\begin{proof}[Proof of Lemma \ref{lem:idemLemma3.4}]
|
||||
Let \(f : P' ↣ Q'\) be a monomorphism in \(\hat{\C}\). This gives equivalently rise to a monomorphism \(F\)
|
||||
of discrete Grothendieck fibrations via the category of elements.
|
||||
We define \(P ≔ \int P'\), \(Q ≔ \int Q'\) and \(F ≔ \int f\).
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=tiny, row sep=small]
|
||||
P \arrow[rr,"F",tail] \arrow[rd] & & Q \arrow[ld] \\
|
||||
& \C &
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
That \(\colim\) sends \(f\) to a monomorphism,
|
||||
is equivalent to the statement that \(F\) is monic on connected components. That follows directly from the explicit construction
|
||||
of colimits in \(\Set\). Also \(F\) gets to be a discrete Grothendieck fibration on its own. We can lift the Eilenberg-Zilber
|
||||
structure to \(Q\) by \cref{lem:liftEZFib}.
|
||||
|
||||
So now let \(S\) and \(T\) be objects of \(P\) such that \(F(S)\) and \(F(T)\) lie in the same connected component of \(Q\).
|
||||
This means there is a zigzag connecting \(F(S)\) and \(F(T)\). We can normalize this to a span. To see this, we are going
|
||||
to show how to turn a cospan into a span of morphisms. So let \( f : D → B\) and \( g : E → B \) be a cospan. Because of the Eilenberg-Zilber
|
||||
structure, we can factor those into a degeneracy followed by a face map. Lets call them \(f_d\), \(f_f\), \(g_d\) and \(g_f\).
|
||||
\begin{eqcd*}[column sep=small]
|
||||
D \arrow[dr, two heads, "f_d"] & & P \arrow [dr, tail] \arrow[dl, tail] \arrow[dd, phantom,very near start, "\rotatebox{-45}{\(\lrcorner\)}"]& & E \arrow[dl, two heads, "g_d"'] \\
|
||||
{} & • \arrow[dr, tail, "f_f"] \arrow[lu, bend left] & & • \arrow[dl,tail, "g_f"'] \arrow[ru, bend right] & \\
|
||||
{} & & B & &
|
||||
\end{eqcd*}
|
||||
We can then construct the pullback \(P\) along \(f_f\) and \(g_f\), and the resulting maps concatenated with the splits of \(f_d\) and \(g_d\)
|
||||
are a span over \(D\) and \(E\). This means \(F(S)\) and \(F(T)\) are connected by a span of morphisms. Because \(F\) is injective
|
||||
on objects we can lift this span to a span from \(S\) to \(T\) and thus \(S\) and \(T\) are in the same connected component.
|
||||
\end{proof}
|
||||
|
||||
Before we continue to follow \cite{sattlerIdempotentCompletionCubes2019}, we need yet another lemma about lifting elegant Reedy
|
||||
structures. Sadly this does not work well for Eilenberg-Zilber categories. So we need to find a way around this later.
|
||||
|
||||
\begin{lemma}\label{liftReedyToComma}
|
||||
Let \A be an elegant Reedy category, \(i : \A → \B \) a functor and \(B ∈ \B \) an object in \B. We can lift
|
||||
the elegant Reedy structure along \(p : B ↓ i → \A \), where \(p\) is the evident projection from the comma
|
||||
category.
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
First we need to define a degree function \(\d: \Obj (B ↓ i) → ℕ\). Let \(\d'\) be the degree function from the elegant Reedy structure on \A;
|
||||
we can then define \(\d ≔ \d'p\). We define the degeneracies and faces, as being degeneracies and faces under \(p\).
|
||||
Here we would already get a problem with EZ-categories, as we might have maps that are monic in \(B ↓ i\) but not in \A.
|
||||
|
||||
As a next step we need to construct the desired factorization into a degeneracy and a face map. So we take
|
||||
a map \(f\) in \(B ↓ i\) and factor it as a map in \A into \(f = f_f f_d\). If we try to lift the factorization
|
||||
we end up with the following diagram:
|
||||
\begin{eqcd*}
|
||||
{} & B \arrow[dl, "ϕ"'] \arrow[dr, "ϕ'"] & \\
|
||||
i(A) \arrow[rr,"i(f)", near start] \arrow[dr, "i(f_d)"'] & & i(A') \\
|
||||
{} & i(X) \arrow[ur, "i(f_f)"'] \arrow[from=uu, crossing over, "i(f_d)ϕ", near start] &
|
||||
\end{eqcd*}
|
||||
It remains to show that the front right triangle commutes.
|
||||
\begin{equation*}
|
||||
i(f_f)(i(f_d)ϕ) = (i(f_f)i(f_d))ϕ= i(f_f f_d) ϕ = i(f) ϕ = ϕ'
|
||||
\end{equation*}
|
||||
% Is there an error here? The split makes a problem right?
|
||||
It is then tedious, but straightforward, to check one of the equivalent elegance axioms.
|
||||
\end{proof}
|
||||
|
||||
\begin{lemma}\label{commaPullback}
|
||||
Let \(i : \A → \B\) be a functor, and \(B\) an object of \(\B\), and let \(p : B ↓ i → \A \) be the projection map. A
|
||||
cospan in \(B ↓ i\) has a pullback if the cospan has a pullback in \B and the pullback square lies in the image of \(i\).
|
||||
\end{lemma}
|
||||
\begin{remark}
|
||||
Usually this situation is given, if \A has pullbacks along a class of maps and \(i\) preserves them.
|
||||
\end{remark}
|
||||
\begin{proof}
|
||||
So consider a cospan of maps in \(B ↓ i\) that fulfills the conditions of this lemma.
|
||||
If we unravel this, we get the following diagram:
|
||||
\begin{eqcd*}
|
||||
{} & B \arrow[rdd, "ϕ'", bend left] \arrow[ldd, "ϕ"', bend right] & \\
|
||||
{} & & \\
|
||||
i(X) \arrow[rd, "i(f)"] & & i(Y) \arrow[ld, "i(g)"'] \\
|
||||
{} & i(Z) &
|
||||
\end{eqcd*}
|
||||
By assumption we have pullbacks of \(i(f)\) and \(i(g)\) in \B that lie in the image of \(i\). So we
|
||||
can complete the diagram
|
||||
\begin{eqcd*}
|
||||
{} & B \arrow[rdd, "ϕ'", bend left] \arrow[ldd, "ϕ"', bend right] \arrow[d,dashed, "{⟨ϕ,ϕ'⟩}"] & \\
|
||||
{} & i(P) \arrow[ld] \arrow[rd] & \\
|
||||
i(X) \arrow[rd, "i(f)"] & & i(Y) \arrow[ld, "i(g)"'] \\
|
||||
{} & i(Z) &
|
||||
\end{eqcd*}
|
||||
and exhibit \(i(P)\) as an object in \(B ↓ i\) by the universal map of the pullback.
|
||||
\end{proof}
|
||||
|
||||
\begin{lemma}[{\cite[lemma 3.5]{sattlerIdempotentCompletionCubes2019}}]\label{lem:idemLemma3.5}
|
||||
Let \A be an elegant Reedy category and \(i : \A → \B\) a functor. Assume that \A has pullbacks along face maps whenever the
|
||||
cospan under consideration lies in the image of the projection \(B ↓ i → \A\) for some \(B ∈ \B\), and that \(i\) preserves these pullbacks.
|
||||
Then the left Kan extension \(i_! : \hat{\A} → \hat{\B}\) preserves monomorphisms.
|
||||
\end{lemma}
|
||||
\begin{remark}
|
||||
The original source also requires that these pullbacks preserve face and degeneracy maps. We get around this by making use of the fact
|
||||
that degeneracies are split in \cref{lem:liftEZFib}.
|
||||
\end{remark}
|
||||
\begin{proof}
|
||||
We only need to show that \(i_!\) preserves monomorphisms componentwise, giving us a functor \(\hat{\A} → \Set\) for every
|
||||
object in \( B ∈\B\). This functor can be written as
|
||||
\begin{eqcd*}
|
||||
\hat{\A} \arrow[r,"p^*"] & \widehat{B ↓ i} \arrow[r,"\colim"] & \Set
|
||||
\end{eqcd*}
|
||||
where \(p : B ↓ i → \A\) is the projection.
|
||||
This is the usual construction of \(i_!\) in this setting, see for example \cite{stacks00VC}.
|
||||
It is trivial that \(p^*\) preserves monomorphisms, so we only need to check that \(\colim\) does.
|
||||
We want to apply \cref{lem:idemLemma3.4}.
|
||||
We can lift the elegant Reedy structure from \A to \( B ↓ i \) along \(p\) by \cref{liftReedyToComma}.
|
||||
And as the face maps in \(B↓i\) are defined as the maps that \(p\) sends to face maps in \A, we can lift the
|
||||
required pullbacks to \(B↓i\) by \cref{commaPullback}.
|
||||
\end{proof}
|
||||
|
||||
|
||||
\begin{proof}[Proof of \cref{prop:i!preservesMonos}]
|
||||
We want to verify the conditions for \cref{lem:idemLemma3.5}. Our first observation is that \(Δ\) does not have arbitrary
|
||||
pullbacks of face maps. Intuitively speaking we have only the non-empty ones. So let us unpack the condition that
|
||||
we only need pullbacks if the cospan of face maps lies in the image of the projection \(B ↓ i → Δ\) for some \(B\).
|
||||
That means there is a \(B ∈ \FL\) such that the following square commutes:
|
||||
\begin{eqcd*}
|
||||
{} & B \arrow[rd] \arrow[ld] & \\
|
||||
i(X) \arrow[rd, "i(f_f)"] & & i(Y) \arrow[ld, "i(g_f)"'] \\
|
||||
{} & i(Z) &
|
||||
\end{eqcd*} %COMMENT
|
||||
As the face maps in \(Δ\) are monic, we can identify \(X\) and \(Y\) with their respective image in \(Z\). As \FL does not
|
||||
contain the empty poset, they share at least a point, and the pullback is given by \(\im f_f ∩ \im g_f\). A quick calculation
|
||||
shows that this is also a pullback in \FL and thus we can apply \cref{lem:idemLemma3.5}
|
||||
\end{proof}
|
||||
|
||||
|
||||
\subsection{\(j^*\) and \(j_!\) are left Quillen}
|
||||
We are now going to show that \(j^*\) and \(j_!\) are left Quillen. As expected, the hard part will be again to show that
|
||||
\(j_!\) preserves monomorphisms.
|
||||
|
||||
So let us start with \(j^*\) again.
|
||||
|
||||
\begin{proposition}\label{prop:j*preservesMonos}
|
||||
\(j^*\) preserves monomorphisms.
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
Follows directly from \cref{lem:restrictionPreserveMonos}
|
||||
\end{proof}
|
||||
|
||||
\begin{proposition}\label{prop:j^*preservesTrivialCofib}
|
||||
\(j^*\) preserves trivial cofibrations.
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
Like in \cref{prop:i*preservesTrivialCofib}, we are in the comfortable position of being left and right adjoint.
|
||||
Note that \(j^*\) also preserves the interval inclusion. As pushout products with the interval inclusion send cofibrations
|
||||
to trivial cofibrations and \(j^*\) preserves monomorphisms by \cref{prop:j*preservesMonos}, \(j^*\) sends trivial cofibrations
|
||||
to trivial cofibrations.
|
||||
\end{proof}
|
||||
|
||||
\begin{proposition}\label{prop:j!preservesTrivialCofib}
|
||||
\(j^*\) preserves fibrations
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
Preservation of uniform fibrations is clear, as every lifting problem against a generating trivial cofibration factors
|
||||
through the image of a trivial cofibration under \(j^*\). As \(j^*\) preserves coequalizers, the equivariance follows
|
||||
from the equivariance of the model category on \dcSet, which follows from the presence of (at least) one connection.
|
||||
\end{proof}\todo{define uniform fibrations without equivariance}\todo{make a lemma that \dcSet is equivariant even if we just cite this from the relative elegance paper}
|
||||
|
||||
We want to recap the general proof idea that \(i_!\) preserves monos. We can test being a monomorphism componentwise and can write the
|
||||
left Kan extension evaluated at a point as some mono preserving functor followed by a colimit.
|
||||
That the colimit preserves monomorphisms is equivalent to a condition on connected components of the categories
|
||||
of elements. We can only lift morphisms backwards along discrete Grothendieck fibrations, but we are faced
|
||||
with zigzags. To remedy this we want to lift a strong enough pullback property to turn these zigzags into spans.
|
||||
|
||||
This strategy will be the same for \(j_!\). Sadly we couldn't get arbitrary Eilenberg-Zilber structures to lift, so
|
||||
we will need a slightly different attack point.
|
||||
|
||||
We first observe that \(□\) has a lot more pullbacks than \(Δ\). While \(Δ\) has only “non-empty” pullbacks of face maps,
|
||||
\(□\) has all “non-empty” pullbacks. This relieves us of the necessity to lift the Eilenberg-Zilber structure to \(B ↓ i\).
|
||||
We can get by with the following lemma.
|
||||
|
||||
\begin{lemma}\label{lem:pullbackMono}
|
||||
Let \(i: \A → \B\) be a functor such that for all \(B ∈ \B\), cospans in the image of the projection \(p: B ↓ i → \A\) have pullbacks
|
||||
in \B, and these pullbacks are in the image of \(i\). Then \(i_!\) preserves monomorphisms.
|
||||
\end{lemma}
|
||||
|
||||
To show this we replace \cref{lem:idemLemma3.4} with the following simpler lemma.
|
||||
|
||||
\begin{lemma}\label{lem:colimPullbackMono}
|
||||
Let \C be a category with pullbacks. Then the functor \(\colim : \hat{\C} → \Set\) preserves monomorphisms.
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
Again let \(f\) be a monomorphism in \(\hat{\C}\). This is equivalently a monomorphism \(F: P → Q\) of discrete Grothendieck fibrations
|
||||
over \C. We need to show that \(F\) is injective on connected components. We can lift the pullbacks to \(Q\) by \cref{lem:liftPullbacks}.
|
||||
Let now \(S\) and \(T\) be in \(Q\) such that \(F(S)\) and \(F(T)\) are connected by a zigzag. We can turn this zigzag into a span, by
|
||||
taking pullbacks of cospans. Because \(F\) is monic it is injective on objects and we can lift this span to a span in \(P\) and thus
|
||||
\(S\) and \(T\) are in the same connected component.
|
||||
\end{proof}
|
||||
|
||||
\begin{proof}[Proof of \cref{lem:pullbackMono}]
|
||||
As in \cref{lem:idemLemma3.5}, we check that \(i_!\) preserves monos componentwise. We write the component of \(i_!\) at \(B\) again as
|
||||
\begin{eqcd*}
|
||||
\hat{\A} \arrow[r, "p^*"] & \widehat{B ↓ i} \arrow[r, "\colim"] & \Set
|
||||
\end{eqcd*}
|
||||
where \(p : B ↓ i → \A\) is the projection map. We need to show that \(B ↓ i\) has the required pullbacks, but that is an immediate
|
||||
consequence of \cref{commaPullback}.
|
||||
\end{proof}
|
||||
|
||||
And we arrive at the conclusion we wanted to get to:
|
||||
|
||||
|
||||
\begin{proposition}\label{prop:j!preservesMonos}
|
||||
\(j_! : \□ → \widehat{\FL}\) preserves monomorphisms.
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
For this proof we will identify \(2 = \{⊥,⊤\}\).
|
||||
We want to fulfill the conditions of \cref{lem:pullbackMono}. To check these conditions, we check that equalizers and products
|
||||
with the desired conditions exist. Products are a straightforward calculation so we go forward to the equalizer case.
|
||||
To check this we will look at a diagram of the following form,
|
||||
\begin{eqcd*}[column sep=small]
|
||||
{} & B \arrow[ld] \arrow[rd] & \\
|
||||
𝟚^n \arrow[rr, "j(f)", shift left] \arrow[rr, "j(g)"', shift right] & & 𝟚^m
|
||||
\end{eqcd*}
|
||||
where we identify \(f\) and \(g\) with functions \(m → n + 2\).
|
||||
We note that equalizers, if they exist in \FL, are constructed the same way as in \Set.
|
||||
If we apply this to our diagram, we can see that this construction can't create something empty,
|
||||
as both maps need to preserve the elements coming from \(B\).
|
||||
We would like to complete the diagram in the following way
|
||||
%\begin{eqcd*}[column sep=large]
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=large]
|
||||
{} & B \arrow[dl] \arrow[d] \arrow[dr] & {} \\
|
||||
𝟚^l \arrow[r,"j(e)"] & 𝟚^n \arrow[r,"j(f)", shift left] \arrow[r,"j(g)"',shift right] & 𝟚^m
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
So we need to find a finite set \(l\) and a map \(e : n → l + 2\). We will construct those by
|
||||
effectively constructing the coequalizer in \FinSet. So we take the quotient \(\faktor{n+2}{\sim}\)
|
||||
where \(\sim\) is the equivalence relation generated by the relation of having a common preimage under
|
||||
\(f\) and \(g\). We will now argue that our equalizer would be empty if this quotient identifies
|
||||
\(⊤\) and \(⊥\).
|
||||
|
||||
So assume that the relation indeed identifies \(⊥\) and \(⊤\), and let \(h ∈ 𝟚^n\) be such that \(j(f)(h) = j(g)(h)\).
|
||||
Because \(⊥\) and \(⊤\) are being identified, there exist two finite sequences of elements \(x_i\) and \(y_i\) in \(m\), such that
|
||||
there exists a \(k\) and
|
||||
(WLOG) \(f(x_1) = ⊥\), \(g(x_i) = f(y_i)\), \(g(y_i) = f(x_{i+1})\) and \(g(y_k) = ⊤\).
|
||||
Plugging that into the definition of \(j\), we get
|
||||
\begin{align*}
|
||||
⊥ = j(f)(h)(x_1) &= j(g)(h)(x_1) = j(f)(h)(y_1) \\ &= j(g)(h)(y_1) = j(f)(h)(x_2) = … = j(g)(h)(y_k) = ⊤
|
||||
\end{align*}
|
||||
which is a contradiction and thus \(⊤\) and \(⊥\) are not identified.
|
||||
For \(e : n → l + 2\), we take the evident quotient map restricted to \(n\).
|
||||
While we now have our candidate, we still need to show that this is actually an equalizer in \FL. As \FL is well-pointed
|
||||
it suffices to check global elements, which again are just elements in the set theoretic sense.
|
||||
As \(fe = ge\) by construction and \(j\) is a functor we get \(j(f)j(e) = j(g)j(e)\).
|
||||
|
||||
Again let \(h ∈ 𝟚^n\) such that \(j(f)(h) = j(g)(h)\). We can then define
|
||||
\begin{align*}
|
||||
h': l → 𝟚 && \text{by} && h'(x) ≔ h(z) & & \text{where} && z ∈ e^{-1}(x)\text{.}
|
||||
\end{align*}
|
||||
For this definition to make sense we need to argue that the choice of \(z\) does not matter.
|
||||
So let \(z\) and \(z'\) both be elements of \(e^{-1}(x)\). Like above, that means there are
|
||||
two sequences \(x_i\) and \(y_i\) like above, just that \(f(x_1) = z\) and \(g(y_k) = z'\), instead
|
||||
of \(⊥\) and \(⊤\).
|
||||
And like above we get
|
||||
\begin{align*}
|
||||
h(z) = j(f)(h)(x_1) &= j(g)(h)(x_1) = j(f)(h)(y_1) \\ &= j(g)(h)(y_1) = j(f)(h)(x_2) = … = j(g)(h)(y_k) = h(z')
|
||||
\end{align*}
|
||||
We also need that the map \(j(e)\) is monic, but this follows directly from the fact that \(e\) in \FinSet is epic.
|
||||
We only need to produce a map \(B → 𝟚^l\), that commutes. We get this map directly, because we have shown that
|
||||
this diagram is an equalizer in \FL.
|
||||
This means we fulfill the conditions of \cref{lem:pullbackMono} and thus \(j_!\) preserves monomorphisms.
|
||||
\end{proof}
|
||||
|
||||
\subsection{\(i^*j_!\) and \(j^*i_!\) induce an equivalence of homotopy categories}
|
||||
Before we continue with our plan directly, we take a short detour to get a little better idea of how these two functors behave.
|
||||
|
||||
From the previous sections it is immediately clear that \(i^*j_!\) and \(j^*i_!\) are left Quillen functors with some right adjoint.
|
||||
One might ask if these two functors are adjoint to each other. The former development would suggest that they aren't
|
||||
and that is indeed the case.
|
||||
|
||||
\begin{definition}\label{def:triangulation}
|
||||
Let \(t: □ → \Δ\) be the functor that sends \([1]^n \mapsto (Δ^1)^n\). The \emph{triangulation functor \(\T\)}
|
||||
is the left Kan extension of \(t\) along Yoneda.
|
||||
\begin{eqcd*}
|
||||
□ \arrow[r,"t"] \arrow[d,"\Yo"] & \Δ \\
|
||||
\□ \arrow[ru,"\T"] &
|
||||
\end{eqcd*}
|
||||
\end{definition}
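As a quick illustration (standard, and used implicitly in the examples below): \(\T\) sends the representable square to the product of two \(1\)-simplices, which decomposes into two \(2\)-simplices glued along the diagonal edge,
\begin{equation*}
\T(\I^2) = Δ^1 × Δ^1 ≅ Δ^2 ∪_{Δ^1} Δ^2\text{.}
\end{equation*}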
|
||||
|
||||
\begin{proposition}\label{prop:triangulation}
|
||||
The functor \(i^*j_!\) is the triangulation functor.
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
As \T is the unique cocontinuous functor that extends the product preserving functor from \□ to \Δ that sends the interval
|
||||
to the interval, we only need to show these conditions. \(i^*j_!\) is cocontinuous as it is the composition of two left
|
||||
adjoints. We need to show that this functor is product preserving on representables. But by Yoneda and \cref{prop:j_!Nothing}
|
||||
\begin{equation*}
|
||||
j_!(\I^n × \I^m) = j_!(\I^{n+m}) = \Yo 𝟚^{n+m} = \Yo 𝟚^n × \Yo 𝟚^m = j_!(\I^n) × j_!(\I^m)
|
||||
\end{equation*}
|
||||
\(j_!\) preserves products of representables.
|
||||
Because \(i^*\) is a right adjoint we get this property immediately. We also need to show that \(i^*j_!\) preserves the interval.
|
||||
We already know this for \(j_!\). So the question is whether \(i^*\) preserves the interval. We know that \(i^*(\I)(x) =
|
||||
\Hom_{\dCube}(\Yo [1],i(x))\). As \(i\) is fully faithful the claim follows.
|
||||
\end{proof}
|
||||
|
||||
\begin{example}
|
||||
\begin{equation*}
|
||||
\Hom_{\□}(j^*i_!(Δ^2),\I^2) ≠ \Hom_{\Δ}(Δ^2,i^*j_!(\I^2))
|
||||
\end{equation*}
|
||||
\end{example}%
|
||||
Intuitively, the reason for this is that there are few ways to map a square with one side degenerated to a triangle into the representable
square in \□, but a lot of ways to map a triangle into a triangulated square in \Δ.
|
||||
\begin{proof}
|
||||
By the Yoneda lemma and the fact that \(i^*j_!\) is the triangulation functor, a map on the right-hand side corresponds to a
pair of monotone maps \([2] → [1]\). There are 16 such pairs. On the left we can use \cref{prop:j_!Nothing}
and expand the definitions to get \(\Hom_{\□}(\Hom_{\FL}(j(-),[2]),\Hom_{□}(-,2))\). Let us take such a transformation
and call it \(η\). \(η_0\) gives rise to a map \(f : [2] → 𝟚^2\). \(η_1\) witnesses that \(f\) is indeed monotone, by
composing with face maps. And \(η_2\) rules out all injective maps, as for them exactly 2 faces always agree and
\(\I^2(2)\) does not contain such faces. There are only 9 such maps, which is an upper bound for the possible number of
natural transformations.
|
||||
\end{proof}
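To make the right-hand count in the proof explicit (a small sketch of the arithmetic), the monotone maps \([2] → [1]\), listed by their values on \(0,1,2\), are exactly
\begin{equation*}
(0,0,0),\quad (0,0,1),\quad (0,1,1),\quad (1,1,1)\text{,}
\end{equation*}
so the right-hand side has \(4 \cdot 4 = 16\) elements, while the argument above bounds the left-hand side by \(9\).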
|
||||
|
||||
\begin{example}
|
||||
\begin{equation*}
|
||||
\Hom_{\Δ}(i^*j_!(\I^2),Δ^1 × Δ^1) ≠ \Hom_{\□}(\I^2,j^*i_!(Δ^1 × Δ^1))
|
||||
\end{equation*}
|
||||
\end{example}%
|
||||
Here the problem is that the square made as a product in \Δ doesn't have a single 2-cell that fills the whole square, so
there is no way to map the representable square onto it without degenerating something. If we triangulate the representable square
first, we don't run into that problem.
|
||||
\begin{proof}
|
||||
As \(i^*j_!\) is the triangulation functor, the left-hand side becomes \(\Hom_{\Δ}(Δ^1 × Δ^1,Δ^1 × Δ^1)\).
We can write \( Δ^1 × Δ^1 \) as a pushout of two copies of \(Δ^2\) joined along an edge.
As \(i_!\) is cocontinuous, we can construct this pushout in \FL. By this and \cref{prop:i_!Nothing}, we
get a colimit of two representables there. Unraveling these definitions, it is a straightforward exercise
to verify that the set on the left-hand side has more elements.
|
||||
\end{proof}
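For a concrete count on the left-hand side (a sketch; it uses the standard facts that the nerve is fully faithful and preserves products, so simplicial maps between these squares correspond to monotone maps of posets): a map \(Δ^1 × Δ^1 → Δ^1 × Δ^1\) is a pair of monotone maps \([1]^2 → [1]\), and there are
\begin{equation*}
6 \cdot 6 = 36
\end{equation*}
of those, one factor of \(6\) for each way to choose the upper set \(f^{-1}(1) ⊆ [1]^2\).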
|
||||
|
||||
From here on we follow the argument from \cite[§6.2]{solution}. They lay down a criterion on functors into a model category
under which every natural transformation between them is a weak equivalence. Thus we only need to construct some natural
transformation between the identity functor and the composite of these functors.

For this, we cite two technical
lemmas whose theory is mostly developed in \cite{riehlTheoryPracticeReedy2014}, but which are taken in this form from \cite{solution}.
To make sense of the first lemma we need
to introduce a technicality. If we have a category \A and presheaves on it, then the automorphism group of some
\(a ∈ \A\) acts on \(\Yo a\,b\) by composition. This action is natural in the sense that \(\Yo a f\) is an
\(\mathrm{Aut}(a)\)-equivariant map. Thus we can say that \(\mathrm{Aut}(a)\) acts on \(\Yo a\), and we can also
quotient \(\Yo a\) by subgroups of \(\mathrm{Aut}(a)\). This is also where equivariance comes into play again, as
it by definition guarantees that these quotients of representables by an automorphism subgroup are
contractible.
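Spelled out (a short sketch of the construction just described): for \(σ ∈ \mathrm{Aut}(a)\) and \(φ ∈ \Yo a\,b = \Hom_{\A}(b,a)\) the action is \(σ · φ ≔ σ ∘ φ\), and for \(f : b' → b\) the equivariance of \(\Yo a\,f\) is the equation
\begin{equation*}
(σ ∘ φ) ∘ f = σ ∘ (φ ∘ f)\text{,}
\end{equation*}
which is just associativity of composition.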
|
||||
|
||||
\todo{if time and resources permit, explain and prove these lemmas (ca. 4 pages)}
|
||||
\begin{lemma}[{\cites[§5]{riehlTheoryPracticeReedy2014}[lemma 6.2.13]{solution}}]\label{lem:Eilenberg-ZilberMonosCreated}
|
||||
Let \A be an Eilenberg-Zilber category. Then the monomorphisms in \(\hat{\A}\) are generated under
|
||||
coproduct, pushout, sequential composition, and right cancellation under monomorphisms by the maps
|
||||
\(∅ → \faktor{\Yo a}{H}\) valued in the quotient of a representable presheaf at some \(a ∈ \A\) by an arbitrary
|
||||
subgroup \(H\) of its automorphism group.
|
||||
\end{lemma}
|
||||
|
||||
\begin{lemma}[{\cites[§5]{riehlTheoryPracticeReedy2014}[lemma 6.2.15]{solution}}]\label{lem:PairClosure}
|
||||
Let \(U,V : K → M\) be a cocontinuous pair of functors valued in a model category and \(α : U ⇒ V\) a
|
||||
natural transformation between them. Define the cofibrations in \(K\) to be the maps that are sent to
|
||||
cofibrations under both \(U\) and \(V\). Define \(\mathcal{N}\) to be the class of cofibrations
|
||||
between cofibrant objects that are sent by Leibniz pushout application with \(α\) to weak equivalences in
|
||||
\(M\). Then \(\mathcal{N}\) is closed under coproducts, pushout, (transfinite) composition, and right cancellation
|
||||
between cofibrations.
|
||||
\end{lemma}
|
||||
|
||||
\begin{corollary}[{\cite[corollary 6.2.16]{solution}}]\label{lem:everythingWorks}
|
||||
Let \A be an Eilenberg–Zilber category and consider a parallel pair of functors \(U, V : \Set^{\A^\op} → M\) valued in a model
category \(M\) together with a natural transformation \(α : U ⇒ V\). Suppose that \(U\) and \(V\) preserve colimits and send monomorphisms
in \(\Set^{\A^\op}\) to cofibrations in \(M\). If the components of \(α\) at quotients of representables by subgroups of their automorphism groups
are weak equivalences, then all components of \(α\) are weak equivalences.
|
||||
\end{corollary}
|
||||
\begin{proof}
|
||||
By \cref{test}\todo{In Leibniz construction chapter make this an example} we can write the component of \(α\) at \(X\) as a
Leibniz application of \(α\) with the monomorphism \(∅ → X\). We can use both lemmas from above to show that every monomorphism
is sent by this Leibniz application to a weak equivalence. By \cref{lem:Eilenberg-ZilberMonosCreated} the monomorphisms in \(\Set^{\A^\op}\) are generated by
morphisms \(∅ → \faktor{\Yo a}{H}\) under some closure properties. By assumption, morphisms of that form are sent to weak equivalences
by Leibniz application with \(α\). By \cref{lem:PairClosure}, the maps that get sent to weak equivalences by Leibniz application with
\(α\) enjoy the same closure properties; thus all monomorphisms get sent to weak equivalences, and with them all morphisms of the
form \(∅ → X\).
|
||||
\end{proof}
|
||||
|
||||
\begin{corollary}[{\cite[Corollary 6.2.17]{solution}}]\label{cor:dafuq}
|
||||
Let \A be an Eilenberg–Zilber category for which \(\Set^{\A^\op}\) admits a model structure whose cofibrations are the monomorphisms
and in which the
quotients \(\faktor{\Yo a}{H}\) of representables by subgroups of their automorphism groups are weakly contractible. If
\(U, V : \Set^{\A^\op} → M\) define a
pair of left Quillen functors that preserve the terminal object, then any natural transformation \(α : U ⇒ V\) is a natural weak equivalence.
|
||||
\end{corollary}
|
||||
\begin{proof}
|
||||
By Ken Brown's lemma, left Quillen functors preserve weak equivalences between cofibrant objects. If such a functor preserves the terminal
object, then it also preserves weak contractibility of cofibrant objects. To apply \cref{lem:everythingWorks}
we need to show that \(α_{\faktor{\Yo a}{H}}\) is a weak equivalence. As \(\faktor{\Yo a}{H}\) is weakly contractible, we get the following commuting
square.
|
||||
\begin{eqcd*}[column sep=large, row sep=large]
|
||||
U\left(\faktor{\Yo a}{H}\right) \arrow[r,"α_{\faktor{\Yo a}{H}}"] \arrow[d, "\rotatebox{90}{\(\sim\)}"] & V\left(\faktor{\Yo a}{H}\right) \arrow[d, "\rotatebox{90}{\(\sim\)}"] \\
|
||||
U(*) \arrow[r,equal] & V(*)
|
||||
\end{eqcd*}
|
||||
And by 2-out-of-3, the upper map must be a weak equivalence, and thus by \cref{lem:everythingWorks} the claim follows.
|
||||
\end{proof}
|
||||
|
||||
\begin{theorem}
|
||||
\(j^*i_!\) and \(i^*j_!\) induce an equivalence between \(\Ho(\□)\) and \(\Ho(\Δ)\).
|
||||
\end{theorem}
|
||||
\begin{proof}
|
||||
By \cref{prop:i*preservesMonos,prop:i*preservesTrivialCofib,prop:i!preservesMonos,prop:i!preservesTrivialCofib,%
prop:j!preservesTrivialCofib,prop:j!preservesMonos,prop:j*preservesMonos,prop:j^*preservesTrivialCofib},
\(i^*\), \(i_!\), \(j^*\), and \(j_!\) are left Quillen functors, and thus so are \(j^*i_!\) and \(i^*j_!\).
By Ken Brown's lemma we know that left Quillen functors preserve weak equivalences between cofibrant objects. In
the model structures on \□ and \Δ, every monomorphism is a cofibration and thus every object is cofibrant.
|
||||
Thus \(j^*i_!\) and \(i^*j_!\) preserve weak equivalences and descend to functors between the corresponding
|
||||
homotopy categories.
|
||||
|
||||
To show that these induce an equivalence between the homotopy categories we must show that we have a zigzag of
|
||||
natural transformations between \(i^*j_!j^*i_!\) and the identity functor such that every natural transformation
|
||||
is valued in weak equivalences, and likewise for \(j^*i_!i^*j_!\). We will call \(η^i\) and \(ε^i\) the unit and counit
|
||||
of the adjunction \(i_! ⊣ i^*\). Likewise \(η^j\) and \(ε^j\) for the adjunction \(j_! ⊣ j^*\).
|
||||
We can construct these as follows
|
||||
\begin{eqcd*}[column sep=large, row sep=large,execute at end picture={\draw[decorate,decoration={brace,mirror,amplitude=4.5}] (sBrcs.south) to (eBrcs.south);}]
|
||||
\Δ \arrow[r,"i_!", ""'{name=i!s}] & |[alias=sBrcs]| \dcSet \arrow[r,"j^*"] \arrow[d,phantom,"⊛"] \arrow[rr,bend right=20,phantom , ""'{name=cc}] & \□ \arrow[r,"j_!"] & |[alias=eBrcs]| \dcSet \arrow[r,"i^*", ""'{name=i*s}] \arrow[d,phantom,"⊛"] & \Δ \\
|
||||
\Δ \arrow[r,"i_!"{name=i!t}] & \dcSet \arrow[rr,"\id"{name=sida},""'{name=sidb}, equal] & & \dcSet \arrow[r,"i^*"{name=i*t}] & \Δ \\
|
||||
\Δ \arrow[rrrr,"\id"{name=lid}, equal] & & & & \Δ
|
||||
\arrow[Rightarrow ,from=i!s, to=i!t, "\id"]
|
||||
\arrow[Rightarrow, from=cc, to=sida, "ε^j"]
|
||||
\arrow[Rightarrow, from=i*s, to=i*t, "\id"]
|
||||
\arrow[Rightarrow, from=lid, to=sidb, "η^i"]
|
||||
\end{eqcd*}
|
||||
and
|
||||
\begin{eqcd*}[column sep=large, row sep=large,execute at end picture={\draw[decorate,decoration={brace,mirror,amplitude=4.5}] (sBrcs.south) to (eBrcs.south);}]
|
||||
\□ \arrow[r,"j_!", ""'{name=j!s}] & |[alias=sBrcs]| \dcSet \arrow[r,"i^*"] \arrow[d,phantom,"⊛"] \arrow[rr,bend right=20,phantom, ""'{name=cc}] & \Δ \arrow[r,"i_!"] & |[alias=eBrcs]| \dcSet \arrow[r,"j^*", ""'{name=j*s}] \arrow[d,phantom,"⊛"] & \□ \\
|
||||
\□ \arrow[r,"j_!"{name=j!t}] & \dcSet \arrow[rr,"\id"{name=sida},""'{name=sidb}, equal] & & \dcSet \arrow[r,"j^*"{name=j*t}] & \□ \\
|
||||
\□ \arrow[rrrr,"\id"{name=lid}, equal] & & & & \□
|
||||
\arrow[Rightarrow ,from=j!s, to=j!t, "\id"]
|
||||
\arrow[Rightarrow, from=cc, to=sida, "ε^i"]
|
||||
\arrow[Rightarrow, from=j*s, to=j*t, "\id"]
|
||||
\arrow[Rightarrow, from=lid, to=sidb, "η^j"]
|
||||
\end{eqcd*}
|
||||
where \(⊛\) is the Godement product.
|
||||
By \cref{prop:i_!Nothing,prop:j_!Nothing}, \(i_!\) and \(j_!\) preserve terminal objects, and \(i^*\) and \(j^*\) do so because they are
right adjoints. So by \cref{cor:dafuq} the natural transformations in these two cospans are valued in weak equivalences, and thus we have our
desired equivalence of homotopy categories.
|
||||
\end{proof}
|
||||
\end{document}
|
5
src/main/Mainpart.tex
Normal file
5
src/main/Mainpart.tex
Normal file
|
@ -0,0 +1,5 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\subfile{Equivalence}
|
||||
|
||||
\end{document}
|
163
src/main/Model_Structure_Argument.tex
Normal file
163
src/main/Model_Structure_Argument.tex
Normal file
|
@ -0,0 +1,163 @@
|
|||
\documentclass[../Main.tex]{subfiles}
|
||||
\begin{document}
|
||||
\section{Name TODO}
|
||||
This section closely follows \cite[section 3]{cavalloRelativeEleganceCartesian2022}, which is
itself a refinement of \cite{sattlerEquivalenceExtensionProperty2017},
and establishes a nice criterion for a premodel structure being a model structure. We focus a bit more on the arguments that we actually need
and leave out some remarks about the overall theory. This excerpt is mainly included in this document
to be more self-contained.
|
||||
|
||||
\begin{definition}[{\cite[Def. 3.1.1]{cavalloRelativeEleganceCartesian2022} \cite[Def. 2.1.23]{bartonModel2CategoryEnriched2019}}]
|
||||
Let \(\M\) be a finitely complete and cocomplete category. A \emph{premodel structure} on \(\M\) consists of two
weak factorization systems on \(\M\): \((C,F_t)\), called cofibrations and trivial fibrations, and \((C_t,F)\), called
trivial cofibrations and fibrations, such that \(C_t ⊂ C\) and \(F_t ⊂ F\).
|
||||
\end{definition}
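For orientation (a sketch of the motivating example): every model structure \((C, W, F)\) gives a premodel structure by taking the two weak factorization systems
\begin{equation*}
(C,\; F ∩ W) \quad\text{and}\quad (C ∩ W,\; F)\text{,}
\end{equation*}
and the inclusions \(C ∩ W ⊂ C\) and \(F ∩ W ⊂ F\) hold trivially.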
|
||||
|
||||
The data that is missing for a real model structure is the class of weak equivalences \(W\) such that \((C,W,F)\) gets to
be a model structure. In a model structure \(W\) would already be determined by the other two classes,
giving us an obvious candidate for \(W\).
|
||||
|
||||
\begin{definition}[{\cite[Def. 3.1.3]{cavalloRelativeEleganceCartesian2022}}]
|
||||
A morphism in a premodel structure is a \emph{weak equivalence} if it factors as a trivial cofibration followed
by a trivial fibration. We will call this class \(W(C,F)\).
|
||||
\end{definition}
|
||||
|
||||
One might rightfully ask what can stop this from being a model structure. The good news is that it is only the 2-out-of-3 property;
the other conditions can be verified quite easily.
|
||||
|
||||
\begin{lemma}[retract argument]\label{retractArgument}\index{retract Argument}
|
||||
Consider a composite morphism \(f : X \overset{i}{→} A \overset{p}{→} Y\).
Then the following hold:
\begin{itemize}
\item If \(f\) has the left lifting property against \(p\), then \(f\) is a retract of \(i\).
\item If \(f\) has the right lifting property against \(i\), then \(f\) is a retract of \(p\).
|
||||
\end{itemize}
|
||||
\end{lemma}
|
||||
\begin{proof}
|
||||
We prove the first statement; the second one is dual.
We can write the decomposition of \(f\) as in the following square and get a lift \(l\) against \(p\).
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
X \arrow[r,"i"] \arrow[d,"f"] & A \arrow[d,"p"] \\
|
||||
Y \arrow[r,"\id"] \arrow[ur, dashed, "l"] & Y
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
Now we can realize \(f\) as a retract as follows.
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
X \arrow[r,"\id"] \arrow[d,"f"] & X \arrow[r,"\id"] \arrow[d,"i"] & X \arrow[d,"f"] \\
|
||||
Y \arrow[r,"l"] \arrow[rr, bend right, "\id"'] & A \arrow[r, "p"] & Y
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
\end{proof}
|
||||
\begin{proposition}[{\cite[Def. 3.1.5]{cavalloRelativeEleganceCartesian2022}}]
|
||||
\(C_t = C ∩ W(C,F)\) and \(F_t = F ∩ W(C,F)\)
|
||||
\end{proposition}
|
||||
\begin{proof}
|
||||
We prove the case for trivial cofibrations; the other part is analogous.
Let \(g ∈ C ∩ W(C,F)\); then \(g\) factors into a trivial cofibration \(c\) followed by a trivial fibration \(f\).
As \(g\) is also a cofibration, \(g\) has the left lifting property against \(f\) and by \cref{retractArgument}
is a retract of the trivial cofibration \(c\). As trivial cofibrations are closed under retracts, \(g\) is a trivial cofibration too.

The other direction follows from the fact that classes defined by lifting properties always contain identities.
|
||||
\end{proof}
|
||||
|
||||
We now start to reduce the problem of checking 2-out-of-3. It turns out we do not need to check all possible maps.
|
||||
|
||||
\begin{definition}[{\cite[Def. 3.1.7]{cavalloRelativeEleganceCartesian2022}}]
|
||||
Given a wide subcategory \(\A ⊆ \E\) of a category \(\E\), we say \(\A\) has left cancellation in \(\E\) (or among maps in \(\E\)) when for every
|
||||
composable pair \(g\), \(f\) in \(\E\), if \(gf\) and \(g\) are in \(\A\) then \(f\) is in \(\A\). Dually, \(\A\) has right cancellation in \(\E\)
|
||||
when for all \(g\), \(f\) with \(gf\), \(f ∈ \A\), we have \(g ∈ \A\).
|
||||
\end{definition}
|
||||
|
||||
\begin{theorem}[{\cite[Thm 3.1.8]{cavalloRelativeEleganceCartesian2022}}]
|
||||
\(W(C,F)\) satisfies 2-out-of-3 exactly if the following hold:
|
||||
\begin{enumerate}
|
||||
\item[(A)] trivial cofibrations have right cancellation among cofibrations and trivial fibrations have left cancellation among fibrations;
|
||||
\item[(B)] every (cofibration, trivial fibration) factorization or (trivial cofibration, fibration) factorization of a weak equivalence is
a (trivial cofibration, trivial fibration) factorization;
|
||||
\item[(C)] every composite of a trivial fibration followed by a trivial cofibration is a weak equivalence.
|
||||
\end{enumerate}
|
||||
\end{theorem}
|
||||
\begin{proof}
|
||||
Assume \(W(C,F)\) satisfies 2-out-of-3. Let us start with (A): let \(g\) and \(f\) be cofibrations such that \(gf\) and \(g\) are trivial.
Then by 2-out-of-3, \(f\) is a weak equivalence too, and hence a trivial cofibration. (The second half of (A) follows in the same way.)
Let us investigate (B). Let \(k\) be a weak equivalence, and \(fg\) a factorization into a trivial cofibration \(g\) followed by a
fibration \(f\). Then by 2-out-of-3, \(f\) is a weak equivalence too, and hence a trivial fibration. (The second half of (B) is analogous.)
(C) follows even more directly from 2-out-of-3.

Let us now assume (A), (B), and (C), and let \(g : Y → Z\) and \(f : X → Y\). We can then factor \(f\) into a cofibration followed by a trivial fibration
and \(g\) into a trivial cofibration followed by a fibration.
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}
|
||||
X \arrow[dr, "f"] \arrow[r, tail] & U \arrow[r, tail, "\sim"] \arrow[d,two heads, "\sim"] & W \arrow[d,two heads,"\sim"] \\
|
||||
& Y \arrow[dr, "g"] \arrow[r,tail,"\sim"] & V \arrow[d,two heads] \\
|
||||
& & Z
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
The maps from and to \(W\) can be obtained by noting that by (C) the composite \(U \overset{\sim}{→} Y \overset{\sim}{→} V\) is a weak equivalence, factorizing it again, and using (B).
Now let us suppose \(f\) and \(g\) are weak equivalences; by (B) the maps \(X → U\) and \(V → Z\) become weak equivalences too.
Thus the composite \(gf\) is a weak equivalence, witnessed by the upper and right side of the diagram.
Now let us assume that \(f\) and \(gf\) are weak equivalences. By (B) the map \(X → U\) gets to be a trivial cofibration again.
Thus the composite \(X → W\) is a trivial cofibration, and again by (B) the composite map \(W → Z\) gets to be a trivial fibration,
as these two composites factor \(gf\). By (A) this makes the map \(V → Z\) a trivial fibration. This establishes that \(g\)
factors as a trivial cofibration followed by a trivial fibration and hence \(g\) is a weak equivalence.
The proof for \(g\) and \(gf\) being weak equivalences is dual.
|
||||
\end{proof}
|
||||
|
||||
Now we move on and make use of extra structure that (for example) cubical models exhibit: an adjoint functorial cylinder.
|
||||
\begin{definition}[{\cite[Def. 3.2.1]{cavalloRelativeEleganceCartesian2022}}]\index{functorial cylinder}
|
||||
A \emph{functorial cylinder} on a category \E is a functor \(\I⊗(−): \E → \E \) equipped with \emph{endpoint} and \emph{contraction}
|
||||
transformations fitting in a diagram as shown:
|
||||
\begin{equation*}
|
||||
\begin{tikzcd}[column sep=large, row sep=large]
|
||||
\Id \arrow[r, dashed, "δ_0⊗(−)"] \arrow[dr, "\id"] & \I⊗(−) \arrow[d,dashed,"ε⊗(−)" description] & \Id \arrow[l,dashed, "δ_1⊗(−)"'] \arrow[dl,"\id"'] \\
|
||||
& \Id &
|
||||
\end{tikzcd}
|
||||
\end{equation*}
|
||||
An \emph{adjoint functorial cylinder} is a functorial cylinder such that \(\I ⊗ (−) \) is a left adjoint.
|
||||
The dual notion is called a \emph{functorial path object}, consisting of a functor \(\I ⊘ (−)\) and natural
transformations \(δ_k ⊘ (−)\) and \(ε ⊘ (−)\).
|
||||
\end{definition}
|
||||
|
||||
\begin{definition}[{\cite[Def. 3.2.2]{cavalloRelativeEleganceCartesian2022}}]
|
||||
Given a functorial cylinder in a cocomplete category, we have an induced \emph{boundary} transformation
|
||||
\( ∂ ⊗ X ≔ [δ_0 ⊗ X, δ_1 ⊗ X] : X ⊔ X → \I ⊗ X \).
|
||||
\end{definition}
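For instance (a sketch, anticipating the cartesian cylinder on \□ used below): with \(\I ⊗ X ≔ \I × X\) the boundary transformation at \(X\) is the map
\begin{equation*}
∂ ⊗ X = [δ_0 × X, δ_1 × X] : X ⊔ X → \I × X
\end{equation*}
given by the two endpoint inclusions.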
|
||||
|
||||
\begin{definition}[{\cite[Def. 3.2.4]{cavalloRelativeEleganceCartesian2022}}]
|
||||
Write \(@ : [\E,\Cat{F}] × \E → \Cat{F}\) for the application bifunctor defined by \(F @ X ≔ F(X)\).
|
||||
Given a category \E with a functorial cylinder and \(f ∈ \E^→\), we abbreviate \((δ_k ⊗ (−)) \hat{@} f : \E^→ → \E^→\)
|
||||
as \(δ_k\⊗f\). We likewise write \(ε \⊗ f\) for Leibniz application of the contraction. We write \(δ_k \hat{⊘}(−)\) and
|
||||
\(ε \hat{⊘}(−)\) for the dual operations associated to a functorial path object.
|
||||
\end{definition}
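Unravelled for the cylinder (a sketch, spelling out the usual Leibniz, i.e.\ pushout, application): for \(f : A → B\) the morphism \(δ_k \⊗ f\) is the canonical comparison map
\begin{equation*}
(\I ⊗ A) ⊔_{A} B → \I ⊗ B
\end{equation*}
induced by \(\I ⊗ f\) and \(δ_k ⊗ B\).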
|
||||
|
||||
\begin{definition}[{\cite[Def. 3.2.5]{cavalloRelativeEleganceCartesian2022}}]\index{cylindrical premodel structure}
|
||||
A \emph{cylindrical premodel structure} on a category \E consists of a premodel structure and adjoint functorial cylinder on
|
||||
\E that are compatible in the following sense:
|
||||
\begin{itemize}
|
||||
\item \(∂ \⊗ (−)\) preserves cofibrations and trivial cofibrations;
|
||||
\item \(δ_k\⊗(−)\) sends cofibrations to trivial cofibrations for \(k ∈ \{0,1\}\).
|
||||
\end{itemize}
|
||||
\end{definition}
|
||||
|
||||
\todo{Move this to the part about the cube category}
|
||||
\begin{theorem}
|
||||
\□ has an adjoint functorial cylinder given by \(\I × (-)\), making the premodel structure given by the weak factorization systems
in section \ref{modelStructure} a cylindrical premodel structure.
|
||||
\end{theorem}
|
||||
\begin{proof}
|
||||
As \□ is a presheaf category and as such cartesian closed, we get \(\I × (−) ⊣ (−)^{\I}\). We also have
|
||||
\(δ_k⊗(−) ≔ δ_k × (−)\) and \(ε⊗(−) ≔ ε × (−)\). As the cofibrations are just the monomorphisms, it is clear
|
||||
that \(∂\⊗(−)\) preserves cofibrations.
|
||||
\todo{write out rest (see note cylinder)}
|
||||
\end{proof}
|
||||
|
||||
In a cylindrical model category we can
|
||||
|
||||
%AFH compositions
|
||||
%com : (s : I) → (A s)[φ |-> f * s] fst(com r) = fst x_0
|
||||
%CCHM composition
|
||||
%com : ( A ε)[φ |-> f * ε]
|
||||
% see CMS20 (Unifying Cubical Models of Univalent type theory
|
||||
\end{document}
|
1
src/main/resource.bib
Symbolic link
1
src/main/resource.bib
Symbolic link
|
@ -0,0 +1 @@
|
|||
../resource.bib
|
1067
src/resource.bib
Normal file
1067
src/resource.bib
Normal file
File diff suppressed because it is too large
Load diff
1
src/result
Symbolic link
1
src/result
Symbolic link
|
@ -0,0 +1 @@
|
|||
/nix/store/vi5k8nsgp9knk76bri7p6nr1la5fa4mn-output.pdf
|
BIN
src/stix/static_otf/STIXTwoMath-Regular.otf
Normal file
BIN
src/stix/static_otf/STIXTwoMath-Regular.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-Bold.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-Bold.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-BoldItalic.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-BoldItalic.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-Italic.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-Italic.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-Medium.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-Medium.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-MediumItalic.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-MediumItalic.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-Regular.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-Regular.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-SemiBold.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-SemiBold.otf
Normal file
Binary file not shown.
BIN
src/stix/static_otf/STIXTwoText-SemiBoldItalic.otf
Normal file
BIN
src/stix/static_otf/STIXTwoText-SemiBoldItalic.otf
Normal file
Binary file not shown.