I'm scared

This commit is contained in:
Dennis Frieberg 2024-08-07 04:26:06 +02:00
parent 0ef95e1735
commit ac0ce99bb5
Signed by: nerf
GPG key ID: 42DED0E2D8F04FB6
20 changed files with 1336 additions and 465 deletions


@ -36,7 +36,7 @@
in { in {
default = pkgs.stdenvNoCC.mkDerivation { default = pkgs.stdenvNoCC.mkDerivation {
name = "output.pdf"; name = "output.pdf";
buildInputs = [latex latexrun pkgs.biber pkgs.inkscape]; buildInputs = [latex latexrun pkgs.biber pkgs.inkscape pkgs.stix-two];
src = self; src = self;
buildPhase = '' buildPhase = ''
HOME=. HOME=.
@ -72,7 +72,7 @@
filetypes = ["latex"] filetypes = ["latex"]
roots = [".git", "Main.tex"] roots = [".git", "Main.tex"]
command = "texlab" command = "texlab"
args = ["-vvvv", "--log-file", "/tmp/texlabLog"] # args = ["-vvvv", "--log-file", "/tmp/texlabLog"]
settings_section = "texlab" settings_section = "texlab"
[language.latex.settings.texlab.build] [language.latex.settings.texlab.build]
onSave = true onSave = true
@ -93,8 +93,8 @@
colorscheme solarized-dark colorscheme solarized-dark
set global tabstop 2 set global tabstop 2
set global indentwidth 0 set global indentwidth 0
eval %sh{kak-lsp --kakoune --session $kak_session -c ${kak-lsp-config} --log /tmp/kak-lspLog -vvvv} # eval %sh{kak-lsp --kakoune --session $kak_session -c ${kak-lsp-config} --log /tmp/kak-lspLog -vvvv}
# eval %sh{kak-lsp --kakoune --session $kak_session -c ${kak-lsp-config}} eval %sh{kak-lsp --kakoune --session $kak_session -c ${kak-lsp-config}}
hook global WinSetOption filetype=(latex) %{ hook global WinSetOption filetype=(latex) %{
lsp-auto-hover-enable lsp-auto-hover-enable
lsp-enable-window lsp-enable-window


@ -1 +0,0 @@
vim


@ -1,18 +1,35 @@
\documentclass[DIV=calc,draft=true]{scrartcl} \begin{filecontents*}[overwrite]{\jobname.xmpdata}
\Title{A uniform equivariant model for cubical type theory}
\Author{Dennis Frieberg}
\Language{en-US}
\Subject{see title}
\Keywords{keyword}
\end{filecontents*}
\documentclass[DIV=calc,fontsize=12pt,draft=true,parskip]{scrartcl}
\usepackage[a-2b,mathxmp]{pdfx}[2018/12/22]
%\usepackage{pdfmanagement-testphase}
%\DeclareDocumentMetadata{
% pdfversion=1.7
%}
\usepackage{microtype} \usepackage{microtype}
\usepackage{amsthm} %theorems \usepackage{amsthm} %theorems
\usepackage{mathtools} %coloneqq and other stuff \usepackage{mathtools} %coloneqq and other stuff
\usepackage{amssymb} \usepackage{amssymb}
%\usepackage[colon=literal]{unicode-math-luatex} %unicode in mathmode does conflict amssymb maybe sometimes use this line if you want %\usepackage[colon=literal]{unicode-math-luatex} %unicode in mathmode does conflict amssymb maybe sometimes use this line if you want
% that : behaves like \colon % that : behaves like \colon
\usepackage{unicode-math-luatex} \usepackage{fontspec} % for changing text fonts (we roll default right now, so no use)
\usepackage{imakeidx} %make index possible \usepackage{unicode-math} %also loads fontspec
\setmathfont{STIXTwoMath-Regular.otf}
\setmainfont{STIXTwoText}[Extension=.otf, UprightFont = *-Regular, BoldFont = *-SemiBold, ItalicFont = *-Italic, BoldItalicFont = *-SemiBoldItalic]
\addtokomafont{disposition}{\rmfamily}
% \usepackage{imakeidx} %make index possible
% \makeindex[columns=3, title=Alphabetical Index] % \makeindex[columns=3, title=Alphabetical Index]
\usepackage{graphicx} \usepackage{graphicx}
\usepackage{dsfont} \usepackage{dsfont}
\usepackage{hyperref} \usepackage{hyperref}
\usepackage{tikz} \usepackage{tikz}
\usetikzlibrary{cd} \usetikzlibrary{cd}
\usetikzlibrary{decorations.pathreplacing}
\usepackage{faktor} \usepackage{faktor}
\usepackage{polyglossia} \usepackage{polyglossia}
\setdefaultlanguage[variant=us]{english} \setdefaultlanguage[variant=us]{english}
@ -22,7 +39,7 @@
\usepackage[obeyDraft]{todonotes} \usepackage[obeyDraft]{todonotes}
\usepackage{subfiles} \usepackage{subfiles}
\usepackage[plain]{fancyref} \usepackage[plain]{fancyref}
\usepackage{cleveref} \usepackage[capitalise]{cleveref}
% \usepackage{mathabx} % \usepackage{mathabx}
\usepackage{pdftexcmds} % pdftex primitves for lualatex \usepackage{pdftexcmds} % pdftex primitves for lualatex
@ -32,16 +49,28 @@
\makeatother \makeatother
\usepackage{svg} \usepackage{svg}
\DeclareFontFamily{U}{min}{}
\DeclareFontShape{U}{min}{m}{n}{<-> udmj30}{}
\newcommand\Yo{\!\text{\usefont{U}{min}{m}{n}\symbol{'207}}\!}
\newcommand*{\printline}{\number\inputlineno\xspace}
\NewCommandCopy\oldtodo\todo
\renewcommand{\todo}[2][]{\oldtodo[#1]{\printline #2}}
\addbibresource{resource.bib} \addbibresource{resource.bib}
\theoremstyle{definition} \theoremstyle{definition}
\swapnumbers
\newtheorem{theorem}{Theorem}[subsection] \newtheorem{theorem}{Theorem}[subsection]
\newtheorem{corollary}[theorem]{Corollary} \newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma} \newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition} \newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example} \newtheorem{example}[theorem]{Example}
\newtheorem{proposition}[theorem]{Proposition} \newtheorem{proposition}[theorem]{Proposition}
\newtheorem{observation}[theorem]{Observation}
\newtheorem{notation}[theorem]{Notation}
\theoremstyle{remark} \theoremstyle{remark}
\newtheorem*{remark}{Remark} \newtheorem{remark}[theorem]{Remark}
\newcommand*{\fancyrefthmlabelprefix}{thm} \newcommand*{\fancyrefthmlabelprefix}{thm}
\newcommand*{\fancyreflemlabelprefix}{lem} \newcommand*{\fancyreflemlabelprefix}{lem}
@ -75,53 +104,67 @@
\DeclareMathOperator{\Obj}{Obj} \DeclareMathOperator{\Obj}{Obj}
\DeclareMathOperator{\im}{im} \DeclareMathOperator{\im}{im}
\DeclareMathOperator{\TFib}{TFib} \DeclareMathOperator{\TFib}{TFib}
\DeclareMathOperator{\colim}{colim} \newcommand*{\Fib}{\ensuremath{\mathrm{Fib}}\xspace}
\DeclareMathOperator{\Yo}{\mathbf{y}} \newcommand*{\∂}{\mathop{}}
\newcommand{\Fam}{\mathcal{F}\mathrm{am}} %\DeclareMathOperator{\colim}{colim}
\newcommand{\Id}{\textsf{Id}} \newcommand*{\colim}{\mathop{\mathrm{colim}}\limits}
\newcommand{\id}{\textsf{id}} %\DeclareMathOperator{\Yo}{\mathbf{y}}
\newcommand{\Cat}[1]{\ensuremath{\mathcal{#1}}\xspace} \newcommand*{\Fam}{\mathcal{F}\mathrm{am}}
\newcommand{\A}{\Cat{A}} \newcommand*{\Id}{\textsf{Id}}
\newcommand{\B}{\Cat{B}} \newcommand*{\id}{\textsf{id}}
\newcommand{\C}{\Cat{C}} \newcommand*{\Cat}[1]{\ensuremath{\mathcal{#1}}\xspace}
\newcommand{\D}{\Cat{D}} \newcommand*{\A}{\Cat{A}}
\newcommand{\E}{\Cat{E}} \newcommand*{\B}{\Cat{B}}
\newcommand{\I}{\ensuremath{\mathds{I}}\xspace} \newcommand*{\C}{\Cat{C}}
\newcommand{\J}{\Cat{J}} \newcommand*{\D}{\Cat{D}}
\newcommand{\T}{\ensuremath{\mathrm{T}}\xspace} \newcommand*{\E}{\Cat{E}}
\newcommand{\Hom}{\mathrm{Hom}} \newcommand*{\F}{\Cat{F}}
\newcommand*{\I}{\ensuremath{\mathds{I}}\xspace}
\newcommand*{\J}{\Cat{J}}
\renewcommand*{\L}{\ensuremath{\mathcal{L}}\xspace} % wfs not cat
\newcommand*{\R}{\ensuremath{\mathcal{R}}\xspace} % wfs not cat
\newcommand*{\T}{\ensuremath{\mathrm{T}}\xspace}
\newcommand*{\TCF}{\ensuremath{\mathrm{TC}}\xspace}
\newcommand*{\FF}{\ensuremath{\mathrm{F}}\xspace}
\newcommand*{\TFF}{\ensuremath{\mathrm{TF}}\xspace}
\newcommand*{\CF}{\ensuremath{\mathrm{CF}}\xspace}
\newcommand*{\Ct}{\ensuremath{\mathcal{C}_t}\xspace}
\newcommand{\type}{\, \mathsf{type}} \newcommand*{\Hom}{\mathrm{Hom}}
\newcommand{\cofib}{\, \mathsf{cofib}} \newcommand*{\Ho}{\mathrm{Ho}}
\newcommand{\formula}{\, \mathsf{formula}}
\newcommand{\face}{\, \mathsf{face}}
\newcommand{\mfill}{\mathsf{fill}}
\newcommand{\comp}{\mathsf{comp}}
\newcommand{\Path}[3]{\mathsf{Path}\,#1\,#2\,#3}
\newcommand{\p}{\textrm{p}}
\newcommand{\Set}{\ensuremath{\mathbf{Set}}\xspace}
\newcommand{\skl}{\mathrm{skl}} \newcommand*{\type}{\, \mathsf{type}}
\newcommand{\op}{\mathsf{op}} \newcommand*{\cofib}{\, \mathsf{cofib}}
\renewcommand{\d}{\mathsf{d}} \newcommand*{\formula}{\, \mathsf{formula}}
\renewcommand{\i}{\textsf{i}} \newcommand*{\face}{\, \mathsf{face}}
\renewcommand{\t}{\mathsf{t}} \newcommand*{\mfill}{\mathsf{fill}}
\newcommand{\⊗}{\mathbin{\hat{}}} \newcommand*{\comp}{\mathsf{comp}}
\newcommand{\×}{\mathbin{\hat{×}}} \newcommand*{\Path}[3]{\mathsf{Path}\,#1\,#2\,#3}
\newcommand{\□}{\ensuremath{\hat{}}\xspace} \newcommand*{\p}{\textrm{p}}
\newcommand{}{\ensuremath{\hat{Δ}}\xspace} \newcommand*{\Set}{\ensuremath{\mathbf{Set}}\xspace}
\newcommand{\dCube}{\ensuremath{_{∧∨}}\xspace}
\newcommand{\dcSet}{\ensuremath{\widehat{\dCube}}\xspace} \newcommand*{\skl}{\mathrm{skl}}
\newcommand{\FL}{\ensuremath{\textbf{FL}}\xspace} \newcommand*{\op}{\mathsf{op}}
\newcommand{\M}{\Cat{M}} \renewcommand*{\d}{\mathsf{d}}
\newcommand{\arr}[1]{#1^{}} \renewcommand*{\i}{\textsf{i}}
\newcommand{\FinSetOp}{\ensuremath{{\mathbf{FinSet}\rlap{\raisebox{0.75ex}{\rotatebox{90}{*}}}\raisebox{-0.3ex}{\rotatebox{90}{*}}}^\mathsf{op}}\xspace} \renewcommand*{\t}{\mathsf{t}}
\newcommand{\FinSet}{\ensuremath{{\mathbf{FinSet}\rlap{\raisebox{0.75ex}{\rotatebox{90}{*}}}\raisebox{-0.3ex}{\rotatebox{90}{*}}}}\xspace} \newcommand*{\⊗}{\mathbin{\hat{}}}
% \newcommand{\Yo}{\mathbf{y}} \newcommand*{\×}{\mathbin{\hat{×}}}
\newcommand*{\at}{\mathbin{\hat{@}}}
\newcommand*{\□}{\ensuremath{\widehat{}}\xspace}
\newcommand*{}{\ensuremath{\widehat{Δ}}\xspace}
\newcommand*{\dCube}{\ensuremath{_{∧∨}}\xspace}
\newcommand*{\dcSet}{\ensuremath{\widehat{\dCube}}\xspace}
\newcommand*{\FL}{\ensuremath{\textbf{FL}}\xspace}
\newcommand*{\M}{\Cat{M}}
\newcommand*{\arr}[1]{#1^{}}
\newcommand*{\FinSetOp}{\ensuremath{{\mathbf{FinSet}\rlap{\raisebox{0.75ex}{\rotatebox{90}{*}}}\raisebox{-0.3ex}{\rotatebox{90}{*}}}^\mathsf{op}}\xspace}
\newcommand*{\FinSet}{\ensuremath{{\mathbf{FinSet}\rlap{\raisebox{0.75ex}{\rotatebox{90}{*}}}\raisebox{-0.3ex}{\rotatebox{90}{*}}}}\xspace}
% \newcommand*{\Yo}{\mathbf{y}}
% %
\newcommand{\pb}{\arrow[dr,phantom,very near start, "\lrcorner"]} \newcommand*{\pb}{\arrow[dr,phantom,very near start, "\lrcorner"]}
\newcommand{\pu}{\arrow[dr,phantom,very near end, "\ulcorner"]} \newcommand*{\pu}{\arrow[dr,phantom,very near end, "\ulcorner"]}
\newcommand{\Ob}[1][0.66em]{\begin{tikzpicture}[y=#1,x=#1] \newcommand*{\Ob}[1][0.66em]{\begin{tikzpicture}[y=#1,x=#1]
\draw[line join=round] (0,1) -- (0,0) -- (1,0) -- (1,1); \draw[line join=round] (0,1) -- (0,0) -- (1,0) -- (1,1);
\end{tikzpicture} \end{tikzpicture}
} }
@ -134,10 +177,11 @@
\begin{document} \begin{document}
\maketitle \maketitle
\tableofcontents \tableofcontents
\newpage
\subfile{Preliminaries/Preliminaries} \subfile{Preliminaries/Preliminaries}
\subfile{main/Mainpart} \subfile{main/Mainpart}
\listoftodos \listoftodos
\printindex % \printindex
\printbibliography \printbibliography
\end{document} \end{document}


@ -0,0 +1,200 @@
\documentclass[../Main.tex]{subfiles}
\begin{document}
\begingroup
\renewcommand*{\E}{\ensuremath{\mathrm{E}}\xspace}
\newcommand*{\LF}{\ensuremath{\mathrm{L}}\xspace}
\newcommand*{\RF}{\ensuremath{\mathrm{R}}\xspace}
\renewcommand*{\d}{\ensuremath{\mathbf{d}}\xspace}
\section{Algebraic Weak Factorization Systems}
In this section, we will revisit the basic ideas of algebraic weak factorization systems (awfs).
We won't use them much explicitly throughout this paper, but we need one major result about them.
Also, while we don't talk about them explicitly, their ideas permeate through most of the arguments.
We will only repeat the most basic definitions and ideas, which will be enough to understand
this document. For a much more complete and in-depth discussion, see
\cites{riehlAlgebraicModelStructures2011}{bourkeAlgebraicWeakFactorisation2016}{bourkeAlgebraicWeakFactorisation2016a}.
This introduction follows the approach of \cite[Section 2]{riehlAlgebraicModelStructures2011}.
If the reader is already familiar with this concept, they might safely skip this section.
We start this section with some observations about regular functorial weak factorization systems (wfs).
For the remainder of this section, we write \({\E : \A^→ → \A^𝟛}\) for the factorization functor of some functorial
wfs \((\L,\R)\). We are going to write \(\d^0 , \d^2 : \A^𝟛 → \A^→\) for the functors induced by \(d_0, d_2 : 𝟚 → 𝟛\),
and \LF and \RF for the endofunctors \(\A^→ → \A^→\) given by
\(\d^2\E\) and \(\d^0\E\), the projections to the left and right factor of the factorization.
For a given \(f : X → Y\), we call the factoring object \(\E_f\).
\begin{eqcd*}[column sep =small]
& \E_f \arrow[rd,"\RF(f)"] & \\
X \arrow[rr,"f"] \arrow[ru,"\LF(f)"] & & Y
\end{eqcd*}
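In equations, and restating the functoriality of \(\E\) that we will use below (we write \(ϕ_{g,h}\) for the middle component of \(\E(g,h)\), anticipating the notation of the next diagrams):
\begin{equation*}
\RF(f)\,\LF(f) = f, \qquad ϕ_{g,h}\,\LF(f') = \LF(f)\,g, \qquad \RF(f)\,ϕ_{g,h} = h\,\RF(f')
\end{equation*}
for every square \((g,h) : f' → f\) in \(\A^→\).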
For now, we are interested in witnessing that some map is in the right class (or, dually, the left class). In other words,
we want to attach some kind of data to a right map from which we can deduce solutions to all the required lifting
problems. This is indeed possible. Assume that \(f\) is a right map; then a retraction \(r_f\) of \(\LF(f)\) would suffice.
Assume we had some left map \(f'\) and a lifting problem given by \((g,h)\). We can then factor this with the help of \E.
\begin{eqcd*}[row sep=huge, column sep=huge]
X' \arrow[dd,"f'"', bend right] \arrow[d,"\LF(f')"] \arrow[r,"g"] & X \arrow[d,"\LF(f)"] \arrow[dd,"f",bend left=50]\\
\E_{f'} \arrow[d,"\RF(f')"] \arrow[r, "ϕ_{g,h}"] & \E_f \arrow[d,"\RF(f)"] \arrow[u, "r_f", bend left]\\
Y' \arrow[r,"h"] \arrow[ur, dashed] & Y
\end{eqcd*}
We can then compose the solution for the whole lifting problem from the lift of the problem \((\LF(f)g, h)\) with
\(r_f\). That this is a solution is guaranteed by \(r_f\) being a retraction, \(r_f \LF(f) = \id\). Dually, we can
witness \(f'\) being a left map by supplying a split \(s_{f'}\) of \(\RF(f')\). If we do both at the same time,
we automatically get a canonical choice of lifts.
\begin{eqcd}[row sep=huge, column sep=huge] \label{eq:coalgebraliftsalgebra}
X' \arrow[dd,"f'"', bend right=50] \arrow[d,"\LF(f')"] \arrow[r,"g"] & X \arrow[d,"\LF(f)"] \arrow[dd,"f",bend left=50]\\
\E_{f'} \arrow[d,"\RF(f')"'] \arrow[r, "ϕ_{g,h}"] & \E_f \arrow[d,"\RF(f)"'] \arrow[u, "r_f", bend left]\\
Y' \arrow[r,"h"] \arrow[u, bend right, "s_{f'}"'] & Y
\end{eqcd}
Namely, for a lifting problem \((g,h)\), the lift is \(r_f ϕ_{g,h} s_{f'}\); and if we make \(r_f\) and \(s_{f'}\) part of the
data of a right (left) map, the chosen lifts are even functorial.
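A quick check that this indeed solves the lifting problem (a short verification, using \(r_f\LF(f) = \id\), \(f r_f = \RF(f)\), \(\RF(f')s_{f'} = \id\), \(s_{f'}f' = \LF(f')\), and the two commuting squares for \(ϕ_{g,h}\) displayed above):
\begin{align*}
(r_f\,ϕ_{g,h}\,s_{f'})\,f' &= r_f\,ϕ_{g,h}\,\LF(f') = r_f\,\LF(f)\,g = g,\\
f\,(r_f\,ϕ_{g,h}\,s_{f'}) &= \RF(f)\,ϕ_{g,h}\,s_{f'} = h\,\RF(f')\,s_{f'} = h.
\end{align*}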
The next question one might rightfully ask is whether we can always find such a witness. Happily, the answer is yes.
We just need to make up the right lifting problems.
\begin{equation*}
\begin{tikzcd}[ampersand replacement=\&,column sep = large, row sep = large]
X' \arrow[d,"f'"'] \arrow[r,"\LF(f')"]\& \E_{f'} \arrow[d, "\RF(f')"] \& \& X \arrow[r,equal] \arrow[d,"\LF(f)"'] \& X \arrow[d,"f"] \\
Y' \arrow[r,equal] \arrow[ru, "s_{f'}", dashed] \& Y' \& \& \E_f \arrow[r,"\RF(f)"] \arrow[ru,dashed,"r_f"] \& Y
\end{tikzcd}
\end{equation*}
We can also repack this information in a slightly different way: \(f\) is a right map exactly if \(f\) is a retract of
\(\RF(f)\) in \(\faktor{\A}{Y}\), and \(f'\) is a left map precisely if \(f'\) is a retract of \(\LF(f')\) in \(\faktor{X'}{\A}\).
\begin{eqcd*}
X
\arrow[d,"f"]
\arrow[r, "\LF(f)"]
& \E_f
\arrow[d, "\RF(f)"]
\arrow[r, "r_f", dashed]
& X
\arrow[d, "f"]
&
& X'
\arrow[d,"f'"]
\arrow[r, equal]
& X'
\arrow[r, equal]
\arrow[d, "\LF(f)"]
& X'
\arrow[d, "f'"]
\\
Y
\arrow[r,equal]
& Y
\arrow[r, equal]
& Y
&
& Y'
\arrow[r, "s_{f'}", dashed]
& \E_{f'}
\arrow[r, "\RF(f')"]
& Y'
\end{eqcd*}
If we focus on the diagram on the left-hand side, we can also see it as a morphism \(η_f : f → \RF(f)\) in \(\A^→\), completely dictated
by \(\E\) and thus natural in \(f\), together with a morphism \(α : \RF(f) → f\) such that \(αη_f = \id\).
If we reformulate what we have just observed, we get the following.
\begin{observation}
In a functorial wfs \((\L,\R)\) on \A, \(\LF : \A^→ → \A^→\) is a copointed endofunctor and \RF is a pointed endofunctor,
where the counit \(ε : \LF ⇒ \Id\) is given by the squares \(ε_f ≔
\begin{tikzcd}
\cdot \arrow[d,"\LF(f)"] \arrow[r,equal] & \cdot \arrow[d,"f"]\\
\cdot \arrow[r,"\RF(f)"] & \cdot
\end{tikzcd}
\)
and the unit \(η : \Id ⇒ \RF\) by \( η_f ≔
\begin{tikzcd}
\cdot \arrow[d,"f"] \arrow[r,"\LF(f)"'] & \cdot \arrow[d,"\RF(f)"'] \\
\cdot \arrow[r,equal] & \cdot
\end{tikzcd}
\). \L is precisely the class of \LF-coalgebras and \R the class of \RF-algebras.
\end{observation}%
One should think of these (co)algebras as morphisms with a choice of liftings.
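Spelled out for the pointed endofunctor \(\RF\) (a small unpacking; the component names are the ones used above): an \RF-algebra structure on \(f : X → Y\) is a square \(α = (r_f, b) : \RF(f) → f\) with \(α η_f = \id\), and
\begin{equation*}
α\,η_f = \id \iff r_f\,\LF(f) = \id_X \text{ and } b = \id_Y,
\end{equation*}
so it is exactly a retraction \(r_f\) of \(\LF(f)\) with \(f r_f = \RF(f)\); dually, an \LF-coalgebra structure on \(f'\) is exactly a section \(s_{f'}\) of \(\RF(f')\) with \(s_{f'} f' = \LF(f')\).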
At this point, we might try to get rid of the wfs \((\L,\R)\) as input data and try to recover it from the
factorization functor. And that works, as described by the methods above, but only if we know that this factorization
functor comes from a wfs. If we just start with an arbitrary factorization functor, we still get that all
\RF-algebras lift on the right against all \LF-coalgebras and vice versa,
but in general \(\RF(f)\) will not be an \RF-algebra.
One way to solve this problem is by adding a second natural transformation \(μ : \RF\RF ⇒ \RF\), such that
the necessary data commute, making \RF a monad (and dually \LF a comonad).
\begin{definition}\label{def:awfs:awfs}
An \emph{algebraic weak factorization system (awfs)} is given by a functor \(\E : \A^→ → \A^𝟛\) and two natural transformations
\(δ\) and \(μ\). We write \(\LF ≔ \d^2\E\) and \(\RF ≔ \d^0\E\) for the two induced
(co)pointed endofunctors \(\A^→ → \A^→\). We require that \((\LF,ε,δ)\) is a comonad and \((\RF,η,μ)\) is a monad.\footnote{%
Most modern definitions additionally require that a certain induced natural transformation be a distributive law of the comonad
over the monad. While we recognize its technical importance, we feel that it is distracting from our goal of getting the
general ideas across.}
\end{definition} \todo{this definition is terrible but i can't get it into nice sounding sentence}
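For orientation, the types of the structure maps are (a summary under the conventions above; the distributive-law condition from the footnote is left out):
\begin{equation*}
ε : \LF ⇒ \Id, \qquad δ : \LF ⇒ \LF\LF, \qquad η : \Id ⇒ \RF, \qquad μ : \RF\RF ⇒ \RF,
\end{equation*}
with \(ε\) and \(η\) the (co)units described in the observation above.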
\begin{notation}
If the rest of the data is clear from context, we will only specify the comonad and the monad and say that \((\LF,\RF)\) is an
algebraic weak factorization system.
\end{notation}
\begin{remark}\label{rem:awfs:retractClosure}
Neither the \LF-coalgebras nor the \RF-algebras are in general closed under retracts if we think of them
as (co)algebras for the comonad and the monad. But we get a full wfs by taking the \LF-coalgebras and \RF-algebras if we only
regard \LF and \RF as (co)pointed endofunctors, which is the same as the retract closure of the (co)algebras in the (co)monad sense.
\end{remark}
In light of this remark we can pass back from any awfs to a wfs. We can think of this operation as forgetting the choice
of liftings.
\begin{definition}
Let \((\LF,\RF)\) be an awfs. We will call \((\L,\R)\) the underlying wfs of \((\LF,\RF)\), where \L is the class
of \LF-coalgebras (for the copointed endofunctor) and \R is the class of \RF-algebras (for the pointed endofunctor).
\end{definition}
\begin{remark}
Dropping the algebraic structure is not a lossless operation. Even though the (co)pointed endofunctors and (co)algebras
with respect to those endofunctors
can be recovered (with enough classical principles), the unit and counit might not have been unique, and thus neither is
the category of (co)algebras with respect to the (co)monad structure.
\end{remark}
We will end this discussion with a few definitions and a theorem that we will later need. While we think the reader
is now well prepared to understand the statements and their usefulness, we are aware that we didn't cover
enough theory to understand their inner workings.\todo{is this wording ok? should I not do it? I don't want to scare
people}
\begin{definition}\label{def:awfs:rightMaps}
Let \J be a category and \(J : \J → \A^→\) be a functor. Then an object of the category \(J^⧄\) of \emph{right \(J\)-maps}
is a pair \((f,j)\) with \(f\) in
\(\A^→\) and \(j\) a function that assigns to every object \(i\) in \J, and to every
lifting problem
\begin{equation*}
\begin{tikzcd}[column sep=huge,row sep=huge]
L \arrow[r,"α"] \arrow[d, "J(i)"'] & X \arrow[d,"f"] \\
M \arrow[r,"β"] \arrow[ur, dashed,"{j(i,α,β)}"] & Y
\end{tikzcd}
\end{equation*}
a specified lift \(j(i,α,β)\), such that for every \((a,b) : k → i\) in \J, the diagram
\begin{equation*}
\begin{tikzcd}[column sep=huge, row sep = huge]
L' \arrow[r,"a"] \arrow[d,"J(k)"'] & L \arrow[r,"α"] \arrow[d,"J(i)", near end] & X \arrow[d,"f"] \\
M' \arrow[urr, dashed, near start, "{j(k,αa,βb)}"] \arrow[r,"b"] & M \arrow[ur, dashed, "{j(i,α,β)}"'] \arrow[r,"β"] & Y
\end{tikzcd}
\end{equation*}
commutes. Morphisms are morphisms in \(\A^→\) that preserve these liftings.
\end{definition}
\begin{remark}
This is even a functor \(()^⧄ : \faktor{\mathbf{Cat}}{\A^→} → \left(\faktor{\mathbf{Cat}}{\A^→}\right)^\op\).
\end{remark}
\begin{remark}
There is an adjoint notion of left lifting.
\end{remark}
\begin{remark}
This is a strong generalization of the usual case, where one talks about sets (or classes) of maps that lift against each other.
If one believes in strong enough choice principles, then the usual case is equivalent
to \(\J\) being a discrete category and \(J\) some monic functor.
\end{remark}
We will now turn to a theorem that provides us with an awfs whose right maps are exactly the right \(J\)-maps for some functor \(J\).
It is (for obvious reasons) known as Garner's small object argument.
\begin{theorem}[{Garner \cites{garnerUnderstandingSmallObject2009}[Theorem 2.28, Lemma 2.30]{riehlAlgebraicModelStructures2011}}]\label{awfs:smallObject}
Let \(\A\) be a cocomplete category satisfying either of the following conditions.
\begin{itemize}
\item[(\(*\))] Every \(X ∈ \A\) is \(α_X\)-presentable for some regular cardinal \(α_X\).
\item[(\(\dagger\))] Every \(X ∈ \A\) is \(α_X\)-bounded with respect to some proper, well-copowered
orthogonal factorization system on \(\A\), for some regular cardinal \(α_X\).
\end{itemize}
Let \( J : \mathcal{J} → \A^→\) be a category over \(\A^→\), with \(\mathcal{J}\) small. Then the free awfs on \(\mathcal{J}\) exists,
its category of \RF-algebras is isomorphic to \(J^⧄\), and the category of \RF-algebras is retract closed.
\end{theorem}
\endgroup
\end{document}


@ -1,19 +1,19 @@
\documentclass[../Main.tex]{subfiles} \documentclass[../Main.tex]{subfiles}
\begin{document} \begin{document}
\section{Cubical Type Theory} \section{Cubical Type Theory}
This section is mainly to give the reader which is not aquainted with cubical type theory This section is mainly to give the reader which is not acquainted with cubical type theory
some guidance and intuition. The reader that already is well-versed in this regard might some guidance and intuition. The reader that already is well-versed in this regard might
skip this section. We still expect the reader to be familiar with some form of univalent skip this section. We still expect the reader to be familiar with some form of univalent
type theory. For a more technical complete presentation we point at \cite{ABCHFL} and \cite{cavalloUnifyingCubicalModels2020}. type theory. For a more technical complete presentation we point at \cite{ABCHFL} and \cite{cavalloUnifyingCubicalModels2020}.
We think about this in terms of categorical semantics, so we expect the reader to have We think about this in terms of categorical semantics, so we expect the reader to have
basic idea about this. For the basic ideas there is the quite old \cite{jacobsCategoricalLogicType1999}. % TODO find categorical semantics references basic idea about this. For the basic ideas there is the quite old \cite{jacobsCategoricalLogicType1999}. % TODO find categorical semantics references
We also recall some translation between intuition and technical treatments of types those elements and similiar things. We also recall some translation between intuition and technical treatments of types those elements and similar things.
We also want to mention that cubical type theory isn't one type theory, but a zoo of related variances of the same idea. We also want to mention that cubical type theory isn't one type theory, but a zoo of related variances of the same idea.
Usually there is already a model in mind, and we allow our type theory to talk about some features of those. Usually there is already a model in mind, and we allow our type theory to talk about some features of those.
We will briefly talk about this in this section for a more technical treatment take a look at \cite{cavalloUnifyingCubicalModels2020} We will briefly talk about this in this section for a more technical treatment take a look at \cite{cavalloUnifyingCubicalModels2020}
Our presentation follows \cite{ABCHFL}. We state formation rules and describe the intended interpretation and leave Our presentation follows \cite{ABCHFL}. We state formation rules and describe the intended interpretation and leave
out the syntactial rules that captures the behaviour that the interpretation captures, as we think that would out the syntactical rules that captures the behavior that the interpretation captures, as we think that would
not add to further intuition. For the cartesian case these can be found in \cite{ABCHFL} and for the deMorgan case in \cite{CCHM}. not add to further intuition. For the Cartesian case these can be found in \cite{ABCHFL} and for the deMorgan case in \cite{CCHM}.
\subsection{types and universes} \subsection{types and universes}
@ -30,13 +30,13 @@ a pullback.
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
Of course this means that there have to exist the right amount of morphisms Of course this means that there have to exist the right amount of morphisms
from \(Γ → U\), but this is the property that makes our univese a universe. from \(Γ → U\), but this is the property that makes our universe a universe.
Of course this translation can't be expected to be one to one as pullbacks Of course this translation can't be expected to be one to one as pullbacks
are only defined up to isomorphism. One way to deal with this to make a are only defined up to isomorphism. One way to deal with this to make a
choice of pullbacks (calling these display maps) and require that every small fibrant map is isomorph choice of pullbacks (calling these display maps) and require that every small fibrant map is isomorphic
to one of those display maps. This has further problems though. For example is to one of those display maps. This has further problems though. For example is
substitution functorial on the nose, but if we were to go for display maps this substitution functorial on the nose, but if we were to go for display maps this
correpsonds to building another pullback along a map between contexts. So we corresponds to building another pullback along a map between contexts. So we
would need to make our choice of display maps that these choice of pullbacks would need to make our choice of display maps that these choice of pullbacks
gets functorial on the nose. On the other hand working with maps into \(U\) gets functorial on the nose. On the other hand working with maps into \(U\)
has technical benefits. Substitution becomes precomposition, which has technical benefits. Substitution becomes precomposition, which
@ -71,7 +71,7 @@ HoTT, cubical type theory is allowed to talk rather directly about liftings of t
\subsubsection{interval, dimensions and cubes} \subsubsection{interval, dimensions and cubes}
Cubical type theory has a rather direct way talking about paths. We allow the type theory to talk about the Cubical type theory has a rather direct way talking about paths. We allow the type theory to talk about the
underlying inteval object. We will use the symbol \(\I\) here. This might be a bit unintuitive because underlying interval object. We will use the symbol \(\I\) here. This might be a bit unintuitive because
these are not necessarily Kan. To not get tangled up in this problem we split our context in two. The first half only containing these are not necessarily Kan. To not get tangled up in this problem we split our context in two. The first half only containing
variables of the interval and later half types as we are used to. Or in other words every context has the form variables of the interval and later half types as we are used to. Or in other words every context has the form
\(Φ;Γ\), where \(Φ\) has the form \(i:\I,j:\I,\dots\), and \(Γ\) is allowed to depend on \(Φ\). We often \(Φ;Γ\), where \(Φ\) has the form \(i:\I,j:\I,\dots\), and \(Γ\) is allowed to depend on \(Φ\). We often
@ -86,7 +86,7 @@ where \(Γ\) is empty and we have a closed type \(\cdot ⊢ A\). Then a term
Φ \arrow[r] \arrow[u,dashed,"a", bend left] & * \arrow[r,"A"] & U Φ \arrow[r] \arrow[u,dashed,"a", bend left] & * \arrow[r,"A"] & U
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
and thus also to a map \(Φ → A\). As \(Φ\) is some representable cube it is isomomorhpic to some \(\Yo c\) and thus also to a map \(Φ → A\). As \(Φ\) is some representable cube it is isomorphic to some \(\Yo c\)
and so this map corresponds by yoneda to an element of \(A(c)\). and so this map corresponds by yoneda to an element of \(A(c)\).
In other words we might think about the elements of \(Φ\) as intervals (with start- and endpoint) in different In other words we might think about the elements of \(Φ\) as intervals (with start- and endpoint) in different
directions. directions.
@ -110,14 +110,14 @@ and the following rules
\] \]
\subsubsection{cofibrations} \subsubsection{cofibrations}
Models of cubical type thoery are usually cofibrantly generated, and these generating maps have representable Models of cubical type theory are usually cofibrantly generated, and these generating maps have representable
codomain. This is done by restricting our first context half by some form of dimension formulas. So our context gets codomain. This is done by restricting our first context half by some form of dimension formulas. So our context gets
a third part \(Φ;ϕ;Γ\). The exact details depend on the model in question. Following the presentation in a third part \(Φ;ϕ;Γ\). The exact details depend on the model in question. Following the presentation in
in \cite{ABCHFL} we introduce an additional kind of judgements, of the form in \cite{ABCHFL} we introduce an additional kind of judgments, of the form
\begin{equation*} \begin{equation*}
Φ ⊢ α \cofib \quad \textrm{and} \quad Φ ⊢ ϕ \formula Φ ⊢ α \cofib \quad \textrm{and} \quad Φ ⊢ ϕ \formula
\end{equation*} \end{equation*}
Our \(ϕ\) will constist of a list of such \(α\), and we think of this as one formula by conjunction of this list. Our \(ϕ\) will consist of a list of such \(α\), and we think of this as one formula by conjunction of this list.
This is expressed by the following rules This is expressed by the following rules
% %
%TODO alignment %TODO alignment
@ -144,14 +144,14 @@ get syntactically represented by the following rule.
\end{prooftree} \end{prooftree}
\end{equation*} \end{equation*}
Our interpretation will naturally get to be objects in \(\mathbf{Sub}(Φ)\). Our interpretation will naturally get to be objects in \(\mathbf{Sub}(Φ)\).
For exmaple \(Φ ⊢ r = 1\), means that \(r ∈ Φ\) and we can write \(Φ\) as \(Ψ,r:\I\) and the judgment gets For example \(Φ ⊢ r = 1\), means that \(r ∈ Φ\) and we can write \(Φ\) as \(Ψ,r:\I\) and the judgment gets
interpreted as the face inclusion \(\id,1/r⟩:Ψ → Φ\). If we have \(Φ ⊢ r = r'\), we can write \(Φ\) as interpreted as the face inclusion \(\id,1/r⟩:Ψ → Φ\). If we have \(Φ ⊢ r = r'\), we can write \(Φ\) as
\(Ψ,r':\I,r:\I\) and our interpretation becomes the diagonal \(\id,r'/r⟩\). \(Φ⊢ r = r\) \(Ψ,r':\I,r:\I\) and our interpretation becomes the diagonal \(\id,r'/r⟩\). \(Φ⊢ r = r\)
as the identity and \(Φ⊢0=1\) as the inclusion of the empty subobject. Cubical variants disagree which as the identity and \(Φ⊢0=1\) as the inclusion of the empty subobject. Cubical variants disagree which
face inclusions should be allowed. The \cite{CCHM} model does not include the diagonal case, while in face inclusions should be allowed. The \cite{CCHM} model does not include the diagonal case, while in
the cartesian model \cite{ABCHFL} the inclusion of these are important to get fillings from compositions. the Cartesian model \cite{ABCHFL} the inclusion of these are important to get fillings from compositions.
We are also allowed to glue these together. For exmaple we might want the right and the top face of a square. We are also allowed to glue these together. For example we might want the right and the top face of a square.
We can do this using the \(\) operation. We can do this using the \(\) operation.
\begin{equation*} \begin{equation*}
\begin{prooftree} \begin{prooftree}
@ -161,7 +161,7 @@ We can do this using the \(\) operation.
\end{prooftree} \end{prooftree}
\end{equation*} \end{equation*}
We will interpret as the coproduct in \(\mathbf{Sub}(Φ)\), or spelled out as the pushout of a their pullback. We will interpret as the coproduct in \(\mathbf{Sub}(Φ)\), or spelled out as the pushout of a their pullback.
The geometrical intution might be we glue the two subobjects together, The geometrical intuition might be we glue the two subobjects together,
along their intersection. Or as a diagram along their intersection. Or as a diagram
\begin{equation} \begin{equation}
\begin{tikzcd} \begin{tikzcd}
@ -173,7 +173,7 @@ along their intersection. Or as a diagram
where the outer square is a pullback and the inner square is a pushout. where the outer square is a pullback and the inner square is a pushout.
We also have a third operation, that will take an important role in the development of We also have a third operation, that will take an important role in the development of
getting a constructive model for univalence. Nameliy \(\), like the name suggest getting a constructive model for univalence. Namely \(\), like the name suggest
it will get interpreted as the right adjoint of the pullback functor between it will get interpreted as the right adjoint of the pullback functor between
subobject categories. We add the following rule subobject categories. We add the following rule
\begin{equation} \begin{equation}
@ -182,7 +182,7 @@ subobject categories. We add the following rule
\infer1{Φ⊢∀i:\I.α \cofib} \infer1{Φ⊢∀i:\I.α \cofib}
\end{prooftree} \end{prooftree}
\end{equation} \end{equation}
Geometrically this might be envisioned by only keeping faces that are defined arcoss the Geometrically this might be envisioned by only keeping faces that are defined across the
entirety of that dimension, and then projecting the dimension away. entirety of that dimension, and then projecting the dimension away.
@ -200,7 +200,7 @@ are maps from the interval we can actually evaluate them.
\end{equation*} \end{equation*}
Which you can think of as the paths from \(t\) to \(u\) in \(A\), or as the Which you can think of as the paths from \(t\) to \(u\) in \(A\), or as the
ways \(t\) and \(u\) are identifiable. As we want to think about paths as maps ways \(t\) and \(u\) are identifiable. As we want to think about paths as maps
from the interval, get rules similiar to that of function types. from the interval, get rules similar to that of function types.
\begin{equation*} \begin{equation*}
\begin{prooftree} \begin{prooftree}
\hypo{Φ;ϕ;Γ ⊢A} \hypo{Φ;ϕ;Γ ⊢A}
@ -221,44 +221,44 @@ with the usual (\(η\)) and (\(β\)) rules as well as \(p\,0 \equiv t\) and \(p\
\subsubsection{terms of intervals} \subsubsection{terms of intervals}
In general there are more terms of the interval as we allowed in the construction of \(ϕ\) above. In general there are more terms of the interval as we allowed in the construction of \(ϕ\) above.
Which are allowed is usually closely tied to the site of the underlying model. For example in the Which are allowed is usually closely tied to the site of the underlying model. For example in the
\cite{CCHM} model the underlying cite is the lawvere theory of deMorgan algebras, and the terms \cite{CCHM} model the underlying site is the Lawvere theory of deMorgan algebras, and the terms
of the interval in that are also the terms of a deMorgan algebra. We want to shortly study them of the interval in that are also the terms of a deMorgan algebra. We want to shortly study them
and give some names and intuition to them. We will mostly talk about the \cite{CCHM} model, as the and give some names and intuition to them. We will mostly talk about the \cite{CCHM} model, as the
cartesian case has a lot less morphisms in its site. Cartesian case has a lot less morphisms in its site.
Unravelling the definition of a lawvere theory, we get that the site of the \cite{CCHM} consists of Unraveling the definition of a Lawvere theory, we get that the site of the \cite{CCHM} consists of
a skelleton of finite sets and a morphism from \(A\) to \(B\) is a function from \(B\) to the free a skeleton of finite sets and a morphism from \(A\) to \(B\) is a function from \(B\) to the free
deMorgan algebra generated by \(A\). As the yoneda embedding ist fully faithfull this also holds deMorgan algebra generated by \(A\). As the yoneda embedding is fully faithful, this also holds
for the subcategory of representables. A term in some dimensional context \(Φ\) of type \I becomes for the subcategory of representables. A term in some dimensional context \(Φ\) of type \I becomes
a map from \(Φ → \I\), and as this a map from \(\I^n → \I\). These correspond to a term of a deMorgan a map from \(Φ → \I\), and as this a map from \(\I^n → \I\). These correspond to a term of a deMorgan
algebra with \(n\) variables. We will write for this judgements of the form algebra with \(n\) variables. We will write for this judgments of the form
\begin{equation*} \begin{equation*}
Φ ⊢ t : \I Φ ⊢ t : \I
\end{equation*} \end{equation*}
We can think of a type in a context with \(n\) dimension variables as an \(n\)-cube We can think of a type in a context with \(n\) dimension variables as an \(n\)-cube
of types. And a term as an \(n\)-cube in the total space of the type, going across the fibres. of types. And a term as an \(n\)-cube in the total space of the type, going across the fibers.
Depending on the map in the site, or equivalently map between the representables, we get terms Depending on the map in the site, or equivalently map between the representables, we get terms
of cubes that we can plug into our types. of cubes that we can plug into our types.
For example if we have a line of types, we can look For example if we have a line of types, we can look
at it as a degenerate square of types. These can be done in multiple ways. The obvious one is at it as a degenerate square of types. These can be done in multiple ways. The obvious one is
through weakening. Or syntactically gooing from \(r:\I;; ⊢ A\) to \(r:\I,r':\I;; ⊢ A\). Which gives through weakening. Or syntactically going from \(r:\I;; ⊢ A\) to \(r:\I,r':\I;; ⊢ A\). Which gives
us a square of types that is constant in the direction of \(r'\). In usual topology there are more us a square of types that is constant in the direction of \(r'\). In usual topology there are more
ways to get degenerate a square from a line, for example we might want that the right and the ways to get degenerate a square from a line, for example we might want that the right and the
top face are given line and the left and bottom face is constant. These kind of degeneracies are top face are given line and the left and bottom face is constant. These kind of degeneracies are
usually called connections and exist in some models. In the \cite{CCHM} model this specific usually called connections and exist in some models. In the \cite{CCHM} model this specific
degenracy corresponds to the meet (\(\)). Or syntactically more explicit, if we have degeneracy corresponds to the meet (\(\)). Or syntactically more explicit, if we have
a line of types \(r:\I⊢A\) we can get a degenarete square of types \(r:\I,r':\I⊢A(r/r∧r')\). a line of types \(r:\I⊢A\) we can get a degenerate square of types \(r:\I,r':\I⊢A(r/r∧r')\).
In point set topology we could think about ths as taking the infimum of \(r\) and \(r'\). In point set topology we could think about this as taking the infimum of \(r\) and \(r'\).
Of course there is also the dual connection, which is also present in \cite{CCHM}. There Of course there is also the dual connection, which is also present in \cite{CCHM}. There
is also a model with only one of the two connections studied in \cite{cavalloRelativeEleganceCartesian2022} is also a model with only one of the two connections studied in \cite{cavalloRelativeEleganceCartesian2022}
These degeneracies allow to reconstruct fillings from compositions. The cartesian model These degeneracies allow to reconstruct fillings from compositions. The Cartesian model
does not have connections, there we will use diagonal cofibrations to get fillings from does not have connections, there we will use diagonal cofibrations to get fillings from
compositions. compositions.
% TODO image of connections. % TODO image of connections.
In the \cite{CCHM} model we are also allowed to reverse dimensions. This corresponds In the \cite{CCHM} model we are also allowed to reverse dimensions. This corresponds
to the inversion/negation of the deMorgan algbera. These kind of maps are often called reversals. to the inversion/negation of the deMorgan algebra. These kind of maps are often called reversals.
The last map we want to mention here, are diagonals. These correspond syntactically to the diagonals The last map we want to mention here, are diagonals. These correspond syntactically to the diagonals
\(r:\I;; ⊢ (r,r) : \I^2\). The reason to mention them is that the \cite{BCH} model does not feature those \(r:\I;; ⊢ (r,r) : \I^2\). The reason to mention them is that the \cite{BCH} model does not feature those
@ -275,7 +275,7 @@ The first thing we need to talk about a filling, is a filling problem.
\end{equation*} \end{equation*}
A syntactic representation of the figuratively drawn problem above might be represented by A syntactic representation of the figuratively drawn problem above might be represented by
witnessing the following rule. witnessing the following rule.
\todo{should I also include the judgemental equalities that say that a filling is actually a filling of that problem?} \todo{should I also include the judgmental equalities that say that a filling is actually a filling of that problem?}
\begin{equation*} \begin{equation*}
\begin{prooftree} \begin{prooftree}
\hypo{z: \I ,z':\I ;;Γ ⊢ A \type } \hypo{z: \I ,z':\I ;;Γ ⊢ A \type }
@ -288,7 +288,7 @@ Allowing this in its obvious generalization runs directly into problems, as that
liftings against arbitrary cofibrations, and not only trivial ones. To get around this problem liftings against arbitrary cofibrations, and not only trivial ones. To get around this problem
we reformulate our syntactic representation. To do this we split the problem data into two we reformulate our syntactic representation. To do this we split the problem data into two
parts. The first part is the base, that is not allowed to talk about the dimension we want to parts. The first part is the base, that is not allowed to talk about the dimension we want to
fill, but completly defined in all other dimensions, and a part that is allowed to talk about the dimension we want to fill in, fill, but completely defined in all other dimensions, and a part that is allowed to talk about the dimension we want to fill in,
but must be attached to the base and agree with it. but must be attached to the base and agree with it.
\begin{equation*} \begin{equation*}
\begin{prooftree} \begin{prooftree}
@ -324,7 +324,7 @@ Or written as the intended commuting diagram
Φ,z:\I \arrow[r] \arrow[ur,"{\mfill(b,t)}", dashed] & Φ,z:\I;;Γ Φ,z:\I \arrow[r] \arrow[ur,"{\mfill(b,t)}", dashed] & Φ,z:\I;;Γ
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
From a syntactial point of view that rule is not so great, as in the From a syntactical point of view that rule is not so great, as in the
conclusion there is an explicit free variable \(r'\), and it is not conclusion there is an explicit free variable \(r'\), and it is not
at all clear how one would substitute a term for \(r'\) into our filling term. at all clear how one would substitute a term for \(r'\) into our filling term.
The usual way of dealing with this problem is by making \(r'\) a term appearing The usual way of dealing with this problem is by making \(r'\) a term appearing
@ -350,9 +350,9 @@ If we translate this back to the world of commutative diagrams we get\footcite[p
Φ \arrow[r,"{(\id,r'/z)}"'] \arrow[rru, dashed,"{\comp_A^{z:r→r}(b,t)}"] & Φ,z:\I \arrow[r] & Φ,z:\I;;Γ Φ \arrow[r,"{(\id,r'/z)}"'] \arrow[rru, dashed,"{\comp_A^{z:r→r}(b,t)}"] & Φ,z:\I \arrow[r] & Φ,z:\I;;Γ
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
Different variants allow different terms for \(r'\) and \(z\). On one extrem the Different variants allow different terms for \(r'\) and \(z\). On one extreme the
\cite{CCHM} model only allows composition from 0 to 1. Which is in the presence of \cite{CCHM} model only allows composition from 0 to 1. Which is in the presence of
reversals and connections is not really a strong restriction. In the cartesian model reversals and connections is not really a strong restriction. In the Cartesian model
we need to be less restrictive to. we need to be less restrictive to.
In general it is not clear that can recover fillings from compositions. So it In general it is not clear that can recover fillings from compositions. So it
@ -361,16 +361,16 @@ is not to difficult, and we can define
\begin{equation*} \begin{equation*}
Φ,i:\I;;Γ⊢\mfill_A^z(b,t) ≔ \comp_{A(i/i∧j)}^{a→b}(b,t(i∧j/i)) : A Φ,i:\I;;Γ⊢\mfill_A^z(b,t) ≔ \comp_{A(i/i∧j)}^{a→b}(b,t(i∧j/i)) : A
\end{equation*}\todo{make this technical correct} \end{equation*}\todo{make this technical correct}
\todo{agree on a way to write substitution, why is this not uniform across litrature?} \todo{agree on a way to write substitution, why is this not uniform across literature?}
\todo{make handrawn picture for this} \todo{make hand drawn picture for this}
This doesn't work in the cartesian setting though, as we don't have any connections. Here This doesn't work in the Cartesian setting though, as we don't have any connections. Here
we can use the fact that we allowed diagonals in our filling problems. And define we can use the fact that we allowed diagonals in our filling problems. And define
\begin{equation*} \begin{equation*}
Φ,z:\I;;Γ ⊢ \mfill_A^{z:r}(b,t) ≔ \comp_A^{z':r→z}(b,t) : A Φ,z:\I;;Γ ⊢ \mfill_A^{z:r}(b,t) ≔ \comp_A^{z':r→z}(b,t) : A
\end{equation*} \end{equation*}
where \(z'\) does not appear in \(Φ\). where \(z'\) does not appear in \(Φ\).
\todo{make handrawn picture for this} \todo{make hand drawn picture for this}
\begin{equation} \begin{equation}
\begin{prooftree} \begin{prooftree}


@ -3,14 +3,15 @@
\section{Leibniz Construction} \section{Leibniz Construction}
We will see use a well know construction in homotopy theory, to We will see use a well know construction in homotopy theory, to
elegantly construct a lot of interesting objects, the Leibniz construction. elegantly construct a lot of interesting objects, the Leibniz construction.
This section will recall the definition establish some facts and give some examples. This section will mostly give a definition and some examples to get familiar with this construction.
There are multiple If the reader is already familiar with it, they might skip this section without any problems.
We start be giving the definition We start be giving the definition
\begin{definition}[Leibniz Construction] \begin{definition}[Leibniz Construction]
Let \(\A\), \(\B\) and \(\C\) be categories and \(\C\) has finite pushouts. Let \(⊗ : \A × \B\C \) be a bifunctor. Let \(\A\), \(\B\) and \(\C\) be categories and \(\C\) has finite pushouts. Let \(⊗ : \A × \B\C \) be a bifunctor.
Then we define \(\hat{} : \arr{\A} × \arr{\B}\arr{\C}\) to defined as follows. Given \(f : A → A' \) in \(\A\) and Then we define the \emph{Leibniz Construction} \(\hat{⊗} : \arr{\A} × \arr{\B} → \arr{\C}\) to be the functor
\( g : B → B' \) in \(\B\), then \( f \hat{} g \) is defined as in the following diagram.\\ that sends \(f : A → A' \) in \(\A\) and
\( g : B → B' \) in \(\B\), to \( f \hat{⊗} g \), defined as in the following diagram.
\begin{equation*} \begin{equation*}
\begin{tikzcd} \begin{tikzcd}
A ⊗ B \arrow[r, "f ⊗ \id"] \arrow[d, "\id ⊗ g"] \arrow[dr, phantom, "\ulcorner", very near end] & A' ⊗ \mathrlap{B} \phantom{A'} \arrow[ddr, bend left, "\id ⊗ g"] \arrow[d] & \\ A ⊗ B \arrow[r, "f ⊗ \id"] \arrow[d, "\id ⊗ g"] \arrow[dr, phantom, "\ulcorner", very near end] & A' ⊗ \mathrlap{B} \phantom{A'} \arrow[ddr, bend left, "\id ⊗ g"] \arrow[d] & \\
@ -18,34 +19,64 @@ We start be giving the definition
& & A' ⊗ B' & & A' ⊗ B'
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
\end{definition} If \(⊗\) is the tensor functor of a monoidal category we also call it the \emph{Leibniz product} or \emph{pushout-product}. If \(⊗\) is
When the bifunctor is the cartesian product functor, this construction is also known as the \emph{pushout-product}. the functor application functor we also call it the \emph{Leibniz application}.
Now we will give some examples, some of them instructive and some that will appear later again.
\begin{example}
Let \(\C\) be your favourite category of spaces, \( × \) the cartesian product and \(f = g\) be the
inclusion of the point into the start of the interval. Then the pushout-product \(f \hat{×} g \) is the inclusion of
two adjacents sides of a square, into that square.
\end{example}
Or a bit more general
\begin{example}\label{Leibniz:Ex:OpenBox}
Let \(\C\) be your favourite category of spaces, \( × \) the cartesian poduct. Let \(f\) be the
inclusion of one endpoint into the interval. Let \(g : ∂\I^n → \I^n \) be the boundary inclusion into
the \(n\)-cube. Then \(f \hat{×} \) is the filling of an \(n\) dimensional open box.
\end{example}
\begin{example}
Let \(\C\) be your favourite category of spaces, and \( @ : \C^{\C} × \C\C \) defined as
\( F @ x ≔ F(x)\) the functor application functor. Let \(\I()\) be a functorial cylinder and \(i_0\).
be one of the inclusions \( i_0 : \Id\I() \) and \( ∂X → X \) be the boundery inclusion of \(X\).
Then \( i_0 \hat{@} (∂X → X) \) is the filling of a cylinder with one base surface of shape \(X\).
\end{example}
As we will come across this example multiple times it will get some special syntax.
\begin{definition}
\end{definition} \end{definition}
The following examples hold in any category of nice spaces; we will state them for simplicial sets, as the reader is probably
familiar with them.
\begin{example}\label{ex:leib:prism}
Let \(δ_k : * → Δ^1\) for \(k ∈ \{0,1\}\) be one of the endpoint inclusions into the interval and \(i : \∂ Δ^n → Δ^n\) the boundary
inclusion of the \(n\)-simplex, then \(δ_k \hat{×} i\) is the inclusion of a prism without its interior and one cap, into the prism.
\end{example}
This gives us a description of a possible set of generating trivial cofibrations. This will be an ongoing theme. For cubical sets we
will define trivial cofibrations in this fashion. We can even observe something stronger: if we replace the boundary inclusion
of the \(n\)-simplex with the boundary inclusion of an arbitrary space \(X\), we get the inclusion of the cylinder object of \(X\),
without a cap and its interior, into the cylinder.
\begin{figure}
\missingfigure{cylinder inclusion as pushout product}
\caption{cylinder inclusion as Leibniz product}
\end{figure}
\begin{observation}
If \(f\) and \(g\) are cofibrations then \(f \hat{×} g\) is too. If one of them is a trivial cofibration then so is \(f \hat{×} g\).
\end{observation}
This has far-reaching consequences, and one can define a monoidal model category where this is one of the axioms
for the tensor functor. For more
detail see \cite{hoveyModelCategories2007}. We are not going to need much of this theory explicitly, but it is worth noting
that all examples of model categories that we are going to encounter are of this form.
We will also encounter basically the same construction as \cref{ex:leib:prism} in another way. We can also get the cylinder object
functorially, such that the cap inclusions are natural transformations.
We can then get the inclusion the Leibniz product produces in a natural manner via the Leibniz application.
\begin{example}\label{ex:leib:functorialCylinder}
Let \( @ : \C^{\C} × \C → \C \) be defined as
\( F @ x ≔ F(x)\) the functor application functor. Let \(\I × ()\) be the functor that sends \(X\) to its cylinder object (also known as the
product with the interval).
Let \( δ_k : \Id ⇒ \I × () \) be one of the cap inclusions, that is, the natural transformation that embeds the space in one of the cylinder caps.
Let \( \∂ X → X \) be the boundary inclusion of \(X\).
Then \( δ_k \hat{@} (∂X → X) \) is the filling of a cylinder with one base surface of shape \(X\).
\end{example}
This kind of construction will later play an important role in the construction of our desired model categories.
We will also encounter another form of Leibniz application, which at first glance does not have a lot to do with homotopy. On
the other hand, one might read this as saying that taking the component of a natural transformation is some sort of homotopic construction.
\begin{lemma}
Let \(F,G : \A → \B \) be functors, let \(η : F ⇒ G\) be a natural transformation, and let \A have an initial object \(∅\). Let
\(F\) and \(G\) preserve the initial object.
We write \(∅ → X\) for the unique map of this type. Then we get \(η \at (∅ → X) = η_X\).
\end{lemma}
\begin{proof}\todo{this is contravariant and I'm too tired}
We will draw the diagram from the definition.
\begin{eqcd*}
F (∅) \arrow[r, "F(∅ → X)"] \arrow[d, "η_∅"] \arrow[dr, phantom, "\ulcorner", very near end] & F(X) \arrow[ddr, bend left, "η_X"] \arrow[d] & \\
G(∅) \arrow[drr, bend right, "G(∅ → X)"] \arrow[r] & G(∅) ∐\limits_{F(∅)} \mathrlap{F(X)} \phantom{G(∅)} \arrow[dr, dashed ,"η \at (∅ → X)"] & \\
& & G(X)
\end{eqcd*}
If we substitute \(F(∅)\) and \(G(∅)\) with \(∅\), the claim directly follows.
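Spelled out (a small check, using that \(F\) and \(G\) preserve the initial object):
\begin{equation*}
G(∅) ∐_{F(∅)} F(X) ≅ ∅ ∐_{∅} F(X) ≅ F(X),
\end{equation*}
and under this identification the induced dashed map is exactly \(η_X\).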
\end{proof}
% TODO Left adjoint % TODO Left adjoint
\end{document} \end{document}


@ -1,18 +1,18 @@
\documentclass[../Main.tex]{subfiles} \documentclass[../Main.tex]{subfiles}
\begin{document} \begin{document}
\section{Model structure}\label{modelStructure} \section{Model structure}\label{modelStructure}
% TODO restructure lifting with respect to category % TODO restructure lifting with respect to category
% awfs % awfs
% Then cubical sets % Then cubical sets
\subsection{Equivariant Cartesian Cubes}
\subsection{Cartesian Cubes}
We now presenting our, main star, the equivariant model structure on cubical sets. We now presenting our, main star, the equivariant model structure on cubical sets.
We will give a description of the two involved factorization systems and then follow We will give a description of the two involved factorization systems and then follow
an argument from \cite{cavalloRelativeEleganceCartesian2022}, to show that it is indeed an argument from \cite{cavalloRelativeEleganceCartesian2022}, to show that it is indeed
a model structure. The modelstructure as well as the presentation is taken from a model structure. The modelstructure as well as the presentation is taken from
\cite{mathematicalsciencesresearchinstitutemsriEquivariantUniformKan2020}. This \cite{riehlVid}. This
equivariant model structure is also know as the ACCRS model structure. equivariant model structure is also know as the ACCRS model structure.
% reference for uniform fibrations CCHM and % reference for uniform fibrations CCHM and
@ -24,120 +24,98 @@ of the point a trivial cofibration. This is archived by forcing an extra propert
the fibrations. the fibrations.
\begin{equation*} \begin{equation*}
\begin{tikzcd}[column sep=large] \begin{tikzcd}[column sep=large, row sep=large]
* \arrow[r,"\id"] \arrow[d,tail,"f"] & * \arrow[d,tail,"σf"'] \arrow[r] & * \arrow[d,tail] \arrow[r,"β"] & X \arrow[d,"f"] \\ * \arrow[r,"\id"] \arrow[d,tail,"f"] & * \arrow[d,tail,"σf"'] \arrow[r] & * \arrow[d,tail] \arrow[r,"β"] & X \arrow[d,"f"] \\
\I^2 \arrow[r,"σ"', shift right=0.5ex] \arrow[r,"\id", shift left=0.5ex] \arrow[urrr, dashed, ,near end, "{j(αe,β)}"'] & \I^2 \arrow[urr, dashed, "{j(αeσ,β)}"] \arrow[r, "e"] & Q \arrow[r,"α"] & Y \I^2 \arrow[r,"σ"', shift right=0.7ex] \arrow[r,"\id", shift left=0.7ex] \arrow[urrr, dashed, ,near end, "{j(αe,β)}"'] & \I^2 \arrow[urr, dashed, "{j(αeσ,β)}"] \arrow[r, "e"] & Q \arrow[r,"α"] & Y
\end{tikzcd} \end{tikzcd}
\end{equation*} % Fix twist of suboject \end{equation*} % Fix twist of suboject
Here \(e\) is the coequilizer map. The hope would be, that Here \(σ\) is the map that swaps the dimensions and \(e\) is the coequalizer map. The hope would be that
the coequilizer, would now present a lift for the right most lifting problem. But in general that the coequilizer, would now present a lift for the right most lifting problem. But in general that
does not hold, the lifts have neither to commute with \(σ\) nor with \(\id\). The path forward does not hold, the lifts have neither to commute with \(σ\) nor with \(\id\). The path forward
will be to restrict the fibrations to have the desired property. will be to restrict the fibrations to have the desired property.
% %
\subsubsection{Weak factorization systems} % \subsubsection{Weak factorization systems}
If we force additional properties on our lifting problems, one might rightfully % If we force additional properties on our lifting problems, one might rightfully
ask if these indeed induce a weak factorization system (wfs). It is also unclear if % ask if these indeed induce a weak factorization system (wfs). It is also unclear if
there are induced factorizations at all. The good news is, there is a refined version % there are induced factorizations at all. The good news is, there is a refined version
of the small object argument \ref{smallObject}, that will help us out. This works on % of the small object argument \cref{smallObject}, that will help us out. This works on
the back of algebraic weak factorization systems. We don't need the whole theory and % the back of algebraic weak factorization systems. We don't need the whole theory and
get by with the following definitiont. % get by with the following definitiont.
%
\begin{definition}\label{def:rightMaps} % \begin{definition}\label{def:rightMaps}
Let \J be category and \(J : \J\M^\) be a functor. Then an object \(f\) in % Let \J be category and \(J : \J\M^\) be a functor. Then an object \(f\) in
\(\M^\) is a \emph{right \(J\)-map} if for every object \(i\) in \J, and for every % \(\M^\) is a \emph{right \(J\)-map} if for every object \(i\) in \J, and for every
lifting problem % lifting problem
\begin{equation*} % \begin{equation*}
\begin{tikzcd}[column sep=large] % \begin{tikzcd}[column sep=large,row sep=large]
L \arrow[r,"α"] \arrow[d, "Ji"'] & X \arrow[d,"f"] \\ % L \arrow[r,"α"] \arrow[d, "Ji"'] & X \arrow[d,"f"] \\
M \arrow[r,"β"] \arrow[ur, dashed,"{j(α,β)}"] & Y % M \arrow[r,"β"] \arrow[ur, dashed,"{j(α,β)}"] & Y
\end{tikzcd} % \end{tikzcd}
\end{equation*} % \end{equation*}
there is a specified lift \(j(α)\), such that for every \((a,b) : k → i\) in \J, the diagram % there is a specified lift \(j(α)\), such that for every \((a,b) : k → i\) in \J, the diagram
\begin{equation*} % \begin{equation*}
\begin{tikzcd}[column sep=huge] % \begin{tikzcd}[column sep=huge, row sep = large]
L' \arrow[r,"a"] \arrow[d,"Jk"'] & L \arrow[r,"α"] \arrow[d,"Ji"] & X \arrow[d,"f"] \\ % L' \arrow[r,"a"] \arrow[d,"Jk"'] & L \arrow[r,"α"] \arrow[d,"Ji"] & X \arrow[d,"f"] \\
M' \arrow[urr, dashed, near start, "{j(αa,βb)}"] \arrow[r,"b"] & M \arrow[ur, dashed, "{j(α,β)}"'] \arrow[r,"β"] & Y % M' \arrow[urr, dashed, near start, "{j(αa,βb)}"] \arrow[r,"b"] & M \arrow[ur, dashed, "{j(α,β)}"'] \arrow[r,"β"] & Y
\end{tikzcd} % \end{tikzcd}
\end{equation*} % \end{equation*}
commutes. % commutes.
\end{definition} % \end{definition}
%
\begin{remark} % \begin{remark}
If we start with a set \J and make it into a discret category, then we will get our old definition % If we start with a set \J and make it into a discret category, then we will get our old definition
of right lifting map, with an additional choice function for lifts. Which exists anyway if we % of right lifting map, with an additional choice function for lifts. Which exists anyway if we
are ready to believe in strong enough choice principles. % are ready to believe in strong enough choice principles.
\end{remark} % \end{remark}
%
After we defined a class of right maps by those lifting problems, it is not clear that these % After we defined a class of right maps by those lifting problems, it is not clear that these
will yield a wfs. % will yield a wfs.
To get those we invoke a refinment of Quillens small object argument, namely % To get those we invoke a refinment of Quillens small object argument, namely
Garners small object argument \cite{garnerUnderstandingSmallObject2009}. This has the additional benefit of producing % Garners small object argument \cite{garnerUnderstandingSmallObject2009}. This has the additional benefit of producing
algebraic weak factorization systems (awfs). For a quick reference see % algebraic weak factorization systems (awfs). For a quick reference see
\cite{riehlMadetoOrderWeakFactorization2015a}, and for an extensive tour \cite{riehlAlgebraicModelStructures2011}, or % \cite{riehlMadetoOrderWeakFactorization2015a}, and for an extensive tour \cite{riehlAlgebraicModelStructures2011}, or
\cite{bourkeAlgebraicWeakFactorisation2016,bourkeAlgebraicWeakFactorisation2016a}. The main takeaway for % \cite{bourkeAlgebraicWeakFactorisation2016,bourkeAlgebraicWeakFactorisation2016a}. The main takeaway for
us is the following theorem \cite[][Theorem 2.28]{riehlAlgebraicModelStructures2011} % us is the following theorem \cite[][Theorem 2.28]{riehlAlgebraicModelStructures2011}
%
\begin{theorem}[Garner]\label{smallObject} % \begin{theorem}[Garner]\label{smallObject}
Let \(\M\) be a cocomplete category satisfying either of the following conditions. % Let \(\M\) be a cocomplete category satisfying either of the following conditions.
\begin{itemize} % \begin{itemize}
\item[(\ast)] Every \(X ∈ \M\) is \(α_X\)-presentable for some regular cardinal \(α_X\). % \item[(\(*\))] Every \(X ∈ \M\) is \(α_X\)-presentable for some regular cardinal \(α_X\).
\item[(\dagger)] Every \(X ∈ \M\) is \(α_X\)-bounded with respect to some proper, well-copowered % \item[(\dagger)] Every \(X ∈ \M\) is \(α_X\)-bounded with respect to some proper, well-copowered
orthogonal factorization system on \(\M\), for some regular cardinal \(α_X\). % orthogonal factorization system on \(\M\), for some regular cardinal \(α_X\).
\end{itemize} % \end{itemize}
Let \( J : \mathcal{J}\M^\) be a category over \(\M^\), with \(\mathcal{J}\) small. Then the free awfs on \(\mathcal{J}\) exists % Let \( J : \mathcal{J}\M^\) be a category over \(\M^\), with \(\mathcal{J}\) small. Then the free awfs on \(\mathcal{J}\) exists
and is algebraically-free on \(\mathcal{J}\). % and is algebraically-free on \(\mathcal{J}\).
\end{theorem} % \end{theorem}
Without going to much into the size issues, every locally presentable categoy satisfies \((\ast)\). And as \(\□\) is a presheaf % Without going to much into the size issues, every locally presentable categoy satisfies \((\ast)\). And as \(\□\) is a presheaf
category over a countable category, it is locally presentable. Also by beeing a presheaf category makes it automatically cocomplete. % category over a countable category, it is locally presentable. Also by beeing a presheaf category makes it automatically cocomplete.
%
We also won't clarify all the consequences that getting an awfs instead of a wfs, brings with it. For us the important % We also won't clarify all the consequences that getting an awfs instead of a wfs, brings with it. For us the important
thing is, that every awfs has an underlying wfs. The free part of that statement unsures that this construction preserves % thing is, that every awfs has an underlying wfs. The free part of that statement unsures that this construction preserves
the right lifting class. % the right lifting class.
%
Now we need to formulate our definitions in a way that agrees with this conditions of this theorem. % Now we need to formulate our definitions in a way that agrees with this conditions of this theorem.
%
\subsubsection{Cofibrations and trivial fibrations} \subsubsection{Cofibrations and trivial fibrations}
The short way of specifying this weak factorization system is by saying The short way of specifying this weak factorization system is by saying
the cofibrations are the monomorphisms. Another, longer, but for the further development more the cofibrations are the monomorphisms. Another, longer, but for the further development more
enlightening way is to formulate this via uniform lifting properties. enlightening way is to formulate this via uniform lifting properties.
It is also not equivalent from the viewpoint of an awfs, as this definition has extra conditions on the
chosen lifts.
The condition to have uniform lifts comes from \cite{CCHM} and was generalized in \cite{gambinoFrobeniusConditionRight2017}. This requirement is motivated by the wish to have a The condition to have uniform lifts comes from \cite{CCHM} and was generalized in \cite{gambinoFrobeniusConditionRight2017}. This requirement is motivated by the wish to have a
constructive interpretation of Cubical type theory. As we don't focus on the typetheoretic interpretation, but on the model structure we won't constructive interpretation of Cubical type theory\todo{see ctt intro chapter when it is done}. As we don't focus on the type-theoretic interpretation but on the model structure, we won't
explore this further. explore this further.
\begin{definition} \begin{definition}
Let \(\mathcal{M}\) be a pullback stable class of morphisms. Let \(J : \mathcal{M} → \A^→\) be a subcategory whose objects are a pullback stable class of morphisms in \A, and whose morphisms are
A map \(f\) has the \emph{uniform right lifting property} against a \(\mathcal{M}\), if for every \(m ∈ \mathcal{M}\) the cartesian squares between them. The category \(J^⋔\) is called the category of \emph{uniform right lifting morphisms} with respect
every lifting problem of the form to \(\mathcal{M}\). Objects of \(J^⋔\) are said to have the \emph{uniform right lifting property}.
\begin{equation*}
\begin{tikzcd}[column sep=large]
A \arrow[r,"α"] \arrow[d,"m"'] & X \arrow[d,"f"] \\
B \arrow[r,"β"] \arrow[ur, dashed, "{j(α,β)}"] & Y
\end{tikzcd}
\end{equation*}
has a specified solution, such that for every pullback square the following diagram with the specified
lifts commute.
\begin{equation*}
\begin{tikzcd}[column sep=large]
C \arrow[r,"a"] \arrow[d,"m'"] \pb & A \arrow[r, "α"] \arrow[d, "m"] & X \arrow[d,"f"] \\
D \arrow[r,"b"'] \arrow[urr,dashed,"{j(αa,βb)}"] & B \arrow[ur, dashed, "{j(α,β)}"'] \arrow[r,"β"'] & Y
\end{tikzcd}
\end{equation*}
\end{definition} \end{definition}
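Spelled out, and merely restating the explicit diagrammatic formulation this replaces: an object of this category is a map \(f\) together with a chosen lift \(j(α,β)\) for every lifting problem \((α,β)\) of a map \(m\) of the subcategory against \(f\), such that for every cartesian square \((a,b) : m' → m\) the chosen lifts are compatible in the sense that
\begin{equation*}
	j(αa,βb) = j(α,β)b \text{.}
\end{equation*}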
\begin{definition}
A \emph{generating cofibration} is a monomorphism into a cube \(c : C ↣ \I^n\)
\end{definition}
\begin{definition}
A \emph{uniform trivial fibration} is a map with the uniform right lifting property against the generating cofibrations.
\end{definition}
Alternatively a definition more in line with our discussion above
\begin{definition} \begin{definition}
The category of \emph{uniform generating cofibrations} has as objects monomorphisms into a cube \(C ↣ \I^n\) of arbitrary dimension, and The category of \emph{uniform generating cofibrations} has as objects monomorphisms into a cube \(C ↣ \I^n\) of arbitrary dimension, and
as morphisms cartesian squares between those. as morphisms cartesian squares between those.
@ -148,18 +126,18 @@ Alternatively a definition more in line with our discussion above
into \(\□^→\) into \(\□^→\)
\end{definition} \end{definition}
% TODO show equivalence in a rigorous way. \todo{show that the cofib are all monos}
\subsubsection{Trivial cofibrations and fibrations} \subsubsection{Trivial cofibrations and fibrations}
As we sketced above, the strategy will be to make more maps trivial cofibrations. This is done As we sketched above, the strategy will be to make more maps trivial cofibrations. This is done
by making it harder to be a fibration. Before we give the definition in full generality, we by making it harder to be a fibration. Before we give the definition in full generality, we
need to address which coequalizer we talk about precisely. need to address which coequalizer we talk about precisely.
Of course doing this only in the 2-dimensional case is not enough. For this we need to say what Of course doing this only in the 2-dimensional case is not enough. For this we need to say what
the “swap” maps are. What these should do is permute the dimensions. So let \(Σ_k\) be the symmetric group the “swap” maps are. What these should do is permute the dimensions. So let \(Σ_k\) be the symmetric group
on \(k\) elements. For every \(σ' ∈ Σ_k\), we get a map from \( σ : \I^k → \I^k \), by \(π_{σ'(1)}, … , π_{σ'(k)}\), on \(k\) elements. For every \(σ' ∈ Σ_k\), we get a map \( σ : \I^k → \I^k \) given by \((π_{σ'(1)}, … , π_{σ'(k)})\),
where \(π_l : \I^k → \I \) is the \(l\)-th projection. Also if we have a subcube \(f : \I^n \rightarrowtail \I^k\), where \(π_l : \I^k → \I \) is the \(l\)-th projection. Also if we have a subcube \(f : \I^n \rightarrowtail \I^k\),
we get a pullback square of the following form we get a pullback square of the following form
\begin{equation*} \begin{equation*}
@ -170,7 +148,7 @@ we get a pullback square of the following form
\end{equation*} \end{equation*}
whose right arrow we call \(σf\). whose right arrow we call \(σf\).
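For instance, for \(k = 2\) and the transposition \(σ' = (1\,2) ∈ Σ_2\), this recipe gives (just unwinding the definition above, nothing new)
\begin{equation*}
	σ = (π_{σ'(1)}, π_{σ'(2)}) = (π_2, π_1) : \I^2 → \I^2 \text{,}
\end{equation*}
which is exactly the swap map from the motivating diagram at the start of this subsection.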
% %
Now we desrcibe our generating trivial cofibrations. We remeber example \ref{Leibniz:Ex:OpenBox}. We would Now we describe our generating trivial cofibrations. We recall \cref{Leibniz:Ex:OpenBox}. We would
like to do the same here, but a bit more generally. like to do the same here, but a bit more generally.
@ -272,7 +250,7 @@ Now we can finaly give the definition of uniform equivariant fibrations.
\begin{definition} \begin{definition}
The category of \emph{generating uniform equivariant trivial cofibrations} has as objects The category of \emph{generating uniform equivariant trivial cofibrations} has as objects
generating cofibrations and as morphism squares of the shape defined in \ref{devilSquare} generating cofibrations and as morphisms squares of the shape defined in \cref{devilSquare}
above. above.
\end{definition} \end{definition}
\begin{definition} \begin{definition}
@ -280,26 +258,401 @@ Now we can finaly give the definition of uniform equivariant fibrations.
uniform equivariant trivial cofibrations into \(\□^→\). uniform equivariant trivial cofibrations into \(\□^→\).
\end{definition} \end{definition}
\begin{notation}
By \cref{awfs:smallObject} these categories of maps form an awfs. We call it \((\TCF,\FF)\) and the underlying wfs
\((\Ct,\F)\)
\end{notation}
We will show that this is a model structure by using the theory of premodel structures We will show that this is a model structure by using the theory of premodel structures
\cites[section 3]{cavalloRelativeEleganceCartesian2022}[section 3]{solution}{sattlerCYLINDRICALMODELSTRUCTURES}. \cites{bartonModel2CategoryEnriched2019}[section 3]{cavalloRelativeEleganceCartesian2022}[section 3]{solution}{sattlerCYLINDRICALMODELSTRUCTURES}.
\todo{if there is time (there probably won't be) include the premodel theory and explain} \todo{if there is time (there probably won't be) include the premodel theory and explain}
This machinery gives us a nice condition to check whether we are actually dealing with a model structure. But first we need to
introduce the notion of a premodel structure.
This machinery gives us a nice condition to check if we are actually dealing with a modelstructure. \begin{definition}[{\cites{bartonModel2CategoryEnriched2019}[def. 3.1.1]{cavalloRelativeEleganceCartesian2022}}]\label{def:preModelStructure}
A \emph{premodel structure} on a finitely cocomplete and complete category \M consists of two weak factorization systems
\((C,F_t)\) (the \emph{cofibrations} and \emph{trivial fibrations}) and \((C_t, F)\) (the \emph{trivial cofibrations} and \emph{fibrations})
on \M such that \(C_t ⊆ C\) (or equivalently \(F_t ⊆ F\) ).
\end{definition}
\begin{theorem}[{\cite[theorem 3.3.5]{cavalloRelativeEleganceCartesian2022}}]\label{thm:premodelIsModel} We also need the definition of a property of such a structure.
Let \(M\) be a cylindrical premodel category in which \begin{definition}[{\cite[Definition 3.3.3]{cavalloRelativeEleganceCartesian2022}}]
We say a premodel category \M has the \emph{fibration extension property} when for any fibration
\(f : Y → X\) and trivial cofibration \(m : X → X'\), there exists a fibration \(f' : Y' → X'\) whose base change along \(m\) is
\(f\):
\begin{eqcd*}[row sep=large, column sep=large]
Y \arrow[r,dashed] \arrow[d,"f"] \pb & Y' \arrow[d,dashed, "f'"] \\
X \arrow[r,"m"] & X'
\end{eqcd*}
\end{definition}
\begin{definition}[{\cite[Definition 3.2.5]{cavalloRelativeEleganceCartesian2022}}]
A \emph{cylindrical} premodel structure on a category \(E\) consists of a premodel structure
and an adjoint functorial cylinder on \(E\) that are compatible in the following sense:
\begin{itemize}
\item \(\⊗ (-)\) preserves cofibrations and trivial cofibrations;
\item \(δ_k \⊗ (-)\) sends cofibrations to trivial cofibrations for \(k ∈ \{0, 1\}\).
\end{itemize}
\end{definition}\todo{clean up and explain}
We get this characterizing theorem.
\begin{theorem}[{\cite[Theorem 3.3.5]{cavalloRelativeEleganceCartesian2022}}]\label{thm:premodelIsModel} \todo{if this gets longer expanded give it its own section}
Let \(\M\) be a cylindrical premodel category in which
\begin{enumerate} \begin{enumerate}
\item all objects are cofibrant; \item all objects are cofibrant;
\item the fibration extension property is satisfied. \item the fibration extension property is satisfied.
\end{enumerate} \end{enumerate}
Then the premodel structure on \(M\) defines a model structure. Then the premodel structure on \(\M\) defines a model structure.
\end{theorem} \end{theorem}
For this all to make sense we need to talk about what a \emph{cylindrical premodel} structure is, and what the The first item is quickly verified, as all monomorphisms are cofibrations.\todo{make a small lemma to prove it (closure properties got you covered)}
\emph{fibration extension property} is. The second condition we won't show directly. We will get it for free if we construct fibrant universes, which we want to do anyway.
We will postpone to show that this is indeed a model structure with a univalent universe. First we will give a description \subsubsection{Fibrant Universes}
for a model structure on the Dedekind cubes. In this section we will switch between commutative diagrams and the internal language of our presheaf topos.
As these tools can get quite technical and tedious to verify, we will only present the idea of how these constructions
work and refer for the technical details to the source material.
The general construction of these universes follows mostly the same idea. We lift a Grothendieck universe from \Set to
the desired presheaf category, obtaining a Hofmann-Streicher universe \cite{hofmannLiftingGrothendieckUniverses1997},
classifying small maps.
This works in general for any presheaf topos over a small site. Afterwards we restrict this universe to only small fibrations.
We are then faced with a challenge: we need to prove that the resulting classifier is itself a fibration. In some cases
this can be made easier or avoided by stating this construction in a specific way.
For example by the arguments in \cite[Section 7]{awodeyHofmannStreicherUniverses2023} it suffices to construct \emph{classifying types}.
\begin{definition}\label{def:modelStructure:classifyingType}
A \emph{classifying type} for an awfs is a construction \(\Fib\) such that for every object \(X\)
and every morphism \(f : A → X\) there is a map \({\Fib(f) : \Fib(A) → X}\) whose sections are in correspondence with
\(\mathrm{R}\)-algebra structures on \(f\).
Given a map \(g : Y → X\), the construction \(\Fib\) is stable under
pullback along \(g\):
\begin{eqcd*}[row sep=large]
g^*A
\arrow[r]
\arrow[d, "g^*f"']
\pb
& A
\arrow[d, "f"]
\\
Y
\arrow[r,"g"]
& X
\\
\Fib(g^*A)
\arrow[u, "\Fib(g^*f)"]
\arrow[r]
\arrow[ur,"\urcorner",phantom, very near start]
& \Fib(A)
\arrow[u, "\Fib(f)"']
\end{eqcd*}
\end{definition}
If the wfs whose right maps we try to classify is functorial, we might think of this as an internalization
of the \(\R\)-algebra structures on \(f\).
We will first construct classifying types; after that we construct universes from them.
In preparation for the fibrant case we will first classify the trivial fibrations.
We don't have any equivariance condition on them, and thus they are the same as in \cite[Section 2]{awodeyCartesianCubicalModel2023},
when we assume \(Φ = Ω\).
Let us first think about the easiest case, where the codomain of a trivial fibration is the terminal object.
We should ask ourselves what the \TFF-algebra structures are\todo{introduce notion above}. These are intuitively
choice functions of uniform liftings against cofibrations. So let us think about them first. If we picture
a lifting problem,
\begin{eqcd}\label{diag:liftProbTerminal}
A \arrow[d, tail] \arrow[r] & X \arrow[d] \\
C \arrow[r] & *
\end{eqcd}
we might think of this as a filling problem of some shape \(A\) in \(X\) to some shape \(C\) in \(X\).
Or in other words as a completion of a partial map\footnote{Recall that a partial map \(C → X\) is a span \(C \leftarrow A → X\),
where the left map is a monomorphism.} \(C → X\). We can classify these by the partial map classifier,
given by \(X^+ ≔ Σ_{φ:Ω}X^{[φ]}\)\todo{give a reference}. This gives us an object \(X^+\) (technically equipped with a map into the terminal)
and a monomorphism \( η_X : X → X^+\) such that for every partial map \(C → X\) there is a unique map \(a: C → X^+\) completing
the following pullback square.
\begin{eqcd*}
A \arrow[d, tail] \arrow[r] \pb & X \arrow[d,"η_X"] \\
C \arrow[r, "a"] & X^+
\end{eqcd*}
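Reading this off from the formula (a sketch, using nothing beyond the \(Σ\)-type above): an element of \(X^+\) is a pair \((φ,x)\) of a proposition \(φ : Ω\) together with a partial element \(x : [φ] → X\) defined on its extent, and the point is given by
\begin{equation*}
	η_X(x) ≔ (⊤, λ\_.\, x) \text{,}
\end{equation*}
regarding a total element as an everywhere defined partial element.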
This makes \(()^+\) into a pointed endofunctor, and with little surprise the \(+\)-algebras are in correspondence with
the \TFF-algebras.
\begin{proposition}[{\cite[Proposition 15]{awodeyCartesianCubicalModel2023}}]
The algebras of the pointed endofunctor \(()^+\) are in correspondence with the algebras of the pointed endofunctor \TFF
with codomain \(*\).
\end{proposition}
\begin{proof}[Proof sketch]
A \(+\)-algebra is an object \(X\) together with a morphism \(α : X^+ → X\) such that \(α η_X = \id\).
Take some lifting problem like \cref{diag:liftProbTerminal} and define the chosen lift to be \(αa\). If we
are given a chosen lift, we compose it with \(η_X\) to retrieve \(a\).
\end{proof}
The benefit of \(()^+\)-algebras is that they are morphisms in \□ rather than in \(\□^→\).
We can internalize the condition to be a \(()^+\)-algebra by means of the internal \(\Hom\).
\begin{eqcd*}
+\textrm{-Alg}(X)
\arrow[d]
\arrow[r]
\pb
& {[X^+,X]}
\arrow[d,"{[η_X,\id_X]}"]
\\
*
\arrow[r,"λ\id_X"]
& {[X,X]}
\end{eqcd*}
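In the internal language this pullback simply carves out the algebra structures; as a sketch, it says no more than
\begin{equation*}
	+\textrm{-Alg}(X) ≅ \{\, α : X^+ → X \mid α η_X = \id_X \,\} \text{.}
\end{equation*}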
It is immediately clear that sections of the left map correspond to the desired \(+\)-algebra structures.
If we want to achieve the same with a trivial fibration that has arbitrary codomain \(Y\), we might just move
to the slice topos \(\faktor{\□}{Y}\), where such a map becomes again a map into the terminal.
To avoid confusion we will call the partial map classification functor in
the slice over \(Y\) by the name \(()^{+Y}\). The algebras will be called relative \(+\)-algebras.
We are even more fortunate: this functor commutes with the pullback
functor \(Y^* : \□ → \faktor{\□}{Y}\),
\begin{eqcd*}
\faktor{\□}{Y}
\arrow[r, "()^{+Y}"]
& \faktor{\□}{Y}
\\
\□
\arrow[u,"Y^*"]
\arrow[r,"()^+"]
& \□
\arrow[u,"Y^*"]
\end{eqcd*}
which allows us to compute it fiberwise \cite[Proposition 12]{awodeyCartesianCubicalModel2023}. This allows us to write
\((A)^{+Y} = Σ_{y:Y}A_y^+\), where \(A_y\) is the fiber of \(A\) at \(y\). We will still continue to write \(+\)-Alg, when
there is no confusion.
As all the involved functors, \(()^+\), pullbacks and exponentials, are pullback stable, so is \(+\)-Alg, which lets us conclude
the following.
\begin{proposition}
\(+\)-Alg is a classifying type for trivial fibrations.
\end{proposition}
We will move down a very similar path as \cite{awodeyCartesianCubicalModel2023}, but need to keep track of the group
actions of the symmetric groups on representables.
For this we will pass to a category that \cite{solution} calls cubical species. The argument in essence will be very similar
to the argument there, though in a different language.
\begin{definition}[compare {\cite[Section 4]{solution}}]
By abuse of notation we will write \(Σ_k\) for the one object groupoid induced by the symmetric group \(Σ_k\).
Let \(Σ ≔ ∐_{k ∈ ℕ} Σ_k\); we define \(\□^Σ\) to be the category of \emph{cubical species}. For a cubical
species \(A\) we write \(A_k\) for the cubical set in the image of \(Σ_k\).
\end{definition}
\begin{remark}
\(\□^Σ\) is a presheaf topos.
\end{remark}
\begin{remark}
\cite{solution} excludes the case for \(k = 0\), because they want to exhibit a model category on cubical species.
For the construction on cubical sets this makes no difference, compare \cite[Remark 4.3.17]{solution}.
\end{remark}
\begin{remark}
Going forward we keep the names trivial fibrations, cofibrations, fibration structure, \dots, even though we have not
defined any of these notions on \(\□^Σ\). We mean the objects that are equivalently classified in this
topos, as in the procedure above. This can actually be made into a model structure where these are the corresponding
fibrations, cofibrations, \dots; for details see \cite[Section 4]{solution}.
\end{remark}
We need to define our equivalent of an interval, and the generic point inclusion. Recall that a functor from a group
into a category is the same as choosing an object of that category together with a group action on it.
\begin{definition}[{\cite[Example 4.3.5]{solution}}]
Let \(\mathrm{I} : Σ → \□\) be the functor that sends \(Σ_k\) to \(\I^k\) together with the group action that freely
permutes the dimensions.
\end{definition}
\begin{remark}[{\cite[Lemma 4.2.7]{solution}}]
The interval \(\mathrm{I}\) is tiny.
\end{remark}
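Let us recall what tininess will buy us below (a sketch of the standard meaning; the notation \(\sqrt{}\) for the extra right adjoint is only used in this remark): the exponential functor \(()^{\mathrm{I}}\) itself has a right adjoint, i.e.
\begin{equation*}
	\Hom\left(X^{\mathrm{I}}, Y\right) ≅ \Hom\left(X, \sqrt{Y}\right) \text{,}
\end{equation*}
and it is this extra adjoint that the pullback stability argument mentioned below relies on.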
\begin{definition}[{\cite[Definition 4.3.3]{solution}}]
The diagonal \(δ : \mathrm{I} → \mathrm{I} × \mathrm{I}\) as a map in the slice \(\faktor{\□^Σ}{\mathrm{I}}\) is called
the generic point.
\end{definition}
% We will classify what in \cite{awodeyCartesianCubicalModel2023} would be called biased fibrations with regard to \(δ_0\) in \(\□^Σ\).
% These are not the kind of fibrations we are interested in, as we want lifts against the generic point. We will sort that out later.
% As the monic maps in \(\□^Σ\) are precisely those that are componentwise monic, we are just going to characterize
% componentwise fibrations. But because we passed to \(\□^Σ\), the classifying type will have group actions on
% every level, and we need to restrict to those, that are invariant under those group actions.
The main idea to continue classifying uniform fibrations from here on out is quite simple. As being a uniform fibration is
equivalent to being sent to a trivial fibration by the right adjoint of the Leibniz application functor of the interval,
we get that a \(+\)-algebra structure on \(δ ⇒ f\)\todo{write the right thing here}
is equivalent to a \FF-algebra structure on \(f\). This must then
be reformulated to get the desired map. This reformulation is quite tedious and so we will refer
for the details to
\cite[The classifying types of unbiased fibration structures]{awodeyCartesianCubicalModel2023}.
In the source this takes place in \□, but it only makes use of the features we already established for \(\□^Σ\).
An important detail
of this procedure is that, in order to get the resulting classifying type pullback stable, the argument uses that
the interval is tiny.
We get a pullback stable construction \(\Fib^Σ(f)\)\footnote{this can be made into a classifying type for some
awfs, compare \cite[Section 4]{solution}}\todo{this footnote 2 is bad, but how to make different}
whose sections now correspond componentwise to \FF-algebra structures that are equivariant with respect
to the group action of \(Σ_k\).
We are now going to extract the classifying type for equivariant cubical fibrations. For this we need to relate cubical sets
and cubical species. There exists the constant diagram functor \(Δ\) that sends a cubical set to the constant cubical species.
Since \□ is complete this functor has a right adjoint\footnote{and also a left adjoint as \□ is cocomplete}
\(Γ\), compare \cite[Section 5.1]{solution}, which is given by
\begin{equation*}
Γ(A) ≔ ∏\limits_{k∈ℕ}(A_k)^{Σ_k}
\end{equation*}
where \((A_k)^{Σ_k}\) is the cubical set of \(Σ_k\)-fixed points. It is not hard to see that this is indeed
the right adjoint functor.
\begin{proposition}
We have a pair of adjoints \(Δ ⊣ Γ\)
\end{proposition}
\begin{proof}
We need to show
\begin{equation*}
\Hom_{\□^Σ}(Δ(A),B) = \Hom_{\□}\left(A,∏\limits_{k∈ℕ}(B_k)^{Σ_k}\right) \text{.}
\end{equation*}
But this is immediately clear: on the left-hand side we have componentwise equivariant natural transformations
from \(Δ(A)_k = A\) to \(B_k\). They can only be valued in the fixed points in \(B_k\) as \(Δ(A)_k\) carries the
trivial group action. A map on the right side is also a collection of natural transformations from \(A\) to
the fixed points of \(B_k\).
\end{proof}
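As a quick sanity check of the formula (a sketch, using only that every element is a fixed point of a trivial action): for a cubical set \(B\) we get
\begin{equation*}
	ΓΔ(B) = ∏\limits_{k∈ℕ}((ΔB)_k)^{Σ_k} = ∏\limits_{k∈ℕ} B \text{,}
\end{equation*}
and the unit \(η_B : B → ΓΔ(B)\) is the diagonal; it is this map along which we pull back below.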
We now get that an \FF-algebra structure on \(f : A → B\) corresponds to a section
\begin{eqcd*}
{} & \mathrm{Fib}^{Σ_k}(Δ(A))
\arrow[d,"\mathrm{Fib}^{Σ_k}(Δ(f))"]
\\ Δ(B) \arrow[r,equal]
\arrow[ru, dashed]
& Δ(B)
\end{eqcd*}
By adjointness we can transpose this lift to a lift in cubical sets, and by pulling back
along the adjunction unit to a split of a map over \(B\) (compare \cite[Lemma 5.3.3, 2.1.16]{solution}).
\begin{eqcd*}[row sep=large]
\mathrm{Fib}(A)
\arrow[d,"\mathrm{Fib}(f)"]
\arrow[r]
\pb
& Γ(\mathrm{Fib}^{Σ_k}(Δ(A)))
\arrow[d,"Γ(\mathrm{Fib}^{Σ_k}(Δ(f)))"]
\\ B \arrow[r,"η_B"]
\arrow[ru, dashed]
\arrow[u,bend left, dashed]
& ΓΔ(B)
\end{eqcd*}
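In one line, and merely naming the pullback in this diagram (here \(η_B\) is the unit of \(Δ ⊣ Γ\), not the partial map classifier point):
\begin{equation*}
	\mathrm{Fib}(f) ≔ η_B^{*}\, Γ(\mathrm{Fib}^{Σ_k}(Δ(f))) \text{,}
\end{equation*}
so the dashed arrows exhibit the correspondence between sections of \(\mathrm{Fib}(f)\) over \(B\) and \FF-algebra structures on \(f\).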
We will now present the main idea of how to get fibrant universes from such classifying types.
The technical details can be found in \cite[Section 7]{awodeyHofmannStreicherUniverses2023}.
One might hope that we can construct a universal fibration, in the sense that every fibration
is a pullback of this universal fibration. This cannot happen for size reasons. We will do
the next best thing and construct a universe for big enough cardinals \(κ\). Because
our set theory has Grothendieck universes this will give us universes for \(κ\)-small fibrations.
To start we will call our Hofmann-Streicher universe, or \(κ\)-small map classifier, \({p : \dot{\mathcal{V}}_κ → \mathcal{V}_κ}\).
\begin{proposition}[{\cite[Proposition 10]{awodeyHofmannStreicherUniverses2023}}]
For a large enough \(κ\), there is a universe for \(κ\)-small fibrant maps, in the sense that there is
a small fibration \(π : \dot{\mathcal{U}}_κ → \mathcal{U}_κ\) such that every small fibration \(f : A → X\) is
a pullback of it along a canonical classifying map \(a\)
\begin{eqcd*}
A
\pb
\arrow[d,"f"]
\arrow[r]
& \dot{\mathcal{U}}_{κ}
\arrow[d,"π_κ"]
\\
X
\arrow[r,"a"]
& \mathcal{U}_{κ}
\end{eqcd*}
\end{proposition}
\begin{proof}
We can then construct our universe that classifies \(κ\)-small fibrations by setting \(\mathcal{U}_κ ≔ \mathrm{Fib}(\dot{\mathcal{V}}_κ)\)
and building the following pullback.
\begin{eqcd*}[row sep=large, column sep = large]
\dot{\mathcal{U}}_κ
\pb
\arrow[d,"π_κ"]
\arrow[r]
& \dot{\mathcal{V}}_{κ}
\arrow[d,"p"]
\\
\mathcal{U}_κ
\arrow[r,"\mathrm{Fib}(p)"]
& \mathcal{V}_κ
\end{eqcd*}
To see this is a fibration we will exhibit a split of the classifying type of \(π_κ\).
As the classifying type is pullback stable we get the following diagram
\begin{eqcd*}[row sep=large, column sep = large]
\dot{\mathcal{U}}_κ
\pb
\arrow[d,"π_κ"]
\arrow[r]
& \dot{\mathcal{V}}_{κ}
\arrow[d,"p"]
\\
\mathcal{U_κ}
\arrow[r,"\mathrm{Fib}(p)"]
& \mathcal{V_κ}
\\
\mathrm{Fib}(\dot{\mathcal{U}}_κ)
\arrow[u]
\arrow[r]
\arrow[ur,phantom,very near start,"\urcorner"]
& \mathrm{Fib}(\dot{\mathcal{V}}_κ)
\arrow[u,"\mathrm{Fib}(p)"]
\end{eqcd*}
The lower pullback square is a pullback of \(\Fib(p)\) along itself. This map has a split, namely the diagonal.
To show that this classifies small fibrations, let us consider a small fibration \(f : A → X\).
Because it is a small map we get it as a pullback along a canonical map \(a : X → \mathcal{V}\).
\begin{eqcd*}
{}
& {}
& \dot{\mathcal{U}}
\arrow[dr]
\arrow[dd]
&
\\
{}
& |[alias=A]| A
\arrow[rr,crossing over]
\arrow[ru,dashed]
& & \dot{\mathcal{V}}
\arrow[dd]
\\
\Fib(A)
\arrow[rr]
\arrow[dr]
& & \mathcal{U}
\arrow[dr]
&
\\
{}
& |[alias=X]| X
\arrow[rr,"a"]
\arrow[lu,dashed,"s",bend left]
\arrow[ur,dashed,"s'"]
& & \mathcal{V}
\arrow[from=A,to=X,crossing over]
\end{eqcd*}
We get the lower square as a pullback as \Fib is pullback stable and \(\mathcal{U} = \Fib(\dot{\mathcal{V}})\).
As \(f\) is a fibration, we get a split \(s\) of the fibration structure. And so we get a unique corresponding map
\(s'\), which induces the required pullback square.
\end{proof}
\todo{introduce Leibniz pullback exponential}
\todo{introduce (unbiased) uniform fibrations and how they behave with Leibniz constructions}
\todo{hate my life}
\subsubsection{HS-Universe}
Assume this as given for now; if we have time we can write this out. \todo{delete it or write it out, and move it to some preliminary place}

View file

@ -1,11 +1,11 @@
\documentclass[../Main.tex]{subfiles} \documentclass[../Main.tex]{subfiles}
\begin{document} \begin{document}
% \chapter{Preliminaries} % \chapter{Preliminaries}
\section{Category Theory} % \section{Category Theory}
We already assume some basic notions of category theory. For the completly unitiated there are good % We already assume some basic notions of category theory. For the completly unitiated there are good
references for example \cite{maclaneCategoriesWorkingMathematician1978} or \cite{awodeyCategoryTheory2006}. % references for example \cite{maclaneCategoriesWorkingMathematician1978} or \cite{awodeyCategoryTheory2006}.
We will also assume some familarities with presheave categories \cite{maclaneSheavesGeometryLogic1994}. % We will also assume some familarities with presheave categories \cite{maclaneSheavesGeometryLogic1994}.
But we will revisit some branches of category theory that we will encouter later. % But we will revisit some branches of category theory that we will encouter later.
% \subfile{Model_Categories} % \subfile{Model_Categories}

View file

@ -1,18 +1,20 @@
\documentclass[../Main.tex]{subfiles} \documentclass[../Main.tex]{subfiles}
\begin{document} \begin{document}
\section{Equivalence to spaces}\todo{do I need to explain idempotent completions?} \section{Equivalence to spaces}\todo{\printline do I need to explain idempotent completions?}
\todo{give all natural transformations \(\) instead of \(\)}
For this section we fix the following notation. We write \(n\) for a set with cardinality \(n\), For this section we fix the following notation. We write \(n\) for a set with cardinality \(n\),
and identify \(2\) with the set \(\{⊥,⊤\}\). Wen write \([n]\) for the well ordered sets and identify \(2\) with the set \(\{⊥,⊤\}\). We write \([n]\) for the well-ordered sets
\(\{0,…,n\}\). \(\{0,…,n\}\).
We would love to compare the model categories on \(\□\) and \(\). For this we want at least We would love to compare the model categories on \(\□\) and \(\). For this we want at least
an adjoint pair between those. If we somehow could get a functor on the site level, we could an adjoint pair between those. If we somehow could get a functor on the site level, we could
get an adjoint triple. But there is no immediate obvious functor to do the job. get an adjoint triple. But there is no immediate obvious functor to do the job.
The usual trick would be to go look at the idempotent completions of these categories. These The usual trick would be to go look at the idempotent completions of these categories. These
have usually more objects and are sometimes more suitable as the codomain of a functor, while usually have
more objects and are sometimes more suitable as the codomain of a functor, while
the presheaf category stays the same (up to equivalence). the presheaf category stays the same (up to equivalence).
For example the idempotent completion of \dCube is the category \FL of finite lattices and For example, the idempotent completion of \dCube is the category \FL of finite lattices and
monotone maps \cites{streicherSimplicialSetsCubical2021}{sattlerIdempotentCompletionCubes2019}. monotone maps \cites{streicherSimplicialSetsCubical2021}{sattlerIdempotentCompletionCubes2019}.
So we have automatically \(\widehat{\FL} = \dcSet\), So we have automatically \(\widehat{\FL} = \dcSet\),
and there is an obvious inclusion \(i: Δ → \FL\). Sadly for us \(Δ\) is already idempotent complete. and there is an obvious inclusion \(i: Δ → \FL\). Sadly for us \(Δ\) is already idempotent complete.
@ -31,22 +33,22 @@ To get around this issue we can embed \(j: □ → \dCube → \FL\), and get int
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
We can define \(j\) also explicitly like this: We can also define \(j\) explicitly like this:
\begin{definition} \begin{definition}
Let \(𝟚\) the finite order \(⊥ ≤ ⊤\), if it is convenient we will identify this with the order \(01\). Let \(𝟚\) be the finite order \(⊥ ≤ ⊤\). If it is convenient we will identify this with the order \(01\).
\end{definition} \todo{move this somewhere sane} \end{definition} \todo{\printline move this somewhere sane}
\begin{definition}\label{def:j} \begin{definition}\label{def:j}
Let \(j : □ → \FL\) be the functor given by the following data: Let \(j : □ → \FL\) be the functor given by the following data:
\begin{itemize} \begin{itemize}
\item On objects: \(j(\{⊥|x_1…x_n|\}) ≔ 𝟚^n\) \todo{use \(⊥⊤\) notation in introduction chapter} \item On objects: \(j(\{⊥|x_1…x_n|\}) ≔ 𝟚^n\) \todo{\printline use \(⊥⊤\) notation in introduction chapter}
\item On morphisms: \(j(f)(g)(x) ≔ \begin{cases}g(f(x)) & f(x) \notin 𝟚 \\ f(x) & f(x) ∈ 𝟚\end{cases}\) \item On morphisms: \(j(f)(g)(x) ≔ \begin{cases}g(f(x)) & f(x) \notin 𝟚 \\ f(x) & f(x) ∈ 𝟚\end{cases}\)
\end{itemize} \end{itemize}
\end{definition} \end{definition}
In \cite{rhielVid} it is claimed that \(i^*j_!\) is the triangulation functor and \(j^*i_!\) a In \cite{riehlVid} it is claimed that \(i^*j_!\) is the triangulation functor and \(j^*i_!\) a
left Quillen homotpoy inverse. In the mean time Reid Barton observed in an yet unpublished paper, left Quillen homotopy inverse. In the meantime Reid Barton observed in a yet unpublished paper,
\todo{how to cite this? claimed in \cite{solution}} \todo{\printline how to cite this? claimed in \cite{solution}}
that this triangulation can be described by a single functor on the level of sites \(Δ → □\). One that this triangulation can be described by a single functor on the level of sites \(Δ → □\). One
could now establish Quillen equivalence using this functor, for details see \cite[sec. 6]{solution}. could now establish Quillen equivalence using this functor, for details see \cite[sec. 6]{solution}.
We are going to verify the original argument. To give an overview of the necessary steps: We are going to verify the original argument. To give an overview of the necessary steps:
@ -58,17 +60,16 @@ We are going to verify the original argument. To give an overview of the necessa
\end{enumerate} \end{enumerate}
Before we jump into the formal proofs we like to get some intuition for the four functors of interest. Before we jump into the formal proofs we like to get some intuition for the four functors of interest.
To make sense of this all we need to understand what happens with \FL. \FL has the luxury as containing The category \FL has the luxury of containing
both \(Δ\) as the well orderings and \dCube as \(𝟚^n\). As \FL is the idempotent completion of \dCube, both \(Δ\) as the well orderings and \dCube as \(𝟚^n\). As \FL is the idempotent completion of \dCube,
we also now that a presheaf on \FL, is completly determined by its behavior on \dCube. The two functors we also know that a presheaf on \FL is completely determined by its behavior on \dCube. The two functors
\(i^*\) and \(j^*\) are just the restrictions, but also \(i_!\) and \(j_!\) are geometrically not so \(i^*\) and \(j^*\) are just the restrictions, and we can also understand \(i_!\) and \(j_!\) geometrically.
difficult to understand. On the elements in the image of \(i\) (or \(j\)) they agree with the original definition.
Let \(F ∈ \), then \(F(x) = i_!(F)(i(x))\). The interesting question is what does \(i_!\) define Let \(F ∈ \), then \(F(x) = i_!(F)(i(x))\). The interesting question is what does \(i_!\) define
on cubes and \(j_!\) on simplicials. \(j_!\) will basically triangulate the cube (see \fref{prop:triangulation}), on cubes (aka \(\Yo 𝟚^n\)) and \(j_!\) on simplicials (aka \(\Yo [n]\)). \(j_!\) will basically triangulate the cube (see \cref{prop:triangulation}),
while \(i_!\) will add just cubes that are degenerated to simplicials. while \(i_!\) will add just all possible cubes that are degenerated to simplicials.
To understand \(i^*\) a bit better we might take a look at how things that are defined on \(𝟚^n\) extend to simplicials. To understand \(i^*\) a bit better we might take a look at how things that are defined on \(𝟚^n\) extend to simplicials.
For this we need to exhibit \([n]\), as a retract of \(𝟚^n\). For this we define two maps. For this we need to exhibit \([n]\) as a retract of \(𝟚^n\). For this we define two maps.
\begin{definition} \begin{definition}
Let Let
\begin{itemize} \begin{itemize}
@ -77,60 +78,81 @@ For this we need to exhibit \([n]\), as a retract of \(𝟚^n\). For this we def
\end{itemize} \end{itemize}
\end{definition} \end{definition}
We can see directly that this is a split retract pair and thus gives rise to an idempotent \(e = ir : 𝟚^n → 𝟚^n\). We can see directly that this is a split retract pair and thus gives rise to an idempotent \(e = ir : 𝟚^n → 𝟚^n\).
And by general abstract nonesens we get that \(r\) is an absolute coequilizer map of \(e\) and \(\id\). As well as \(i\) And by general abstract nonsense we get that \(r\) is an absolute coequalizer map of \(e\) and \(\id\). Similarly \(i\)
is an aboslute equilizer of \(e\) and \(\id\). So for a presheaf is an absolute equalizer of \(e\) and \(\id\). So for a presheaf
\(F\) on \FL, we can compute \(F([n])\) as the equilizer from \(F(e)\) and \(F(\id)\), or the coequilizer of \(F(e)\) and \(F(\id)\). \(F\) on \FL, we can compute \(F([n])\) as the equalizer of \(F(e)\) and \(F(\id)\), or the coequalizer of \(F(e)\) and \(F(\id)\)
in \Set.
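Concretely, in \Set this just says (a sketch; we only use that \(F(\id) = \id\))
\begin{equation*}
	F([n]) ≅ \{\, a ∈ F(𝟚^n) \mid F(e)(a) = a \,\} \text{,}
\end{equation*}
the fixed points of the idempotent \(F(e)\).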
\begin{proposition}\label{prop:i_!Nothing} \begin{proposition}\label{prop:i_!Nothing}
\(i_!Δ^n\) agrees with \(\Yo [n]\). \(i_!Δ^n\) agrees with \(\Yo [n]\).
\end{proposition} \end{proposition}
\begin{proof} \begin{proof} We have
\begin{equation*} \begin{equation*}
\Hom_{\FL}(i_!Δ^n,B) = \Hom_{Δ}(Δ^n,i^*B) = B(i([n])) = B([n]) = \Hom_{\FL}(\Yo [n], B) \Hom_{\FL}(i_!Δ^n,B) = \Hom_{Δ}(Δ^n,i^*B) = B(i([n])) = B([n]) = \Hom_{\FL}(\Yo [n], B) \text{,}
\end{equation*} \end{equation*}
And by uniqueness of adjoints the claim follows. and by uniqueness of adjoints, the claim follows.
\end{proof} \end{proof}
\begin{proposition}\label{prop:j_!Nothing} \begin{proposition}\label{prop:j_!Nothing}
\(j_!(\I^n) = \Yo 𝟚^n\) \(j_!(\I^n) = \Yo 𝟚^n\)
\end{proposition} \todo{move to a sane place} \end{proposition} \todo{\printline move to a sane place}
\begin{proof} \begin{proof}
We have that \(\Hom(j_!(\I^n),B) = \Hom(\I^n,j^*(B))\). And we get We have that \(\Hom(j_!(\I^n),B) = \Hom(\I^n,j^*(B))\). And we get
\begin{equation} \begin{equation*}
\Hom(\Yo 𝟚^n, B) = B(𝟚^n) = B(j(n)) = \Hom(\I^n,j^*B) \Hom(\Yo 𝟚^n, B) = B(𝟚^n) = B(j(n)) = \Hom(\I^n,j^*B)\text{.}
\end{equation} \end{equation*}
thus by uniqueness of adjoints we have, that \(j_!(\I^n) = \Yo 𝟚^n\). Thus, by uniqueness of adjoints, we have that \(j_!(\I^n) = \Yo 𝟚^n\).
\end{proof} \end{proof}
Let us start with step 1. We need to show that \(i_!\) and \(i^*\) preserve cofibrations (or \subsection{\(i_!\) and \(i^*\) are left Quillen functors}
in other words monomorphisms), \(i^*\) preservers fibrations and trivial cofibrations. The hard We need to show that \(i_!\) and \(i^*\) preserve cofibrations (or
part will be to show that \(i_!\) preserves monomorphisms and we will do it last. in other words monomorphisms) and trivial cofibrations. The hard
part will be to show that \(i_!\) preserves monomorphisms and we will do that last.
\begin{proposition} \begin{lemma}\label{lem:restrictionPreserveMonos}
Let \(F: \A → \B\) be a functor; then the restriction functor \(F^* : \hat{\B} → \hat{\A}\) preserves monomorphisms.
\end{lemma}
\begin{proof}
Let \(f\) be a monomorphism in \(\hat{\B}\). As being monic can be tested componentwise, we need
to show that \((F^*f)_x\) is monic for an arbitrary \(x∈\A\). This is just \(f_{(Fx)}\), which is monic by assumption.
\end{proof}
\begin{proposition}\label{prop:i*preservesMonos}
\(i^*\) preserves monomorphisms. \(i^*\) preserves monomorphisms.
\end{proposition} \end{proposition}
\begin{proof} \begin{proof}
Given a monomorphism \(f\) in \(\). As being monic can be tested component wise we need Directly by \cref{lem:restrictionPreserveMonos}.
to show that \((i^*f)_x\) is monic, for an arbitrary \(x∈□\). This is just \(f_{(ix)}\) which is monic by assumption.
\end{proof} \end{proof}
\begin{proposition} \begin{proposition}\label{prop:i*preservesTrivialCofib}
\(i^*\) preserves trivial cofibrations \todo{maybe equivalently \(i_*\) preserves fibrations} \(i^*\) preserves trivial cofibrations.
\end{proposition} \end{proposition}
\begin{proof} \begin{proof}
Sketch: We are in the comfortable position that \(i^*\) is both a left and a right adjoint.
generation trivial cofibrations are pushout product of mono and endpoint inclusion into the interval. \(i^*\) preserves the interval inclusion. As pushout products with the interval inclusion send cofibrations
left and right adjoint functor (like \(i_*\) preserve these) pushout product with mono and interval to trivial cofibrations and \(i^*\) preserves monomorphisms by \cref{prop:i*preservesMonos}, \(i^*\) sends trivial cofibrations
are also trivial cofibrations in . to trivial cofibrations.
\end{proof} \end{proof}
We get now to the difficult part of step \ref{step1}. The argument we are going to follow is due to Sattler As the next proof is mostly a straightforward induction, we don't repeat the full argument and only give a proof sketch.
\begin{proposition}[{\cite[prop. 3.6]{sattlerIdempotentCompletionCubes2019}}]\label{prop:i!preservesTrivialCofib}
\(i_!\) preserves trivial cofibrations.
\end{proposition} \todo{\printline maybe give the full proof but low prio}
\begin{proof}[Proof sketch.]
It is fairly easy to see that \(i_!(\Yo -)\) on morphisms is valued in weak equivalences, by checking that representables in \dcSet are
weakly contractible.
Then one shows inductively that one can build the horn inclusions as pushouts of horn inclusions of dimension \(n\) and an
inclusion of the \(n\)-dimensional horn into an \((n+1)\)-dimensional horn. As \(i_!\) is cocontinuous and the trivial cofibrations,
being a left lifting class, have the necessary closure properties, the claim follows.
\end{proof}
We now get to the difficult part of step \ref{step1}. The argument we are going to follow is due to Sattler \cite{sattlerIdempotentCompletionCubes2019}
and involves the Reedy Structure of \(Δ\). We are going through the whole argument here, as it is a good preparation and involves the Reedy Structure of \(Δ\). We are going through the whole argument here, as it is a good preparation
for \(j_!\) in step \ref{step2}. for \(j_!\) in step \ref{step2}. We also use this to add a lot of detail and improve the criterion a bit, see the remark at \cref{lem:idemLemma3.5}.
\begin{proposition}[{\cite[prop. 3.3]{sattlerIdempotentCompletionCubes2019}}] \begin{proposition}[{\cite[prop. 3.3]{sattlerIdempotentCompletionCubes2019}}]\label{prop:i!preservesMonos}
\(i_!\) preserves monomorphisms. \(i_!\) preserves monomorphisms.
\end{proposition} \end{proposition}
Following \cite{sattlerIdempotentCompletionCubes2019} we are first going to prove two technical lemma. Following \cite{sattlerIdempotentCompletionCubes2019} we are first going to prove two technical lemmas.
Note that a repetition of this argument is also good to check if we actually need an elegant Reedy structure Note that a repetition of this argument is also good to check if we actually need an elegant Reedy structure
or if we can do with just an Eilenberg-Zilber category. or if we can do with just an Eilenberg-Zilber category.
@ -140,11 +162,11 @@ or if we can do with just an Eilenberg-Zilber category.
\end{lemma} \end{lemma}
\begin{remark} \begin{remark}
The original source has as an extra condition that these pullbacks preserve face and degeneracy maps. We The original source has as an extra condition that these pullbacks preserve face and degeneracy maps. We
don't need this by making use of the fact that degeneracies split. do not need this by making use of the fact that degeneracies split.
\end{remark} \end{remark}
For this proof we will make some use of the equivalence between presheaves and discrete Grothendieck fibrations. For this proof we will make some use of the equivalence between presheaves and discrete Grothendieck fibrations.
This has the benefit of exhibiting our monomorphism, as a Grothendieck fibration. We also can lift the This has the benefit of exhibiting our monomorphism as a Grothendieck fibration. We also can lift the
Eilenberg-Zilber structure from our site, a notion that is hard to express with presheaves. Eilenberg-Zilber structure from our site, a notion that is hard to express with presheaves.
\begin{lemma}\label{lem:liftEZFib} \begin{lemma}\label{lem:liftEZFib}
@ -152,8 +174,8 @@ Eilenberg-Zilber structure from our site, a notion that is hard to express with
Then we can lift the Eilenberg-Zilber structure to \(Q\). Then we can lift the Eilenberg-Zilber structure to \(Q\).
\end{lemma} \end{lemma}
The hard part of the proof will be the lifting of absolute pushouts. In general pushouts don't need to The hard part of the proof will be the lifting of absolute pushouts. In general pushouts don't need to
lift, as the fibre above the pushout in the base category might be empty. If we had a split pushout like in lift, as the fiber above the pushout in the base category might be empty. If we had a split pushout like in
lemma \ref{absolutePushout}, we could lift the splits to solve that problem. Sadly not all absolute pushouts \cref{absolutePushout}, we could lift the splits to solve that problem. Sadly, not all absolute pushouts
are of this form, but something a bit weaker is true. The first appearance of this observation seems to be are of this form, but something a bit weaker is true. The first appearance of this observation seems to be
in \cite{pareAbsoluteColimits1971}. in \cite{pareAbsoluteColimits1971}.
\begin{lemma}\label{lem:absPushChar} \begin{lemma}\label{lem:absPushChar}
@ -165,9 +187,9 @@ in \cite{pareAbsoluteColimits1971}.
is an absolute pushout diagram if and only if there exists is an absolute pushout diagram if and only if there exists
\begin{enumerate} \begin{enumerate}
\item A section \(u : P → B\) of \(m\). \item A section \(u : P → B\) of \(m\).
\item Morphisms \(r_1…r_k : B → A \) and \(s_1s_k : B → A\) for some \( k ≥ 1\), such that \(ps_1 = \id_B\), \item Morphisms \(r_1,…,r_k : B → A \) and \(s_1,…,s_k : B → A\) for some \( k ≥ 1\), such that \(ps_1 = \id_B\),
\(qs_i = qr_i\) for all \(i\), \(pr_i = ps_{i+1}\) for all \(i < k\), and \(pr_k = um\). \(qs_i = qr_i\) for all \(i\), \(pr_i = ps_{i+1}\) for all \(i < k\), and \(pr_k = um\).
\item Morphisms \(t_1t_{l+1} : C → A\) and \(v_1v_l : C → A\) for some \(l ≥ 0\), such that \(qt_1 = \id_C\), \item Morphisms \(t_1,…,t_{l+1} : C → A\) and \(v_1,…,v_l : C → A\) for some \(l ≥ 0\), such that \(qt_1 = \id_C\),
\(pt_i = pv_i\) for all \(i < l\), \(qv_i = qt_{i+1}\) for all \(i ≤ l\), and \(pt_{l+1} = un\). \(pt_i = pv_i\) for all \(i < l\), \(qv_i = qt_{i+1}\) for all \(i ≤ l\), and \(pt_{l+1} = un\).
\end{enumerate} \end{enumerate}
or the symmetric situation where \(B\) and \(C\) are interchanged. or the symmetric situation where \(B\) and \(C\) are interchanged.
@ -178,7 +200,8 @@ in \cite{pareAbsoluteColimits1971}.
A \arrow[r, "p" ] \arrow[d, "q" ] & B \arrow[d, "b"] \\ A \arrow[r, "p" ] \arrow[d, "q" ] & B \arrow[d, "b"] \\
C \arrow[r, "c"] & X C \arrow[r, "c"] & X
\end{eqcd*} \end{eqcd*}
a commutative square. We now need to find a \(x : P → X\), that witnesses \(P\) as a pushout. be a commutative square. As all this data gets preserved by any functor we only need to prove that such a diagram
is already a pushout. We now need to find an \(x : P → X\) that witnesses \(P\) as a pushout.
Since \(m\) splits, \(x\) will be unique automatically. We define \(x ≔ bu\). We now need to check Since \(m\) splits, \(x\) will be unique automatically. We define \(x ≔ bu\). We now need to check
if it makes both of the evident triangles commute. We do this by chasing through all the data we have been given. if it makes both of the evident triangles commute. We do this by chasing through all the data we have been given.
\begin{equation*} \begin{equation*}
@ -186,29 +209,29 @@ in \cite{pareAbsoluteColimits1971}.
\end{equation*} \end{equation*}
and and
\begin{equation*} \begin{equation*}
xn = bun = bpt_{l+1} = cqt_{l+1} = cqv_l = bpv_l = bpt_l = … = cqt_1 = c xn = bun = bpt_{l+1} = cqt_{l+1} = cqv_l = bpv_l = bpt_l = … = cqt_1 = c \text{.}
\end{equation*} \end{equation*}
Now let us assume the given square is an absolute pushout. We then need to construct all the data from above. Now let us assume the given square is an absolute pushout. We then need to construct all the data from above.
To get the desired split \(u\), we will apply the \(hom(P,)\) functor and get a pushout square in \(\Set\). To get the desired split \(u\), we will apply the \(\Hom(P,-)\) functor and get a pushout square in \(\Set\).
\begin{eqcd*} \begin{eqcd*}
\hom(P,A) \arrow[r, "p"] \arrow[d, "q"] \pu & \hom(P,B) \arrow[d, "m"] \\ \Hom(P,A) \arrow[r, "p"] \arrow[d, "q"] \pu & \Hom(P,B) \arrow[d, "m"] \\
\hom(P,C) \arrow[r, "n"] & \hom(P,P) \Hom(P,C) \arrow[r, "n"] & \Hom(P,P)
\end{eqcd*} \end{eqcd*}
That means we have a surjection from \(\hom(P,B) + \hom(P,C)\hom(P,P)\). That means we have a surjection from \(\Hom(P,B) + \Hom(P,C) → \Hom(P,P)\).
A preimage of \(\id_P\) under this surjection is a morphism \(u\) for that either \(mu = \id_P\) A preimage of \(\id_P\) under this surjection is a morphism \(u\) for that either \(mu = \id_P\)
or \(nu = \id_P\) holds. We assume WLOG the first case. or \(nu = \id_P\) holds. We assume WLOG the first case.
To get \(r_1…r_k\) and \(s_1s_k\) that relate \(\id_B\) and \(um\), we look at the image under the To get \(r_1,…,r_k\) and \(s_1,…,s_k\) that relate \(\id_B\) and \(um\), we look at the image under the
\(\hom(B,)\) functor. \(\Hom(B,-)\) functor.
\begin{eqcd*} \begin{eqcd*}
\hom(B,A) \arrow[r, "p"] \arrow[d, "q"] \pu & \hom(B,B) \arrow[d, "m"] \\ \Hom(B,A) \arrow[r, "p"] \arrow[d, "q"] \pu & \Hom(B,B) \arrow[d, "m"] \\
\hom(B,C) \arrow[r, "n"] & \hom(B,P) \Hom(B,C) \arrow[r, "n"] & \Hom(B,P)
\end{eqcd*} \end{eqcd*}
We know that \(um\) and \(\id_B\) are sent to the same map in \(\hom(B,P)\), because We know that \(um\) and \(\id_B\) are sent to the same map in \(\hom(B,P)\), because
\(mum = m = m\id_B\). \(mum = m = m\id_B\).
We can now construct a sequence of maps in \(\hom(B,A)\) if we alternate between taking preimages of images We can now construct a sequence of maps in \(\hom(B,A)\) if we alternate between taking preimages of images
under \(p\) and \(q\). Lets call the preimages along \(p\) \(s_i\), and the preimages along \(q\) \(r_i\). under \(p\) and \(q\). Let's call the preimages along \(p\) by \(s_i\), and the preimages along \(q\) by \(r_i\).
This procedure immediately guarantees the equations This procedure immediately guarantees the equations
\begin{align*} \begin{align*}
qs_i = qr_i & & pr_i = ps_{i+1} \text{.} qs_i = qr_i & & pr_i = ps_{i+1} \text{.}
@ -217,12 +240,12 @@ in \cite{pareAbsoluteColimits1971}.
\(um\) get sent to the same element along \(m\) there exists a \(k\) and a choice of \(r_i\) and \(s_i\) such that \(um\) get sent to the same element along \(m\) there exists a \(k\) and a choice of \(r_i\) and \(s_i\) such that
\(pr_k = um\). \(pr_k = um\).
The maps for condition three, can be constructed with the same technique after applying the \(\hom(C,)\) functor. The maps for condition three can be constructed with the same technique after applying the \(\hom(C,-)\) functor.
The index shift comes from the fact that \(\id_C ∈ \hom(C,C)\) and \(un ∈ \hom(C,B)\) are in opposite corners The index shift comes from the fact that \(\id_C ∈ \hom(C,C)\) and \(un ∈ \hom(C,B)\) are in opposite corners
of the resulting diagram. of the resulting diagram.
\end{proof} \end{proof}
To be able to lift this structure make sure we can lift commuting triangles first. To be able to lift this structure, we make sure we can lift commuting triangles first.
\begin{lemma}\label{lem:gfibliftcommute} \begin{lemma}\label{lem:gfibliftcommute}
Let \(F : Q → \C\) be a discrete Grothendieck fibration, \(f: A → B\) be a morphism in \(Q\), and Let \(F : Q → \C\) be a discrete Grothendieck fibration, \(f: A → B\) be a morphism in \(Q\), and
\(g : C → F(B)\) and \(h : F(A) → C\) such that \(gh = F(f)\). We can lift \(g\) and \(h\) to form \(g : C → F(B)\) and \(h : F(A) → C\) such that \(gh = F(f)\). We can lift \(g\) and \(h\) to form
@ -238,7 +261,7 @@ To be able to lift this structure make sure we can lift commuting triangles firs
It also suffices to just give an object \(B\) and a commutative diagram in \C. It also suffices to just give an object \(B\) and a commutative diagram in \C.
\end{remark} \end{remark}
\begin{proof} \begin{proof}
That we can get lifts \(g'\) and \(h'\) follows directly from the definition of discrete Grothendieck fibration. That we can get lifts \(g'\) and \(h'\) follows directly from the definition of discrete Grothendieck fibrations.
We only need to argue that the domain of \(h'\) is indeed \(A\) and that the diagram commutes. As \(f\) and \(g'h'\) are lifts We only need to argue that the domain of \(h'\) is indeed \(A\) and that the diagram commutes. As \(f\) and \(g'h'\) are lifts
of \(F(f) = gh\), we get \(f = g'h'\) by the uniqueness of lifts. of \(F(f) = gh\), we get \(f = g'h'\) by the uniqueness of lifts.
\end{proof} \end{proof}
@ -250,25 +273,25 @@ To be able to lift this structure make sure we can lift commuting triangles firs
these. Isomorphisms and split epis are preserved by every functor, so only noninvertible monomorphisms remain. these. Isomorphisms and split epis are preserved by every functor, so only noninvertible monomorphisms remain.
We will establish this in two steps, first showing mono preservation and then reflection of inverses. We will establish this in two steps, first showing mono preservation and then reflection of inverses.
Given some monomorphism \(f : A ↣B\) in \(Q\) and two morphisms \(g,h : C → \p(A)\) in \C, such that \(\p(f)h = \p(f)g \) Given some monomorphism \(f : A ↣B\) in \(Q\) and two morphisms \(g,h : C → \p(A)\) in \C, such that \(\p(f)h = \p(f)g \),
We can lift \(g\) and \(h\) to \(g' : C' → A\) and \(h' : C' → A\). By lemma \ref{lem:gfibliftcommute} we have \(fg' = fh'\) and we can lift \(g\) and \(h\) to \(g' : C' → A\) and \(h' : C' → A\). By \cref{lem:gfibliftcommute}, we have \(fg' = fh'\) and
thus \(g' = h'\). We get directly \( g = \p(g') = \p(h') = h\) and thus \(\p(f)\) is monic. thus \(g' = h'\). We get directly \( g = \p(g') = \p(h') = h\) and thus \(\p(f)\) is monic.
Assume we have a map \(f\) in \(Q\) such that \(\p(f)\) has a (one sided) inverse. By lemma \ref{lem:gfibliftcommute} the lift Assume we have a map \(f\) in \(Q\) such that \(\p(f)\) has a (one sided) inverse. By \cref{lem:gfibliftcommute}, the lift
of that (one sided) inverse, is also a (one sided) inverse of \(f\). of that (one sided) inverse is also a (one sided) inverse of \(f\).
This fact also helps us directly in the next step. We need to show that every morphism factors into a split epi followed by This fact also helps us directly in the next step. We need to show that every morphism factors into a split epi followed by
a monomorphism. We can lift a factorization by lemma \ref{lem:gfibliftcommute}, and as one sided inverses are preserved by lifting a monomorphism. We can lift a factorization by \cref{lem:gfibliftcommute}, and as one sided inverses are preserved by lifting,
\(\p\) reflects split epis. So we only need to show that monos are preserved by liftings. Let \(f : A → B\) be a morphism \(\p\) reflects split epis. So we only need to show that monos are preserved by liftings. Let \(f : A → B\) be a morphism
in \(Q\), such that \(\p(f)\) is monic and let \(g,h : C → A\) such that \(fg = fh\). Because \(p(f)\) is monic \(\p(g) = \p(h)\), in \(Q\), such that \(\p(f)\) is monic, and let \(g,h : C → A\) such that \(fg = fh\). Because \(\p(f)\) is monic, \(\p(g) = \p(h)\),
and thus \(g\) and \(h\) are liftings of the same morphism. As liftings are unique \(g = h\) and \(f\) monic. and thus \(g\) and \(h\) are liftings of the same morphism. As liftings are unique, \(g = h\) and \(f\) is monic.
As a last step we must establish that a span of split epis has an absolute pushout. So let \(f: A → B \) and \(g : A → C\) be spilt As a last step we must establish that a span of split epis has an absolute pushout. So let \(f: A → B \) and \(g : A → C\) be split
epimorphisms in \(Q\). As these are preserved by functors \(\p(f)\) and \(\p(g)\) are split epis too. So there is an absolute pushout epimorphisms in \(Q\). As these are preserved by functors, \(\p(f)\) and \(\p(g)\) are split epis too. So there is an absolute pushout
of \(\p(f)\) and \(\p(g)\). By lemma \ref{lem:absPushChar}, we can extend this square, by all the extra morphisms that characterize of \(\p(f)\) and \(\p(g)\). By \cref{lem:absPushChar}, we can extend this square, by all the extra morphisms that characterize
the absolute pushout. We first lift the split established by 1 in lemma \ref{lem:absPushChar}, to get our candidate pushout. the absolute pushout. We first lift the split established by 1. in \cref{lem:absPushChar}, to get our candidate pushout.
Lifting the rest of the data, and checking that the required equalities still hold is a tedious but straight forward repeated Lifting the rest of the data and checking that the required equalities still hold is a tedious but straightforward repeated
application of lemma \ref{lem:liftEZFib}. Afterwards we can apply lemma \ref{lem:absPushChar} again to verify that we indeed got application of \cref{lem:liftEZFib}. Afterwards we can apply \cref{lem:absPushChar} again to verify that we indeed got
an absolute pushout in \(Q\). an absolute pushout in \(Q\).
\end{proof} \end{proof}
\begin{remark} \begin{remark}
@ -279,12 +302,12 @@ To be able to lift this structure make sure we can lift commuting triangles firs
\end{remark} \end{remark}
\begin{lemma}\label{lem:liftPullbacks} \begin{lemma}\label{lem:liftPullbacks}
Let \(F : Q → \C\) be a discrete Grothendieck fibration. Given as span in \(Q\) and a pullback of the image of the Let \(F : Q → \C\) be a discrete Grothendieck fibration. Given a span in \(Q\) and a pullback of the image of the
span in \C. Then there exists a lift of that pullback completing the original span to a pullback square. span in \C, there exists a lift of that pullback completing the original span to a pullback square.
\end{lemma} \end{lemma}
\begin{proof} \begin{proof}
Let \(p : A → C\) and \(q : B → C\) be a span in \(Q\), such that its image under \(F\) completes to a pullback Let \(p : A → C\) and \(q : B → C\) be a span in \(Q\), such that its image under \(F\) completes to a pullback
square. We can lift this square to a commuting square in \(Q\), by lemma \ref{lem:gfibliftcommute}. square. We can lift this square to a commuting square in \(Q\) by \cref{lem:gfibliftcommute}.
We now need to check that this again a pullback square. We now need to check that this is again a pullback square.
\begin{eqcd*}[column sep=tiny, row sep=small] \begin{eqcd*}[column sep=tiny, row sep=small]
X \arrow[dd, bend right, "x"' ] \arrow[rrrd, bend left=15, "y"] \arrow[dr, dashed] & & & \\ X \arrow[dd, bend right, "x"' ] \arrow[rrrd, bend left=15, "y"] \arrow[dr, dashed] & & & \\
@ -294,27 +317,27 @@ To be able to lift this structure make sure we can lift commuting triangles firs
& P \arrow[rr,"p"] \arrow[dl,"q"'] \pb & & F(A) \arrow[dl,"F(f)"] \\ & P \arrow[rr,"p"] \arrow[dl,"q"'] \pb & & F(A) \arrow[dl,"F(f)"] \\
F(B) \arrow[rr, "F(g)"'] & & F(C) & F(B) \arrow[rr, "F(g)"'] & & F(C) &
\end{eqcd*} \end{eqcd*}
So let us take any cospan \((x,y)\) that completes the span \((f,g)\) to a commuting square in \(Q\). So let us take any span \((x,y)\) that completes the cospan \((f,g)\) to a commuting square in \(Q\).
We can map this to \C via \(F\) and get a universal map witnessing that the square in \C is indeed a pullback. We can map this to \C via \(F\) and get a universal map witnessing that the square in \C is indeed a pullback.
We can lift this map uniquely to \(P'\) along \(F\), and by lemma \ref{lem:gfibliftcommute} this lift makes the We can lift this map uniquely to \(P'\) along \(F\), and by \cref{lem:gfibliftcommute} this lift makes the
diagram commute. diagram commute.
\end{proof} \end{proof}
We will also need another technical property of discrete Grothendieck fibrations and Eilenberg-Zilber categories. % We will also need another technical property of discrete Grothendieck fibrations and Eilenberg-Zilber categories.
\begin{lemma}\label{lem:bifib} % \begin{lemma}\label{lem:bifib}
Let \(F : P ↣ \C\) be a a monic discrete Grothendieck fibration, and \C be an Eilenberg-Zilber (or elegant Reedy) category. Then % Let \(F : P ↣ \C\) be a a monic discrete Grothendieck fibration, and \C be an Eilenberg-Zilber (or elegant Reedy) category. Then
\(F\) as opcartesian lifts of degeneracy maps. \todo{check if we introduced “degeneracy” and “face” as words in the definition} % \(F\) as opcartesian lifts of degeneracy maps. \todo{\printline check if we introduced “degeneracy” and “face” as words in the definition}
\end{lemma} % \end{lemma}
\begin{proof} % \begin{proof}
So let \(A\) be an object in P and \(f : F(A) → B\) be a degeneracy in \C i.e. a split epi. % So let \(A\) be an object in P and \(f : F(A) → B\) be a degeneracy in \C i.e. a split epi.
We can then lift the split to \(A\), and get a morphism \(g : B' → A\) where \(B'\) is the unique object % We can then lift the split to \(A\), and get a morphism \(g : B' → A\) where \(B'\) is the unique object
above \(B\). We can now lift \(f\), to \(B'\) and get a morphism \(f': A → B'\) above \(f\). Verifying that % above \(B\). We can now lift \(f\), to \(B'\) and get a morphism \(f': A → B'\) above \(f\). Verifying that
\(f'\) has the required universal property is a straight forward repeated application of lemma \ref{lem:gfibliftcommute}. % \(f'\) has the required universal property is a straight forward repeated application of lemma \ref{lem:gfibliftcommute}.
\end{proof} % \end{proof}
\begin{proof}[Proof of Lemma \ref{lem:idemLemma3.4}] \begin{proof}[Proof of Lemma \ref{lem:idemLemma3.4}]
Let \(f : P' ↣ Q'\) be a monomorphism in \(\hat{\C}\). This gives equivalently rise to a monomorphism \(F\) Let \(f : P' ↣ Q'\) be a monomorphism in \(\hat{\C}\). This equivalently gives rise to a monomorphism \(F\)
of discrete Grothendieck fibrations via the category of elements. \todo{do I need to explain this?} of discrete Grothendieck fibrations via the category of elements.
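Recall that for a presheaf \(X\) on \C, its category of elements \(\int X\) has as objects the pairs \((c,x)\) with \(x ∈ X(c)\) and as morphisms \((c,x) → (d,y)\) those \(f : c → d\) in \C with \(X(f)(y) = x\); the evident projection \(\int X → \C\) is a discrete Grothendieck fibration, and up to isomorphism every discrete Grothendieck fibration over \C arises this way.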
We define \(P ≔ \int P'\), \(Q ≔ \int Q'\) and \(F ≔ \int f\). We define \(P ≔ \int P'\), \(Q ≔ \int Q'\) and \(F ≔ \int f\).
\begin{equation*} \begin{equation*}
\begin{tikzcd}[column sep=tiny, row sep=small] \begin{tikzcd}[column sep=tiny, row sep=small]
@ -323,22 +346,20 @@ We will also need another technical property of discrete Grothendieck fibrations
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
That \(\colim\) sends \(f\) to a monomorphism, That \(\colim\) sends \(f\) to a monomorphism
can equivalently stated that \(F\) is monic on connected components. That follows directly from the explicit construction is equivalent to the statement that \(F\) is monic on connected components. That follows directly from the explicit construction
of colimits in \(\Set\). Also \(F\) gets to be a discrete Grothendieck fibration on its own. We can lift the Eilenberg-Zilber of colimits in \(\Set\). Moreover, \(F\) is itself a discrete Grothendieck fibration. We can lift the Eilenberg-Zilber
structure to \(Q\) by lemma \ref{lem:liftEZFib}. Also \(F\) is a bifibration if restricted to the degeneracy maps by lemma \ref{lem:bifib}. structure to \(Q\) by \cref{lem:liftEZFib}.
\todo{We don't need this (and the lemma) anymore}
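Concretely, for any presheaf \(X\) the colimit \(\colim X\) is the set of connected components of its category of elements \(\int X\); this is the explicit construction of colimits in \(\Set\) referred to above.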
So now let S and T be be objects of \(P\) such that \(F(S)\) and \(F(T)\) lie in the same connected component of \(Q\). So now let \(S\) and \(T\) be objects of \(P\) such that \(F(S)\) and \(F(T)\) lie in the same connected component of \(Q\).
This means there is a zigzag connecting \(F(S)\) and \(F(T)\). We can normalize this to a span. To see this we are going This means there is a zigzag connecting \(F(S)\) and \(F(T)\). We can normalize this to a span. To see this, we are going
to show how to turn a cospan into a span of morphisms. So let \( f : D → B\) and \( g : E → B \) be a cospan. Because of the Eilenberg-Zilber to show how to turn a cospan into a span of morphisms. So let \( f : D → B\) and \( g : E → B \) be a cospan. Because of the Eilenberg-Zilber
structure we can factor those into a degeneracy followed by a face map. Lets call them \(f_d\), \(f_f\), \(g_d\) and \(g_f\). structure, we can factor those into a degeneracy followed by a face map. Let us call them \(f_d\), \(f_f\), \(g_d\) and \(g_f\).
\todo{lift the pullback property}
\begin{eqcd*}[column sep=small] \begin{eqcd*}[column sep=small]
D \arrow[dr, two heads, "f_d"] & & P \arrow [dr, tail] \arrow[dl, tail] \arrow[dd, phantom,very near start, "\rotatebox{-45}{\(\lrcorner\)}"]& & E \arrow[dl, two heads, "g_d"'] \\ D \arrow[dr, two heads, "f_d"] & & P \arrow [dr, tail] \arrow[dl, tail] \arrow[dd, phantom,very near start, "\rotatebox{-45}{\(\lrcorner\)}"]& & E \arrow[dl, two heads, "g_d"'] \\
{} &\arrow[dr, tail, "f_f"] \arrow[lu, bend left] & &\arrow[dl,tail, "g_f"'] \arrow[ru, bend right] & \\ {} &\arrow[dr, tail, "f_f"] \arrow[lu, bend left] & &\arrow[dl,tail, "g_f"'] \arrow[ru, bend right] & \\
{} & & B & & {} & & B & &
\end{eqcd*} \end{eqcd*}
We can then construct the pullback \(P\) along \(f_f\) and \(g_f\), and the resulting maps concated with the splits of \(f_d\) and \(g_d\) We can then construct the pullback \(P\) of \(f_f\) and \(g_f\), and the resulting maps composed with the splits of \(f_d\) and \(g_d\)
are a span over \(D\) and \(E\). This means \(F(S)\) and \(F(T)\) are connected by a span of morphisms. Because \(F\) is injective are a span over \(D\) and \(E\). This means \(F(S)\) and \(F(T)\) are connected by a span of morphisms. Because \(F\) is injective
on objects we can lift this span to a span from \(S\) to \(T\) and thus \(S\) and \(T\) are in the same connected component. on objects we can lift this span to a span from \(S\) to \(T\) and thus \(S\) and \(T\) are in the same connected component.
\end{proof} \end{proof}
@ -349,16 +370,16 @@ structures. Sadly this does not work well for Eilenberg-Zilber categories. So we
\begin{lemma}\label{liftReedyToComma} \begin{lemma}\label{liftReedyToComma}
Let \A be an elegant Reedy category, \(i : \A\B \) a functor and \(B ∈ \B \) an object in \B. We can lift Let \A be an elegant Reedy category, \(i : \A\B \) a functor and \(B ∈ \B \) an object in \B. We can lift
the elegant Reedy structure along \(p : B ↓ i → \A \), where \(p\) is the evident projection from the comma the elegant Reedy structure along \(p : B ↓ i → \A \), where \(p\) is the evident projection from the comma
category. \todo{fill the gaps} category.
\end{lemma} \end{lemma}
\begin{proof} \begin{proof}
First we need to define a degree function \(\d: \Obj (\B) → ℕ\), let \(\d'\) be the degree function from the elegant Reedy structure First we need to define a degree function \(\d: \Obj (B ↓ i) → ℕ\). Let \(\d'\) be the degree function from the elegant Reedy structure on \A;
we can then define \(\d ≔ \d'p\). We define the degeneracies and faces, as being degeneracies and faces under \(p\). we can then define \(\d ≔ \d'p\). We define the degeneracies and faces to be the maps that \(p\) sends to degeneracies and faces.
Here we would already get a problem with EZ-categories as we might have maps that are monic in \(B ↓ i\), but not in \B. Here we would already get a problem with EZ-categories, as we might have maps that are monic in \(B ↓ i\) but whose image under \(p\) is not monic.
As a next step we need to construct desired factorization into a degeneracy and a face map. So we take As a next step we need to construct the desired factorization into a degeneracy and a face map. So we take
a map \(f\) in \(B ↓ i\) and factor it as a map in \A into \(f = f_f f_d\). If we try to lift the factorization a map \(f\) in \(B ↓ i\) and factor it as a map in \A into \(f = f_f f_d\). If we try to lift the factorization
we end up with the following diagram. we end up with the following diagram:
\begin{eqcd*} \begin{eqcd*}
{} & B \arrow[dl, "ϕ"'] \arrow[dr, "ϕ'"] & \\ {} & B \arrow[dl, "ϕ"'] \arrow[dr, "ϕ'"] & \\
i(A) \arrow[rr,"i(f)", near start] \arrow[dr, "i(f_d)"'] & & i(A') \\ i(A) \arrow[rr,"i(f)", near start] \arrow[dr, "i(f_d)"'] & & i(A') \\
@ -381,14 +402,14 @@ structures. Sadly this does not work well for Eilenberg-Zilber categories. So we
\end{remark} \end{remark}
\begin{proof} \begin{proof}
So consider a span of maps in \(B ↓ i\). That fulfills the conditions of this lemma. So consider a cospan of maps in \(B ↓ i\) that fulfills the conditions of this lemma.
If we unravel this, we get the following diagram If we unravel this, we get the following diagram:
\begin{eqcd*} \begin{eqcd*}
{} & B \arrow[rdd, "ϕ'", bend left] \arrow[ldd, "ϕ"', bend right] & \\ {} & B \arrow[rdd, "ϕ'", bend left] \arrow[ldd, "ϕ"', bend right] & \\
{} & & \\ {} & & \\
i(X) \arrow[rd, "i(f)"] & & i(Y) \arrow[ld, "i(g)"'] \\ i(X) \arrow[rd, "i(f)"] & & i(Y) \arrow[ld, "i(g)"'] \\
{} & i(Z) & {} & i(Z) &
\end{eqcd*} \end{eqcd*}
By assumption we have pullbacks of \(i(f)\) and \(i(g)\) in \B, that lies in the image of \(i\). So we By assumption we have pullbacks of \(i(f)\) and \(i(g)\) in \B that lie in the image of \(i\). So we
can complete the diagram can complete the diagram
\begin{eqcd*} \begin{eqcd*}
{} & B \arrow[rdd, "ϕ'", bend left] \arrow[ldd, "ϕ"', bend right] \arrow[d,dashed, "{⟨ϕ,ϕ'⟩}"] & \\ {} & B \arrow[rdd, "ϕ'", bend left] \arrow[ldd, "ϕ"', bend right] \arrow[d,dashed, "{⟨ϕ,ϕ'⟩}"] & \\
@ -406,124 +427,132 @@ structures. Sadly this does not work well for Eilenberg-Zilber categories. So we
\end{lemma} \end{lemma}
\begin{remark} \begin{remark}
The original source also requires that these pullbacks preserve face and degeneracy maps. We get around this by making use of the fact The original source also requires that these pullbacks preserve face and degeneracy maps. We get around this by making use of the fact
that degeneracies are split in lemma \ref{lem:liftEZFib} that degeneracies are split in \cref{lem:liftEZFib}.
\end{remark} \end{remark}
\begin{proof} \begin{proof}
We only need to show that \(i_!\) preserves monomorphisms component wise, giving us a functor from \(\hat{\A} → \Set\) for every We only need to show that \(i_!\) preserves monomorphisms componentwise, giving us a functor \(\hat{\A} → \Set\) for every
object in \( B ∈\B\). This functor can be written as \todo{argue why stacks project 00VC} object \( B ∈\B\). This functor can be written as
\begin{eqcd*} \begin{eqcd*}
\hat{\A} \arrow[r,"p^*"] & \widehat{B ↓ i} \arrow[r,"\colim"] & \Set \hat{\A} \arrow[r,"p^*"] & \widehat{B ↓ i} \arrow[r,"\colim"] & \Set
\end{eqcd*} \end{eqcd*}
where \(p : B ↓ i → \A\) is the projection. where \(p : B ↓ i → \A\) is the projection.
This is the usual construction of \(i_!\) in this setting, see for example \cite{stacks00VC}. This is the usual construction of \(i_!\) in this setting, see for example \cite{stacks00VC}.
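Concretely, for a presheaf \(X\) on \A and an object \(B ∈ \B\), this composite computes \(i_!(X)(B) ≅ \colim\, p^*X\), the colimit of the restricted presheaf \(p^*X\) on the comma category \(B ↓ i\), whose objects are the pairs \((A, φ : B → i(A))\).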
It is trivial that \(p^*\) preserves monomorphisms, so we only need to check that \(\colim\) does. It is trivial that \(p^*\) preserves monomorphisms, so we only need to check that \(\colim\) does.
We want to apply lemma \ref{lem:idemLemma3.4} We want to apply \cref{lem:idemLemma3.4}.
We can lift the elegant Reedy structure from \A to \( B ↓ i \) along \(p\) by lemma \ref{liftReedyToComma}. We can lift the elegant Reedy structure from \A to \( B ↓ i \) along \(p\) by \cref{liftReedyToComma}.
And as the face maps in \(B↓i\) are defined as maps that \(p\) sends to face maps in \A. We can lift the And as the face maps in \(B↓i\) are defined as the maps that \(p\) sends to face maps in \A, we can lift the
required pullbacks to \(B↓i\) by lemma \ref{commaPullback}. required pullbacks to \(B↓i\) by \cref{commaPullback}.
\end{proof} \end{proof}
\begin{proposition}[{\cite[proposition 3.3]{sattlerIdempotentCompletionCubes2019}}]\label{idemProp3.3}
\(i_! : \hat{Δ}\widehat{_{∧∨}}\) preserves monomorphisms.
\end{proposition} \begin{proof}[Proof of \cref{prop:i!preservesMonos}]
\begin{proof} We want to verify the conditions for \cref{lem:idemLemma3.5}. Our first observation is that \(Δ\) does not have arbitrary
We want to verify the conditions for lemma \ref{lem:idemLemma3.5}. Our first observation is, that \(Δ\). Dos not have arbitrary pullbacks of face maps. Intuitively speaking we have only the non-empty ones. So let us unpack the condition that
pullbacks of face maps. Intuitively speaking we have only the non empty ones. So let us unpack the condition that
we only need pullbacks if the cospan of face maps lies in the image of the projection \(B ↓ i → Δ\) for some \(B\). we only need pullbacks if the cospan of face maps lies in the image of the projection \(B ↓ i → Δ\) for some \(B\).
That means there is a \(B ∈ \FL\) such that That means there is a \(B ∈ \FL\) such that the following square commutes:
\begin{eqcd*} \begin{eqcd*}
{} & B \arrow[rd] \arrow[ld] & \\ {} & B \arrow[rd] \arrow[ld] & \\
i(X) \arrow[rd, "i(f_f)"] & & i(Y) \arrow[ld, "i(g_f)"'] \\ i(X) \arrow[rd, "i(f_f)"] & & i(Y) \arrow[ld, "i(g_f)"'] \\
{} & i(Z) & {} & i(Z) &
\end{eqcd*} \end{eqcd*} %COMMENT
As the face maps in \(Δ\) are monic we can identify \(X\) and \(Y\) with their respective image in \(Z\). As \FL does not As the face maps in \(Δ\) are monic, we can identify \(X\) and \(Y\) with their respective images in \(Z\). As \FL does not
contain the empty poset, they share at least a point, and the pullback is given by \(\im f_f ∩ \im g_f\). A quick calculation contain the empty poset, they share at least a point, and the pullback is given by \(\im f_f ∩ \im g_f\). A quick calculation
shows that this is also a pullback in \FL and thus we can apply lemma \ref{lem:idemLemma3.5} shows that this is also a pullback in \FL and thus we can apply \cref{lem:idemLemma3.5}.
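For instance, the two face maps \([1] → [2]\) with images \(\{0,1\}\) and \(\{1,2\}\) have \([0]\) as their pullback, included as the shared vertex \(\{1\}\), matching the description \(\im f_f ∩ \im g_f\) above.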
\end{proof} \end{proof}
\subsection{Step 2} \subsection{\(j^*\) and \(j_!\) are left Quillen}
We are now going to show that \(j^*\) and \(j_!\) are left Quillen. As expected the hard part will be again to show that We are now going to show that \(j^*\) and \(j_!\) are left Quillen. As expected, the hard part will be again to show that
\(j_!\) preserves monomorphisms. \(j_!\) preserves monomorphisms.
So let us start with \(j^*\) again. So let us start with \(j^*\) again.
\begin{proposition} \begin{proposition}\label{prop:j*preservesMonos}
\(j^*\) preserves monomorphisms. \(j^*\) preserves monomorphisms.
\end{proposition} \end{proposition}
\begin{proof} \begin{proof}
t \todo{trivial} Follows directly from \cref{lem:restrictionPreserveMonos}.
\end{proof} \end{proof}
\begin{proposition} \begin{proposition}\label{prop:j^*preservesTrivialCofib}
\(j^*\) preserves trivial cofibration \(j^*\) preserves trivial cofibrations.
\end{proposition} \end{proposition}
\begin{proof} \begin{proof}
t \todo{same as \(i^*\)} Like in \cref{prop:i*preservesTrivialCofib}, we are in the comfortable position that \(j^*\) is both a left and a right adjoint.
Note that \(j^*\) also preserves the interval inclusion. As pushout products with the interval inclusion send cofibrations
to trivial cofibrations and \(j^*\) preserves monomorphisms by \cref{prop:j*preservesMonos}, \(j^*\) sends trivial cofibrations
to trivial cofibrations.
\end{proof} \end{proof}
\begin{proposition} \begin{proposition}\label{prop:j!preservesTrivialCofib}
\(j^*\) preserves fibrations (bäh) \(j^*\) preserves fibrations.
\end{proposition} \end{proposition}
\begin{proof} \begin{proof}
t \todo{do i really need todo this?} Preservation of uniform fibrations is clear, as every lifting problem against a generating trivial cofibration factors
\end{proof} through the image of a trivial cofibration under \(j^*\). As \(j^*\) preserves coequalizers, the equivariance follows
from the equivariance of the model category on \dcSet, which follows from the presence of (at least one) connection.
\end{proof}\todo{define uniform fibrations without equivariance}\todo{make a lemma that \dcSet is equivariant even if we just cite this from the relative elegance paper}
We want to recap the general proof idea, that \(i_!\) preserved monos. We can test being a monomorphism component wise and can write the We want to recap the general proof idea that \(i_!\) preserves monos. We can test being a monomorphism componentwise and can write the
left Kan extension evaluated at a point as some mono preserving functor followed by a colimit. left Kan extension evaluated at a point as some mono preserving functor followed by a colimit.
That the colimit preserves monomorphisms is equivalent to a condition of connected on the categories That the colimit preserves monomorphisms is equivalent to a condition of connected components on the categories
of elements. As we can only lift morphisms backwards along discrete Grothendieck fibrations, but we are faced of elements. We can only lift morphisms backwards along discrete Grothendieck fibrations, but we are faced
with zigzags. To remedy this we want to lift a strong enough pullback property to turn these zigzags into spans. with zigzags. To remedy this we want to lift a strong enough pullback property to turn these zigzags into spans.
This strategy will be the same \(j_!\). Sadly we couldn't get arbitrary Eilenberg-Zilber structures to lift, so This strategy will be the same for \(j_!\). Sadly we couldn't get arbitrary Eilenberg-Zilber structures to lift, so
we will need a slightly different attack point. we will need a slightly different attack point.
We first observe that \(□\) has a lot more pullbacks then \(Δ\). While \(Δ\) has only “non empty” pullbacks of face maps We first observe that \(□\) has a lot more pullbacks than \(Δ\). While \(Δ\) has only “non-empty” pullbacks of face maps,
\(□\) has all “non empty" pullbacks. This elevates us from the necessity to lift the Eilenberg-Zilber structure to \(B ↓ i\). \(□\) has all “non-empty” pullbacks. This relieves us of the necessity to lift the Eilenberg-Zilber structure to \(B ↓ i\).
We can get by with the following lemma. We can get by with the following lemma.
\begin{lemma}\label{lem:pullbackMono} \begin{lemma}\label{lem:pullbackMono}
Let \(i: \A\B\) be a functor, such that for all \(B ∈ \B\), spans in the image of the projection \(p: B ↓ i\) have pullbacks Let \(i: \A\B\) be a functor such that for all \(B ∈ \B\), spans in the image of the projection \(p: B ↓ i\) have pullbacks
in \B, and these pullbacks are in the image of \(i\). Then \(i_!\) preserves monomorphisms. in \B, and these pullbacks are in the image of \(i\). Then \(i_!\) preserves monomorphisms.
\end{lemma} \end{lemma}
To show this we replace lemma \ref{lem:idemLemma3.4} with the following simpler lemma. To show this we replace \cref{lem:idemLemma3.4} with the following simpler lemma.
\begin{lemma}\label{lem:colimPullbackMono} \begin{lemma}\label{lem:colimPullbackMono}
Let \C be a category. Assume \C has pullbacks, then the functor \(\colim : \hat{\C} → \Set\) preserves monomorphisms. Let \C be a category. If \C has pullbacks, then the functor \(\colim : \hat{\C} → \Set\) preserves monomorphisms.
\end{lemma} \end{lemma}
\begin{proof} \begin{proof}
Again let \(f\) be a monomorphism in \(\hat{\C}\). This is equivalently a monomorphism \(F: P → Q\) of discrete Grothendieck fibrations Again let \(f\) be a monomorphism in \(\hat{\C}\). This is equivalently a monomorphism \(F: P → Q\) of discrete Grothendieck fibrations
over \C. We need to show that \(F\) is injective on connected components. We can lift the pullbacks to \(Q\) by lemma \ref{lem:liftPullbacks}. over \C. We need to show that \(F\) is injective on connected components. We can lift the pullbacks to \(Q\) by \cref{lem:liftPullbacks}.
Let now \(S\) and \(T\) be in \(Q\) such that \(F(S)\) and \(F(T)\) are connected by a zigzag. We can turn this zigzag into a span, by Let now \(S\) and \(T\) be in \(P\) such that \(F(S)\) and \(F(T)\) are connected by a zigzag. We can turn this zigzag into a span by
taking pullbacks of cospans. Because \(F\) is monic it is injective on object and we can lift this span to a span in \(P\) and thus taking pullbacks of cospans. Because \(F\) is monic, it is injective on objects, and we can lift this span to a span in \(P\), and thus
\(S\) and \(T\) are in the same connected component. \(S\) and \(T\) are in the same connected component.
\end{proof} \end{proof}
\begin{proof}[Proof of lemma \ref{lem:pullbackMono}] \begin{proof}[Proof of \cref{lem:pullbackMono}]
As in lemma \ref{lem:idemLemma3.5}, we check that \(i_!\) preserves monos component wise. We write \(i_!(B)\) again as As in \cref{lem:idemLemma3.5}, we check that \(i_!\) preserves monos componentwise. We write the evaluation of \(i_!\) at \(B\) again as
\begin{eqcd*} \begin{eqcd*}
\hat{\A} \arrow[r, "p^*"] & \widehat{B ↓ i} \arrow[r, "\colim"] & \Set \hat{\A} \arrow[r, "p^*"] & \widehat{B ↓ i} \arrow[r, "\colim"] & \Set
\end{eqcd*} \end{eqcd*}
where \(p : B ↓ i → A\) is the projection map. We need to show that \(B ↓ i\) has pullbacks, but that is an immediate where \(p : B ↓ i → \A\) is the projection map. We need to show that \(B ↓ i\) has pullbacks, but that is an immediate
consequence of lemma \ref{commaPullback}. consequence of \cref{commaPullback}.
\end{proof} \end{proof}
And we arrive at the conclusion we wanted to get to: And we arrive at the conclusion we wanted to get to:
\begin{proposition} \begin{lemma}\label{lem:restrictionPreserveMonos}
Let \(F: \A\B\) be a functor, then the restriction functor \(F^* : \hat{\B}\hat{\A}\) preserves monomorphisms.
\end{lemma}
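\begin{proof}
A map of presheaves is a monomorphism exactly if all of its components are injective, and the component of \(F^*(η)\) at an object \(A ∈ \A\) is the component of \(η\) at \(F(A)\), so \(F^*\) preserves monomorphisms.
\end{proof}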
\begin{proposition}\label{prop:j!preservesMonos}
\(j_! : \widehat{\FL} → \□\) preserves monomorphisms. \(j_! : \□ → \widehat{\FL}\) preserves monomorphisms.
\end{proposition} \end{proposition}
\begin{proof} \begin{proof}
For this proof we will identify \(2 = \{⊥,\}\). For this proof we will identify \(2 = \{⊥,\}\).
We want to fulfill the conditions of \fref{lem:pullbackMono}. To check these conditions we check that equalizer and products We want to fulfill the conditions of \cref{lem:pullbackMono}. To check these conditions, we check that equalizers and products
with the desired conditions exist. Products are a straightforward calculation so we go forward to the equalizer case. with the desired properties exist. Products are a straightforward calculation, so we move on to the equalizer case.
To check this we will look at a diagram of the following form. To check this we will look at a diagram of the following form,
\begin{eqcd*}[column sep=small] \begin{eqcd*}[column sep=small]
{} & B \arrow[ld] \arrow[rd] & \\ {} & B \arrow[ld] \arrow[rd] & \\
𝟚^n \arrow[rr, "j(f)", shift left] \arrow[rr, "j(g)"', shift right] & & 𝟚^m 𝟚^n \arrow[rr, "j(f)", shift left] \arrow[rr, "j(g)"', shift right] & & 𝟚^m
\end{eqcd*} \end{eqcd*}
where we identify \(f\) and \(g\) with functions \(m → n + 2\). where we identify \(f\) and \(g\) with functions \(m → n + 2\).
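Spelled out, for \(h ∈ 𝟚^n\) one has \(j(f)(h)(x) = h(f(x))\) whenever \(f(x)\) lies in \(n\), while \(j(f)(h)(x)\) is the constant \(⊥\) or \(⊤\) if \(f(x)\) is the corresponding added point; this is the form in which \(j(f)\) and \(j(g)\) are used in the computations below.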
We note that equalizer, if they exist in \FL, are constructed the same way as in \Set. We note that equalizers, if they exist in \FL, are constructed the same way as in \Set.
If we apply this to our diagram we can see, that this construction can't create something empty, If we apply this to our diagram, we can see that this construction can't create something empty,
as both maps needs to preserve elements from \(B\). as both maps needs to preserve elements from \(B\).
We would like to complete the diagram in the following way We would like to complete the diagram in the following way
%\begin{eqcd*}[column sep=large] %\begin{eqcd*}[column sep=large]
@ -533,9 +562,9 @@ And we arrive at the conclusion we wanted to get to:
𝟚^l \arrow[r,"j(e)"] & 𝟚^n \arrow[r,"j(f)", shift left] \arrow[r,"j(g)"',shift right] & 𝟚^m 𝟚^l \arrow[r,"j(e)"] & 𝟚^n \arrow[r,"j(f)", shift left] \arrow[r,"j(g)"',shift right] & 𝟚^m
\end{tikzcd} \end{tikzcd}
\end{equation*} \end{equation*}
So we need to find finite set \(l\) and a map \(e : n → l + 2\). We will construct those by So we need to find a finite set \(l\) and a map \(e : n → l + 2\). We will construct those by
effectively constructing the coequilizer in \FinSet. So we take the quotient \(\faktor{n+2}{~}\) effectively constructing the coequalizer in \FinSet. So we take the quotient \(\faktor{n+2}{\sim}\)
where \(~\) is the equivalence relation generated by the relation of having a joint preimage under where \(\sim\) is the equivalence relation generated by the relation of having a joint preimage under
\(f\) and \(g\). We will now argue that our equalizer would be empty if this quotient identifies \(f\) and \(g\). We will now argue that our equalizer would be empty if this quotient identifies
\(\) and \(\). \(\) and \(\).
@ -543,19 +572,19 @@ And we arrive at the conclusion we wanted to get to:
Because \(\) and \(\) are being identified, there exists two finite sequence of elements \(x_i\) and \(y_i\) in \(m\), such that Because \(\) and \(\) are being identified, there exists two finite sequence of elements \(x_i\) and \(y_i\) in \(m\), such that
there exists a \(k\) and there exists a \(k\) and
(WLOG) \(f(x_1) =\), \(g(x_i) = f(y_i)\), \(g(y_i) = f(x_{i+1})\) and \(g(y_k) = \). (WLOG) \(f(x_1) =\), \(g(x_i) = f(y_i)\), \(g(y_i) = f(x_{i+1})\) and \(g(y_k) = \).
Plugging that into the definition of \(j\) we get Plugging that into the definition of \(j\), we get
\begin{align*} \begin{align*}
⊥ = j(f)(h)(x_1) &= j(g)(h)(x_1) = j(f)(h)(y_1) \\ &= j(g)(h)(y_1) = j(f)(h)(x_2) = … = j(g)(h)(y_k) = ⊥ = j(f)(h)(x_1) &= j(g)(h)(x_1) = j(f)(h)(y_1) \\ &= j(g)(h)(y_1) = j(f)(h)(x_2) = … = j(g)(h)(y_k) =
\end{align*} \end{align*}
which is a contradiction and thus \(\) and \(\) are not identified. which is a contradiction and thus \(\) and \(\) are not identified.
For \(e : n → l + 2\) we take the evident quotient map restricted to \(n\). For \(e : n → l + 2\), we take the evident quotient map restricted to \(n\).
While we now have our candidate, we still need to show that this is actually a pushout in \FL. As \FL is well pointed While we now have our candidate, we still need to show that this is actually an equalizer in \FL. As \FL is well-pointed
it suffices to check global elements, which again are just elements in the set theoretic sense. it suffices to check global elements, which again are just elements in the set theoretic sense.
As \(fe = ge\) by construction and \(j\) is a functor we get \(j(f)j(e) = j(g)j(e)\). As \(fe = ge\) by construction and \(j\) is a functor we get \(j(f)j(e) = j(g)j(e)\).
Again let \(h ∈ 𝟚^n\) such that \(j(f)(h) = j(g)(h)\). We can then define Again let \(h ∈ 𝟚^n\) such that \(j(f)(h) = j(g)(h)\). We can then define
\begin{align*} \begin{align*}
h': l → 𝟚 & & h'(x) ≔ h(z) & & z ∈ e^{-1}(x) h': l → 𝟚 && \text{by} && h'(x) ≔ h(z) & & \text{where} && z ∈ e^{-1}(x)\text{.}
\end{align*} \end{align*}
For this definition to make sense we need to argue that the choice of \(z\) does not matter. For this definition to make sense we need to argue that the choice of \(z\) does not matter.
So let \(z\) and \(z'\) be both element of \(e^{-1}(x)\). Like above that means there are So let \(z\) and \(z'\) both be elements of \(e^{-1}(x)\). As above, that means there are
@ -565,27 +594,22 @@ And we arrive at the conclusion we wanted to get to:
\begin{align*} \begin{align*}
h(z) = j(f)(h)(x_1) &= j(g)(h)(x_1) = j(f)(h)(y_1) \\ &= j(g)(h)(y_1) = j(f)(h)(x_2) = … = j(g)(h)(y_k) = h(z') h(z) = j(f)(h)(x_1) &= j(g)(h)(x_1) = j(f)(h)(y_1) \\ &= j(g)(h)(y_1) = j(f)(h)(x_2) = … = j(g)(h)(y_k) = h(z')
\end{align*} \end{align*}
We also need that the map \(j(e)\) is monic, but this follows directly from the fact, that \(e\) in \FinSet is epic. We also need that the map \(j(e)\) is monic, but this follows directly from the fact that \(e\) in \FinSet is epic.
We only need to produce a map \(B → 𝟚^l\), that commutes. We get this map directly, because we have shown that We only need to produce a map \(B → 𝟚^l\) that commutes. We get this map directly, because we have shown that
this diagram is an equalizer in \FL. this diagram is an equalizer in \FL.
This means we fulfill the conditions of \cref{lem:pullbackMono} and thus \(j_!\) preserves monomorphisms.
This means we fulfill the conditions of \fref{lem:pullbackMono} and thus \(j_!\) preserves monomorphisms.
\end{proof} \end{proof}
\subsection{Step *} \subsection{\(i^*j_!\) and \(j^*i_!\) induce an equivalence of homotopy categories}
By Ken Browns lemma we now that left Quillen functors preserve weak equivalences between cofibrant objects. Before we continue with our plan directly, we take a short detour to get a somewhat better idea of how these two functors behave.
In our setting every object is cofibrant, so all for functors preserve weak equivalences. Thus all for functors
descend to functors of the underlying homotpoy categories.
\subsection{Step 3} From the previous sections it is immediately clear that \(i^*j_!\) and \(j^*i_!\) are left Quillen functors with some right adjoint.
One might ask if these two functors are adjoint to each other. The preceding development would suggest that they are not,
For \(i_!j^*\) and \(i^*j_!\) to be a potential quillen equivalence, they need to adjoint. So we continue by showing that. and that is indeed the case.
To do that we first verify the claim that \(i^*j_!\) is indeed the triangulation functor.
\begin{definition}\label{def:triangulation} \begin{definition}\label{def:triangulation}
Let \(t: □ → \) be the functor that sends \([1]^n \mapsto (Δ^1)^n\). The \emph{triangulation functor \(\T\)} Let \(t: □ → \) be the functor that sends \([1]^n \mapsto (Δ^1)^n\). The \emph{triangulation functor \(\T\)}
is the left Kan extension of \(t\) along yoneda. is the left Kan extension of \(t\) along Yoneda.
\begin{eqcd*} \begin{eqcd*}
\arrow[r,"t"] \arrow[d,"\Yo"] & \\ \arrow[r,"t"] \arrow[d,"\Yo"] & \\
\□ \arrow[ru,"\T"] & \□ \arrow[ru,"\T"] &
@ -596,76 +620,156 @@ To do that we first verify the claim that \(i^*j_!\) is indeed the triangulation
The functor \(i^*j_!\) is the triangulation functor. The functor \(i^*j_!\) is the triangulation functor.
\end{proposition} \end{proposition}
\begin{proof} \begin{proof}
As \T is the unique cocontinues functor that extends product preserving functor from □ to that sends the interval As \T is the unique cocontinuous functor that extends the product preserving functor from \□ to that sends the interval
to the interval, we only need to show these conditions. \(i^*j_!\) is cocontinues as it is the composition of two left to the interval, we only need to show these conditions. \(i^*j_!\) is cocontinuous as it is the composition of two left
adjoints. We need to show that this functor is product preserving on representables. But by and adjoints. We need to show that this functor is product preserving on representables. But by Yoneda and \cref{prop:j_!Nothing}
\begin{equation*} \begin{equation*}
j_!(\I^n × \I^m) = j_!(\I^{n+m}) = \Yo 𝟚^{n+m} = \Yo 𝟚^n × \Yo 𝟚^m = j_!(\I^n) × j_!(\I^m) j_!(\I^n × \I^m) = j_!(\I^{n+m}) = \Yo 𝟚^{n+m} = \Yo 𝟚^n × \Yo 𝟚^m = j_!(\I^n) × j_!(\I^m)
\end{equation*} \end{equation*}
and \(i^*\) is a right adjoint we get this property immediatly. We also need to show that \(i^*j_!\) preserves the interval. \(j_!\) preserves products of representables.
We already know this for \(j_!\). So the question ist if \(i^*\) preserves the interval. We now that \(i^*(\I)(x) = Because \(i^*\) is a right adjoint we get this property immediately. We also need to show that \(i^*j_!\) preserves the interval.
We already know this for \(j_!\). So the question is if \(i^*\) preserves the interval. We know that \(i^*(\I)(x) =
\Hom_{\dCube}(\Yo [1],i(x))\). As \(i\) is fully faithful the claim follows. \Hom_{\dCube}(\Yo [1],i(x))\). As \(i\) is fully faithful the claim follows.
\end{proof} \end{proof}
The main insight from that this adjunction tells us, is that simplicials can't see connections. Or in other word degenerating \begin{example}
a simplex along a connection is the same as degenrating it along a projection. While this is relatively easy to see from
an intuitive standpoint it is a bit tedious to actually show.
\begin{lemma}\label{lem:trianglesDon'tSeeConnections}
We have the following natural bijection
\begin{equation*} \begin{equation*}
\Hom_{\□}(j^*i_!(Δ^n),\I^m) = \Hom_{}^n,i^*j_!(\I^m)) \Hom_{\□}(j^*i_!(Δ^2),\I^2) ≠ \Hom_{}^2,i^*j_!(\I^2))
\end{equation*} \end{equation*}
\end{lemma} \end{example}%
Intuitively the reason for this is that we can't map a square with one side degenerated to a triangle into the representable
square in \□. But we have a lot of ways to map a triangle into a triangulated square in .
\begin{proof} \begin{proof}
Using that \(j_!\) and \(i_!\) preserve representables by \cref{prop:i_!Nothing,prop:j_!Nothing} , using the yoneda lemma on the right and expand the definition By the Yoneda lemma and the fact that \(i^*j_!\) is the triangulation functor, a map on the right-hand side corresponds to a
on the left we get the following. pair of monotone maps \([2] → [1]\). Since a monotone map \([2] → [1]\) is determined by where it switches from \(0\) to \(1\), there are four of them and hence 16 such pairs. On the left we can use \cref{prop:j_!Nothing},
\begin{equation*} and expanding the definition to get \(\Hom_{\□}(\Hom_{\FL}(j(-),[2]),\Hom_{}(-,2))\). Lets take such a transformation
\Hom_{\□}(\Hom_{\FL}(j(-),[n]),\Hom_□(-,m)) = \Hom_{\FL}([n],𝟚^m) and call it \(η\). \(η_0\) gives rise to a map \(f : [2]𝟚^2\). \(η_1\) witnesses that \(f\) is indeed monotone, by
\end{equation*} composing with face maps. And \(η_2\) rules out all injective maps, as for them always exactly 2 faces agree and
So given a natural transformation \(η : \Hom_{\FL}(j(-),[n])\Hom_(-,n)\), we define \(f: [n]𝟚^m\). For this we identify \(\I^2(2)\) does not contain such faces. There are only 9 such maps, which is an upper bound for the possible number of
an element \(x\) of a finite lattice \(Y\) with the monotone map \(x : 𝟚^0 → Y\) sending the one point to \(x\). If we plug natural transformations.
such an \(x\) into \(η_0\), we get a map \(η_0(x) : 0 → m\) in \□, or in other words a function \(m → 2\), which we
can identify with an element of \(𝟚^m\). Monotonicity is witnessed by \(η_1\). Translating \(f\) back to a function
\(η_0\) bijectively is trivial, we only need to show that \(η_0\) already determines all of \(η\). So assume we have
\(η_l\) already defined, we want to define \(η_{l+1}\). We only need to care about non degenerated cells, as these are
already forced by the lower data. For these non degenrate cases there is at most one candidate aligning with the face maps
to lower data. Using monotonicity of \(f\) we can see that these align correctly, and thus we ge a unique \(η\) given \(f\).
\end{proof} \end{proof}
\todo{compute the last part, but that would be at least 3 lemmas and a 2 pages}
\todo{is it true at all? It has to be else everything else is wrong too. But can i map a triangle into a square? \begin{example}
on the right side for sure} \begin{equation*}
\begin{proposition}\label{prop:adjoints} \Hom_{}(i^*j_!(\I^2),Δ^1 × Δ^1) ≠ \Hom_{\□}(\I^2,j^*i_!(Δ^1 × Δ^1))
\(j^*i_!\) is left adjoint to \(i^*j_!\) \end{equation*}
\end{proposition} \end{example}%
Here the problem is that the square made from a product in \sSet doesn't have a 2-cell that fills the whole square, so there
is no way to map the representable square onto it without degenerating something. If we triangulate the representable square
first, we don't run into that problem.
\begin{proof} \begin{proof}
As we already know that the left adjoint is cocontinuous, we only need to check this for representables in the As \(i^*j_!\) is the triangulation functor, the left-hand side becomes \(\Hom_{\sSet}(Δ^1 × Δ^1,Δ^1 × Δ^1)\).
left component. We can write \( Δ^1 × Δ^1 \) as a pushout of two copies of \(Δ^2\) joined along an edge.
\begin{equation*} As \(i_!\) is cocontinuous, we can construct this pushout in \dcSet. By this and \cref{prop:i_!Nothing}, we
\Hom_{\□}(j^*i_!(Δ^n),B) = \Hom_{\sSet}(Δ^n,i^*j_!(B)) get a colimit of two representables there. Unraveling these definitions, it is a straightforward exercise
\end{equation*} to verify that the set on the left-hand side has more elements.
We can decomposition \(B\) into a cubical cell complex \cref{??}, a morphism from \(j^*i_!
\end{proof} \end{proof}
\todo{somehow handle that we can get a cell decomposition of \(B\) (cite something?)}
From here on we follow the argument from \cite[§6.2]{solution}. From here on we follow the argument from \cite[§6.2]{solution}. They lay down a criterion on functors and model categories
What we still need to show is that the unit and couint of the adjunction \(i_!j^* ⊣ i^*j_!\) are valued under which every natural transformation between such functors is a weak equivalence. Thus we only need to construct some natural
in weak equivalences. transformation between the identity functor and the composite of these functors.
For this we need a technical Lemma about Eilenberg-Zilber categories. But to make sense of its claim we first need For this, we cite two technical
to introduce a technicality. If we have a category \A and presheafs on it. Then the automorphism group of some lemmas whose theory is mostly developed in \cite{riehlTheoryPracticeReedy2014}, but which are taken in this form from \cite{solution}.
To make sense of the first lemma we need
to introduce a technicality. If we have a category \A and presheaves on it, then the automorphism group of some
\(a ∈ \A\) acts on \(\Yo a\,b\) by composition. This kind of acting is natural in the sense that \(\Yo a f\) is an \(a ∈ \A\) acts on \(\Yo a\,b\) by composition. This kind of acting is natural in the sense that \(\Yo a f\) is an
\(\mathrm{Aut}(a)\) equivariant map. Thus we can say \(\mathrm{Aut}(a)\) acts on \(\Yo a\). And thus we can also \(\mathrm{Aut}(a)\)-equivariant map. Thus we can say \(\mathrm{Aut}(a)\) acts on \(\Yo a\). And thus we can also
quotient \(\Yo a\) by subgroups of \(\mathrm{Aut}(a)\). quotient \(\Yo a\) by subgroups of \(\mathrm{Aut}(a)\). This is also where equivariance comes into play again, as
equivariance by definition guarantees us that these quotients of representables by some automorphism subgroup are
contractible.
\todo{if time and resources explain and proof these lemma (ca. 4 pages)}
\begin{lemma}[{\cites[§5]{riehlTheoryPracticeReedy2014}[lemma 6.2.13]{solution}}]\label{lem:Eilenberg-ZilberMonosCreated} \begin{lemma}[{\cites[§5]{riehlTheoryPracticeReedy2014}[lemma 6.2.13]{solution}}]\label{lem:Eilenberg-ZilberMonosCreated}
Let \A be an Eilenberg-Zilber category. Then the monomorphism in \(\hat{\A}\) are generated under Let \A be an Eilenberg-Zilber category. Then the monomorphisms in \(\hat{\A}\) are generated under
coproduct, pushout, sequential composition, and right cancelation under monomorphisms by the maps coproduct, pushout, sequential composition, and right cancellation under monomorphisms by the maps
\(∅ → \faktor{\Yo a}{H}\) valued in the quotient of a representable presheaf at some \(a ∈ \A\) by an arbitrary \(∅ → \faktor{\Yo a}{H}\) valued in the quotient of a representable presheaf at some \(a ∈ \A\) by an arbitrary
subgroup \(H\) of its automorphism group. subgroup \(H\) of its automorphism group.
\end{lemma} \end{lemma}
\todo{if there is time (lol) proof and explain the theory behind this}
\begin{lemma}[{\cites[§5]{riehlTheoryPracticeReedy2014}[lemma 6.2.15]{solution}}]\label{lem:PairClosure}
Let \(U,V : K → M\) be a cocontinuous pair of functors valued in a model category and \(α : U ⇒ V\) a
natural transformation between them. Define the cofibrations in \(K\) to be the maps that are sent to
cofibrations under both \(U\) and \(V\). Define \(\mathcal{N}\) to be the class of cofibrations
between cofibrant objects that are sent by Leibniz pushout application with \(α\) to weak equivalences in
\(M\). Then \(\mathcal{N}\) is closed under coproducts, pushout, (transfinite) composition, and right cancellation
between cofibrations.
\end{lemma}
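Here the Leibniz pushout application of \(α : U ⇒ V\) to a map \(f : A → B\) is the induced map \(U(B) ⊔_{U(A)} V(A) → V(B)\) out of the pushout; in particular, if \(U\) and \(V\) preserve the initial object, applying it to \(∅ → X\) yields precisely the component \(α_X\).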
\begin{corollary}[{\cite[corollary 6.2.16]{solution}}]\label{lem:everythingWorks}
Let \A be an EilenbergZilber category and consider a parallel pair of functors \(U, V : \Set^{\A^\op} → M\) valued in a model
category \(M\) together with a natural transformation \(α : U ⇒ V\). Suppose that \(U\) and \(V\) preserve colimits and send monomorphisms
in \(\Set^{\A^\op}\) to cofibrations in \(M\). If the components of \(α\) at quotients of representables by subgroups of their automorphism groups
are weak equivalences, then all components of \(α\) are weak equivalences.
\end{corollary}
\begin{proof}
By \cref{test}\todo{In Leibniz construction chapter make this an example} we can write the part of \(α\) at \(X\) as a
Leibniz application with the monomorphism \(∅ → X\). We can use both lemmas from above to show that all
of those maps are sent to weak equivalences by Leibniz application with \(α\). By \cref{lem:Eilenberg-ZilberMonosCreated} the monomorphisms in \(\Set^{\A^\op}\) are generated by
morphisms \(∅ → \faktor{\Yo a}{H}\) under some closure properties. By assumption, morphisms of that form are sent to weak equivalences
by the Leibniz application with \(α\). By \cref{lem:PairClosure}, the maps that get sent to weak equivalences by Leibniz application with
\(α\) have the same closure properties, thus all monomorphisms get sent to weak equivalences, and with them all morphisms of the
form \(∅ → X\).
\end{proof}
\begin{corollary}[{\cite[Corollary 6.2.17]{solution}}]\label{cor:dafuq}
Let \A be an EilenbergZilber category for which \(\Set^{\A^\op}\) admits a model structure whose cofibrations are the monomorphisms,
in which the
quotients \(\faktor{\Yo a}{H}\) of representables by subgroups of their automorphism groups are weakly contractible. If
\(U, V : \Set^{\A^\op} → M\) define a
pair of left Quillen functors that preserve the terminal object, then any natural transformation \(α : U ⇒ V\) is a natural weak equivalence.
\end{corollary}
\begin{proof}
By Ken Brown's lemma, left Quillen functors preserve weak equivalences between cofibrant objects. If such a functor preserves the terminal
object, then it also preserves weak contractibility of cofibrant objects. To apply \cref{lem:everythingWorks}
we need to show that \(α_{\faktor{\Yo a}{H}}\) is a weak equivalence. As this object is weakly contractible, we get the following commuting
square.
\begin{eqcd*}[column sep=large, row sep=large]
U\left(\faktor{\Yo a}{H}\right) \arrow[r,"α_{\faktor{\Yo a}{H}}"] \arrow[d, "\rotatebox{90}{\(\sim\)}"] & V\left(\faktor{\Yo a}{H}\right) \arrow[d, "\rotatebox{90}{\(\sim\)}"] \\
U(*) \arrow[r,equal] & V(*)
\end{eqcd*}
And by 2-out-of-3, the upper map must be a weak equivalence, and thus by \cref{lem:everythingWorks} the claim follows.
\end{proof}
\begin{theorem}
\(j^*i_!\) and \(i^*j_!\) induce an equivalence between \(\Ho(\□)\) and \(\Ho()\).
\end{theorem}
\begin{proof}
By \cref{prop:i*preservesMonos,prop:i*preservesTrivialCofib,prop:i!preservesMonos,prop:i!preservesTrivialCofib,%
prop:j!preservesTrivialCofib,prop:j!preservesMonos,prop:j*preservesMonos,prop:j^*preservesTrivialCofib},
\(i^*\), \(i_!\), \(j^*\), and \(j_!\) are left Quillen functors, and thus so are \(j^*i_!\) and \(i^*j_!\).
By Ken Brown's lemma we know that left Quillen functors preserve weak equivalences between cofibrant objects. In
the model structures on \□ and \sSet, every monomorphism is a cofibration and thus every object is cofibrant.
Thus \(j^*i_!\) and \(i^*j_!\) preserve weak equivalences and descend to functors between the corresponding
homotopy categories.
To show that these induce an equivalence between the homotopy categories we must show that we have a zigzag of
natural transformations between \(i^*j_!j^*i_!\) and the identity functor such that every natural transformation
is valued in weak equivalences, and likewise for \(j^*i_!i^*j_!\). We will call \(η^i\) and \(ε^i\) the unit and counit
of the adjunction \(i_! ⊣ i^*\). Likewise \(η^j\) and \(ε^j\) for the adjunction \(j_! ⊣ j^*\).
We can construct these as follows
\begin{eqcd*}[column sep=large, row sep=large,execute at end picture={\draw[decorate,decoration={brace,mirror,amplitude=4.5}] (sBrcs.south) to (eBrcs.south);}]
\arrow[r,"i_!", ""'{name=i!s}] & |[alias=sBrcs]| \dcSet \arrow[r,"j^*"] \arrow[d,phantom,"⊛"] \arrow[rr,bend right=20,phantom , ""'{name=cc}] & \□ \arrow[r,"j_!"] & |[alias=eBrcs]| \dcSet \arrow[r,"i^*", ""'{name=i*s}] \arrow[d,phantom,"⊛"] & \\
\arrow[r,"i_!"{name=i!t}] & \dcSet \arrow[rr,"\id"{name=sida},""'{name=sidb}, equal] & & \dcSet \arrow[r,"i^*"{name=i*t}] & \\
\arrow[rrrr,"\id"{name=lid}, equal] & & & &
\arrow[Rightarrow ,from=i!s, to=i!t, "\id"]
\arrow[Rightarrow, from=cc, to=sida, "ε^j"]
\arrow[Rightarrow, from=i*s, to=i*t, "\id"]
\arrow[Rightarrow, from=lid, to=sidb, "η^i"]
\end{eqcd*}
and
\begin{eqcd*}[column sep=large, row sep=large,execute at end picture={\draw[decorate,decoration={brace,mirror,amplitude=4.5}] (sBrcs.south) to (eBrcs.south);}]
\□ \arrow[r,"j_!", ""'{name=j!s}] & |[alias=sBrcs]| \dcSet \arrow[r,"i^*"] \arrow[d,phantom,"⊛"] \arrow[rr,bend right=20,phantom, ""'{name=cc}] & \arrow[r,"i_!"] & |[alias=eBrcs]| \dcSet \arrow[r,"j^*", ""'{name=j*s}] \arrow[d,phantom,"⊛"] & \□ \\
\□ \arrow[r,"j_!"{name=j!t}] & \dcSet \arrow[rr,"\id"{name=sida},""'{name=sidb}, equal] & & \dcSet \arrow[r,"j^*"{name=j*t}] & \□ \\
\□ \arrow[rrrr,"\id"{name=lid}, equal] & & & & \□
\arrow[Rightarrow ,from=j!s, to=j!t, "\id"]
\arrow[Rightarrow, from=cc, to=sida, "ε^i"]
\arrow[Rightarrow, from=j*s, to=j*t, "\id"]
\arrow[Rightarrow, from=lid, to=sidb, "η^j"]
\end{eqcd*}
where \(\) is the Godement product.
By \cref{prop:i_!Nothing,prop:j_!Nothing}, \(i_!\) and \(j_!\) preserve terminal objects, and \(i^*\) and \(j^*\) do so because they are
right adjoints. So by \cref{cor:dafuq} these two cospans of natural transformations are valued in weak equivalences, and thus we have our
desired equivalence of homotopy categories.
\end{proof}
\end{document} \end{document}
View file
@ -1,5 +1,5 @@
\documentclass[../Main.tex]{subfiles} \documentclass[../Main.tex]{subfiles}
\begin{document} \begin{document}
\subfile{Model_Structure_Argument} \subfile{Equivalence}
\end{document} \end{document}
View file
@ -33,6 +33,22 @@
file = {/home/nerf/Zotero/storage/XTZNK2WW/Angiuli et al. - 2018 - Cartesian Cubical Computational Type Theory Const.pdf} file = {/home/nerf/Zotero/storage/XTZNK2WW/Angiuli et al. - 2018 - Cartesian Cubical Computational Type Theory Const.pdf}
} }
@online{awodeyCartesianCubicalModel2023,
title = {Cartesian Cubical Model Categories},
author = {Awodey, Steve},
date = {2023-07-14},
eprint = {2305.00893},
eprinttype = {arXiv},
eprintclass = {math},
doi = {10.48550/arXiv.2305.00893},
url = {http://arxiv.org/abs/2305.00893},
urldate = {2024-08-03},
abstract = {The category of Cartesian cubical sets is introduced and endowed with a Quillen model structure using ideas coming from recent constructions of cubical systems of univalent type theory.},
pubstate = {prepublished},
keywords = {18N40 18B25 18N45,Mathematics - Algebraic Topology,Mathematics - Category Theory,Mathematics - Logic},
file = {/home/nerf/Zotero/storage/KWCXVX7B/Awodey - 2023 - Cartesian cubical model categories.pdf;/home/nerf/Zotero/storage/PNTHDEYX/2305.html}
}
@book{awodeyCategoryTheory2006, @book{awodeyCategoryTheory2006,
title = {Category {{Theory}}}, title = {Category {{Theory}}},
author = {Awodey, Steve}, author = {Awodey, Steve},
@ -383,6 +399,27 @@
file = {/home/nerf/Zotero/storage/SVIAPJW5/Gambino and Sattler - 2017 - The Frobenius Condition, Right Properness, and Uni.pdf;/home/nerf/Zotero/storage/DTMPA7ZW/1510.html;/home/nerf/Zotero/storage/NLPL6A6T/1510.html} file = {/home/nerf/Zotero/storage/SVIAPJW5/Gambino and Sattler - 2017 - The Frobenius Condition, Right Properness, and Uni.pdf;/home/nerf/Zotero/storage/DTMPA7ZW/1510.html;/home/nerf/Zotero/storage/NLPL6A6T/1510.html}
} }
@article{gambinoPolynomialFunctorsPolynomial2013,
title = {Polynomial Functors and Polynomial Monads},
author = {Gambino, Nicola and Kock, Joachim},
date = {2013-01},
journaltitle = {Mathematical Proceedings of the Cambridge Philosophical Society},
shortjournal = {Math. Proc. Camb. Phil. Soc.},
volume = {154},
number = {1},
eprint = {0906.4931},
eprinttype = {arXiv},
eprintclass = {math},
pages = {153--192},
issn = {0305-0041, 1469-8064},
doi = {10.1017/S0305004112000394},
url = {http://arxiv.org/abs/0906.4931},
urldate = {2024-08-04},
abstract = {We study polynomial functors over locally cartesian closed categories. After setting up the basic theory, we show how polynomial functors assemble into a double category, in fact a framed bicategory. We show that the free monad on a polynomial endofunctor is polynomial. The relationship with operads and other related notions is explored.},
keywords = {18C15 18D05 18D50 03G30,Mathematics - Category Theory},
file = {/home/nerf/Zotero/storage/JMIDZIWR/Gambino and Kock - 2013 - Polynomial functors and polynomial monads.pdf;/home/nerf/Zotero/storage/CADCALSE/0906.html}
}
@article{garnerUnderstandingSmallObject2009, @article{garnerUnderstandingSmallObject2009,
title = {Understanding the Small Object Argument}, title = {Understanding the Small Object Argument},
author = {Garner, Richard}, author = {Garner, Richard},
@ -453,6 +490,15 @@
file = {/home/nerf/Zotero/storage/VKM9TK2D/Hofmann - Doctor of Philosophy University of Edinburgh 1995.pdf} file = {/home/nerf/Zotero/storage/VKM9TK2D/Hofmann - Doctor of Philosophy University of Edinburgh 1995.pdf}
} }
@article{hofmannLiftingGrothendieckUniverses1997,
title = {Lifting {{Grothendieck Universes}}},
author = {Hofmann, Martin and Streicher, Thomas},
date = {1997},
url = {https://www2.mathematik.tu-darmstadt.de/~streicher/NOTES/lift.pdf},
langid = {english},
file = {/home/nerf/Zotero/storage/LE9B8IMD/Hofmann et al. - Lifting Grothendieck Universes.pdf}
}
@article{hofmannSyntaxSemanticsDependent, @article{hofmannSyntaxSemanticsDependent,
title = {Syntax and {{Semantics}} of {{Dependent Types}}}, title = {Syntax and {{Semantics}} of {{Dependent Types}}},
author = {Hofmann, Martin}, author = {Hofmann, Martin},
@ -478,6 +524,23 @@
organization = {American Mathematical Society} organization = {American Mathematical Society}
} }
@online{hoveyModelCategories2007a,
title = {Model {{Categories}}},
author = {Hovey, Mark},
date = {2007-10-17},
series = {Mathematical {{Surveys}} and {{Monographs}}},
volume = {63},
publisher = {American Mathematical Society},
issn = {0076-5376, 2331-7159},
doi = {10.1090/surv/063},
url = {https://www.ams.org/surv/063},
urldate = {2024-08-02},
isbn = {9780821843611 9781470412906},
langid = {english},
organization = {American Mathematical Society}
}
@online{IntroductionHomotopyTheory, @online{IntroductionHomotopyTheory,
title = {Introduction to {{Homotopy Theory}} in {{nLab}}}, title = {Introduction to {{Homotopy Theory}} in {{nLab}}},
url = {https://ncatlab.org/nlab/show/Introduction+to+Homotopy+Theory}, url = {https://ncatlab.org/nlab/show/Introduction+to+Homotopy+Theory},
@ -625,6 +688,58 @@
file = {/home/nerf/Zotero/storage/IPQSVWQ9/Orton and Pitts - 2018 - Axioms for Modelling Cubical Type Theory in a Topo.pdf;/home/nerf/Zotero/storage/KRK2SGUB/1712.html} file = {/home/nerf/Zotero/storage/IPQSVWQ9/Orton and Pitts - 2018 - Axioms for Modelling Cubical Type Theory in a Topo.pdf;/home/nerf/Zotero/storage/KRK2SGUB/1712.html}
} }
@article{pareAbsoluteColimits1971,
title = {On Absolute Colimits},
author = {Paré, Robert},
date = {1971-09-01},
journaltitle = {Journal of Algebra},
shortjournal = {Journal of Algebra},
volume = {19},
number = {1},
pages = {80--95},
issn = {0021-8693},
doi = {10.1016/0021-8693(71)90116-5},
url = {https://www.sciencedirect.com/science/article/pii/0021869371901165},
urldate = {2024-07-10},
file = {/home/nerf/Zotero/storage/E68DYK76/0021869371901165.html}
}
@incollection{quillenAxiomaticHomotopyTheory1967,
title = {Axiomatic Homotopy Theory},
booktitle = {Homotopical {{Algebra}}},
author = {Quillen, Daniel G.},
editor = {Quillen, Daniel G.},
date = {1967},
pages = {1--64},
publisher = {Springer},
location = {Berlin, Heidelberg},
doi = {10.1007/BFb0097439},
url = {https://doi.org/10.1007/BFb0097439},
urldate = {2024-08-02},
isbn = {978-3-540-35523-6},
langid = {english},
keywords = {Double Coset,Homotopy Category,Homotopy Theory,Model Category,Weak Equivalence},
file = {/home/nerf/Zotero/storage/WIV7ECSM/Quillen - 1967 - Axiomatic homotopy theory.pdf}
}
@incollection{quillenExamplesSimplicialHomotopy1967,
title = {Examples of Simplicial Homotopy Theories},
booktitle = {Homotopical {{Algebra}}},
author = {Quillen, Daniel G.},
editor = {Quillen, Daniel G.},
date = {1967},
pages = {65--155},
publisher = {Springer},
location = {Berlin, Heidelberg},
doi = {10.1007/BFb0097440},
url = {https://doi.org/10.1007/BFb0097440},
urldate = {2024-08-02},
isbn = {978-3-540-35523-6},
langid = {english},
keywords = {Abelian Group,Exact Sequence,Projective Resolution,Simplicial Group,Spectral Sequence},
file = {/home/nerf/Zotero/storage/SV4WJESD/Quillen - 1967 - Examples of simplicial homotopy theories.pdf}
}
@book{quillenHomotopicalAlgebra1967, @book{quillenHomotopicalAlgebra1967,
title = {Homotopical {{Algebra}}}, title = {Homotopical {{Algebra}}},
author = {Quillen, Daniel G.}, author = {Quillen, Daniel G.},
@ -643,6 +758,24 @@
file = {/home/nerf/Zotero/storage/JG33WVEZ/Quillen - 1967 - Homotopical Algebra.pdf} file = {/home/nerf/Zotero/storage/JG33WVEZ/Quillen - 1967 - Homotopical Algebra.pdf}
} }
@book{quillenHomotopicalAlgebra1967a,
title = {Homotopical {{Algebra}}},
author = {Quillen, Daniel G.},
editor = {Dold, A. and Eckmann, B.},
editortype = {redactor},
date = {1967},
series = {Lecture {{Notes}} in {{Mathematics}}},
volume = {43},
publisher = {Springer},
location = {Berlin, Heidelberg},
doi = {10.1007/BFb0097438},
url = {http://link.springer.com/10.1007/BFb0097438},
urldate = {2024-08-02},
isbn = {978-3-540-03914-3 978-3-540-35523-6},
keywords = {algebra,homotopical algebra,Homotopie,homotopy,homotopy theory,Quillen},
file = {/home/nerf/Zotero/storage/6JP38TF2/Quillen - 1967 - Homotopical Algebra.pdf}
}
@unpublished{riehlAlgebraicModelStructures2011, @unpublished{riehlAlgebraicModelStructures2011,
title = {Algebraic Model Structures}, title = {Algebraic Model Structures},
author = {Riehl, Emily}, author = {Riehl, Emily},
@ -818,6 +951,13 @@
file = {/home/nerf/Zotero/storage/SYH7C9JV/Sattler - 2019 - Idempotent completion of cubes in posets.pdf;/home/nerf/Zotero/storage/9ZN8RBCZ/1805.html} file = {/home/nerf/Zotero/storage/SYH7C9JV/Sattler - 2019 - Idempotent completion of cubes in posets.pdf;/home/nerf/Zotero/storage/9ZN8RBCZ/1805.html}
} }
@online{Section00VCFunctoriality,
title = {Section 7.5 ({{00VC}}): {{Functoriality}} of Categories of Presheaves—{{The Stacks}} Project},
url = {https://stacks.math.columbia.edu/tag/00VC},
urldate = {2024-07-12},
file = {/home/nerf/Zotero/storage/LEP4E2FJ/00VC.html}
}
@online{shulmanAllInftyToposes2019, @online{shulmanAllInftyToposes2019,
title = {All \$(\textbackslash infty,1)\$-Toposes Have Strict Univalent Universes}, title = {All \$(\textbackslash infty,1)\$-Toposes Have Strict Univalent Universes},
author = {Shulman, Michael}, author = {Shulman, Michael},

Binary files not shown (9 files).