\documentclass{amsart}
\usepackage{amssymb,amsmath,latexsym,stmaryrd,mathtools}
\usepackage{cleveref}
\usepackage{mathpartir}
\usepackage{xcolor}
\let\types\vdash % turnstile
\def\cb{\mid} % context break
\def\op{^{\mathrm{op}}}
\def\p{^+} % variances on variables
\def\m{^-}
\let\mypm\pm
\let\mymp\mp
\def\pm{^\mypm}
\def\mp{^\mymp}
\def\jdeq{\equiv}
\def\cat{\;\mathsf{cat}}
\def\type{\;\mathsf{type}}
\def\ctx{\;\mathsf{ctx}}
\let\splits\rightrightarrows
\def\flip#1{#1^*} % reverse the variances of all variables
\def\dual#1{#1^\dagger} % reverse variances *and* interchange \hat and \check
\def\mor#1{\hom_{#1}}
\def\id{\mathrm{id}}
\def\ec{\cdot} % empty context
\def\psplit{\overset{\mathsf{pair}}{\splits}}
\def\iso{\cong}
\def\tpair#1#2{#1\otimes #2}
\def\cpair#1#2{\langle #1,#2\rangle}
\def\tlet#1,#2:=#3in{\mathsf{let}\; \tpair{#1}{#2} \coloneqq #3 \;\mathsf{in}\;}
\def\clet#1,#2:=#3in{\mathsf{let}\; \cpair{#1}{#2} \coloneqq #3 \;\mathsf{in}\;}
\def\mix#1,#2 with #3 in{\mathsf{mix} {\scriptsize \begin{array}{c} \check{#1} \coloneqq \check{#3} \\ \hat{#2} \coloneqq \hat{#3} \end{array} }\mathsf{in}\;}
\def\pcol{\overset{\scriptscriptstyle +}{:}}
\def\mcol{\overset{\scriptscriptstyle -}{:}}
\def\pmcol{\overset{\scriptscriptstyle \pm}{:}}
\def\mpcol{\overset{\scriptscriptstyle \mp}{:}}
\newcommand{\coend}{\begingroup\textstyle\int\endgroup}
\newcommand{\tend}{\begingroup\textstyle\int\endgroup}
\newcommand{\Set}{\mathrm{Set}}
\newcommand{\unsigned}[1]{#1^0}
\newcommand{\joinvar}[1]{\left[#1\right]}
\newcommand{\red}[1]{\textcolor{red}{#1}}
\newcommand{\gray}[1]{\textcolor{gray}{#1}}
\def\unif{\doteq}
\title{A directed type theory for formal category theory}
\author{Dan Licata \and Andreas Nuyts \and Patrick Schultz \and Michael Shulman}
\begin{document}
\maketitle
\section{Introduction}
\label{sec:introduction}
We describe a two-level theory with two kinds of contexts and types.
The first level, whose types we call \textbf{categories}, is a simple linear type theory with an involutive modality, represented judgmentally by assigning variances to variables in the context.
Thus, for instance, a typical term judgment would look like
\[ x \pcol A, y\mcol B, z \pcol C \types t:D \]
As usual, linearity means that all the rules maintain the invariant that each variable in the context is used exactly once in the conclusion.
In particular, there are no contraction and weakening rules; but we do allow an unrestricted (and usually implicit) exchange rule.
We often write $\Psi$ for contexts of category-variables marked with variances (``\textbf{psi}gned contexts''), and we write $\flip\Psi$ for the analogous context with all variances reversed; thus $\flip{(x\pcol A, y\mcol B)} = (x\mcol A, y\pcol B)$.
Substitution into a variable of variance ``$-$'' (a ``contravariant variable'') reverses variances.
For instance, we have
\begin{mathpar}
\inferrule{x\pcol A, y \mcol B \types f(x,y):C \\ z\mcol C \types g(z):D}{x\mcol A, y\pcol B \types g(f(x,y)):D}
\end{mathpar}
If necessary, we may write $\flip{f(x,y)}$ to denote the term $f(x,y)$ with all the variances of its variables reversed; of course this is not by itself a valid term, but only something that can be substituted for a contravariant variable.
The second level is a more complicated sort of linear type theory, whose types we call \textbf{types} (or sometimes \textbf{sets} or \textbf{modules}), and that depends ``quadratically'' on the first level (in a sense made precise below).
Its basic type judgment is
\[ \Psi \types M \type \]
That is, each type depends on some collection of category-variables, with variance.
The basic term judgment for types is
\[ \Delta \cb \Gamma \types M \]
Here $\Gamma$ is a context of type-variables and $M$ is a type.
Unlike the first level, we formulate this second level as a sequent calculus, and instead of including terms in the judgments we regard the derivations themselves as the (normal-form) terms.
Later on we may introduce an equational theory, a term calculus, a third layer of logic with equality, or other techniques.
This sequent calculus is also structurally linear, with no contraction or weakening, but with unrestricted exchange.
The interesting thing happens in the context $\Delta$, which is a list of category-variables \emph{without} variances on which the types in $\Gamma$ and $M$ can depend.
This dependence is ``quadratic'' in the following sense: all the rules maintain the invariant that each variable in $\Delta$ appears \emph{twice} in $\Gamma$ and $M$, and that the two occurrences have opposite variance (where the variance of $\Gamma$ is flipped relative to $M$).
In fact, to be more precise, the actual dependence of $\Gamma$ and $M$ is on a category-context \emph{with} variances that is obtained from $\Delta$ by splitting each variable into two with opposite variance.
The implementation will, of course, use de Bruijn indices; when writing judgments with named variables, if $x:A$ is a variable in $\Delta$ we write $\hat{x}\pcol A$ and $\check{x}\mcol A$ for the two corresponding signed variables that must appear in $\Gamma$ and $M$ (exactly once each).
If $\Psi$ is a signed context, we write $\unsigned{\Psi}$ for the unsigned context obtained by discarding the variances, and also any hats or checks on the variables.
And if the variables in $\Psi$ have hats and checks (which technically means that it was obtained as part of splitting an unsigned context), we write $\dual\Psi$ for the signed context obtained by reversing the variances \emph{and} interchanging hats and checks.
In particular, we always have $\unsigned{\Psi} = \unsigned{(\dual\Psi)}$. % and $\unsigned{\Psi} \splits \Psi,\dual\Psi$.
With this notation, we can express the variable dependence conditions as follows:
\begin{center}
$\Delta\cb\Gamma\types M$ requires as preconditions that
$\begin{array}{c}
\Delta = \unsigned{\Psi}\\
\Psi, \dual\Psi \cong \Psi_1,\Psi_2\\
\flip{\Psi_1} \types \Gamma \ctx\\
\Psi_2 \types M \type
\end{array}$
\end{center}
As above, the notation $\flip{\Psi_1}$ means that we reverse the variances of the variables in $\Psi_1$ (without changing the hats and checks).
%and $\Delta\splits \Psi_1,\Psi_2$ means that each variable $x$ in the unsigned context $\Delta$ becomes a pair $\hat x, \check x$ of opposite variance in the signed context $\Psi_1,\Psi_2$.
For example, if $\Delta = (x:A)$ then we could have $\Psi = (\hat x\pcol A)$, so that $\dual\Psi = (\check x\mcol A)$; we could then take $\Psi_1 = (\check x\mcol A)$ and $\Psi_2=(\hat x\pcol A)$, so that $\flip{\Psi_1} = (\check x\pcol A)$, and so a well-formed judgment could have $\check x \pcol A \types \Gamma \ctx$ and $\hat x \pcol A \types M \type$.
Note that $\check{x}$ is allowed to occur covariantly in $\Gamma$, which counts as a negative occurrence overall, because $\Gamma$ is itself in a contravariant position.
To express rules more readably, we introduce the following combined judgment
\begin{equation}
\inferrule{\flip{\Psi_1} \types \Gamma\ctx \\ \Psi_2 \types N\type}{\Psi_1,\Psi_2 \types \Gamma\ctx, N\type}\label{eq:ctx-type}
\end{equation}
As always, concatenation of contexts is implicitly done up to permutation of variables.
Thus, the preconditions for $\Delta\cb\Gamma\types M$ to make sense can alternatively be written as $\Delta = \unsigned{\Psi}$ where $\Psi,\dual{\Psi} \types \Gamma\ctx, M\type$.
If $\hat{x}$ appears in $M$ and $\check{x}$ appears in $\Gamma$, as above, then our judgment might look like this:
\[ x:A \cb N(\check x) \types M(\hat x) \]
In this case we are simply talking about a morphism of types ``over the category $A$'' --- in semantic terms, a natural transformation.
The dual case when $\check{x}$ appears in $M$ and $\hat{x}$ appears in $\Gamma$ simply represents a natural transformation between \emph{contravariant} functors rather than between covariant ones.
With this case in mind, one might say that we resolve the problem of ``dependent linear type theory'' by stipulating that both the context and the conclusion of a ``linearly dependent judgment'' must \emph{separately} depend ``linearly'' on the same variables.
However, it is the additional freedom to allow $\hat{x}$ and $\check{x}$ to \emph{both} appear in $M$ and neither in $\Gamma$, or vice versa, that gives us the ability to express formal versions of nontrivial facts about category theory.
Semantically, these judgments correspond to what are sometimes called \emph{extraordinary natural transformations}, or simply \emph{extranatural transformations}.
For example, each category $A$ will have a type of morphisms that is contravariant in its first variable and covariant in its second:
\[ x\mcol A, y\pcol A \types \mor A(x,y) \type \]
To express the \emph{composition} of such morphisms then requires an ``extranaturality judgment'':
\[ x:A, y:A, z:A \cb \mor A(\hat x, \check y), \mor A(\hat y, \check z) \types \mor A(\check x, \hat z) \]
The \emph{identity} morphism judgment is similarly extranatural on the other side:
\[ x:A \cb \ec \types \mor A(\check x,\hat x) \]
% More formally, context splitting is defined by the following possibilities:
% \begin{mathpar}
% \inferrule{ }{\cdot \splits \cdot,\cdot}\\
% \inferrule{\Delta \splits \Psi_1,\Psi_2}
% {\Delta, x:A \splits (\Psi_1,\check x \mcol A,\hat x \pcol A),\Psi_2}\and
% \inferrule{\Delta \splits \Psi_1,\Psi_2}
% {\Delta, x:A \splits \Psi_1,(\Psi_2,\check x \mcol A,\hat x \pcol A)}\and
% \inferrule{\Delta \splits \Psi_1,\Psi_2}
% {\Delta, x:A \splits (\Psi_1,\check x \mcol A),(\Psi_2,\hat x \pcol A)}\and
% \inferrule{\Delta \splits \Psi_1,\Psi_2}
% {\Delta, x:A \splits (\Psi_1,\hat x \pcol A),(\Psi_2,\check x \mcol A)}
% \end{mathpar}
% Frequently our rules have a $\unsigned{\Psi}$ in the conclusion where $\Psi$ and/or $\dual{\Psi}$ occur in the premises; when read upwards this means that part of the unsigned context $\Delta$ in the conclusion is identified with $\unsigned{\Psi}$ and hence gets split into $\Psi$ and $\flip{\Psi}$; but this is a special ``paired'' sort of splitting, where of the two variables $\hat x$ and $\check x$ arising from any variable $x$ in $\Delta$, one must be in $\Psi_1$ and one in $\Psi_2$:
% \begin{mathpar}
% \inferrule{ }{\ec \psplit \ec,\ec}\\
% \inferrule{\Delta \psplit \Psi_1,\Psi_2}
% {\Delta, x:A \psplit (\Psi_1\check x \mcol A),(\Psi_2,\hat x \pcol A)}\and
% \inferrule{\Delta \psplit \Psi_1,\Psi_2}
% {\Delta, x:A \psplit (\Psi_1,\hat x \pcol A),(\Psi_2,\check x \mcol A)}
% \end{mathpar}
% Note that when $\Delta \psplit \Psi_1,\Psi_2$, we have $\dual{\Psi_1} = \Psi_2$ and $\Delta = \unsigned{\Psi_1} = \unsigned{\Psi_2}$.
If $a$ is a category-term in context $\Psi$, we will write $\dual{a}$ for the result of interchanging hats and checks in $a$.
This is ``in context $\dual{\Psi}$'' but of course it is not well-typed on its own; like $\flip{a}$, its purpose is to be substituted into a contravariant variable.
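For example, if $a = f(\hat x,\check y)$, then $\dual a = f(\check x,\hat y)$, whereas $\flip a$ is written the same way as $a$ but with the variances of its variables reversed.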
See, for instance, the substitution rule below.
\section{Structural and admissible rules}
\label{sec:structural}
\subsection{Basic rules}
\label{sec:basic-rules}
The basic structural rules for context formation are unsurprising, given our linearity restriction:
\begin{mathpar}
\inferrule{
}{
\ec \types \ec \ctx
}
\and
\inferrule{
\Psi_1 \types \Gamma \ctx\\
\Psi_2 \types M \type\\
}{
\Psi_1,\Psi_2 \types \Gamma,M \ctx
}
\end{mathpar}
For the moment, we are hoping to describe a cut-free sequent calculus with an admissible cut rule, and also admissible substitution for category-variables.
We discuss the cut rule in its own section, since it is somewhat complicated.
But we can state the intended identity rule at this point:
\begin{mathpar}
\inferrule{
\Psi \types M \type\\
}{
\unsigned{\Psi} \cb M[\dual{\Psi}/\Psi] \types M
}
\end{mathpar}
For example, we might have
\begin{mathpar}
\inferrule{
x\pcol A, y\mcol B, z\pcol C \types M(x,y,z)\type\\
}{
x:A, y:B, z:C \cb M(\check x, \hat y, \check z) \types M(\hat x, \check y, \hat z)
}
\end{mathpar}
The substitution rule for category-variables (which we also intend to be admissible) will be
\begin{mathpar}
\inferrule{
\Psi \types a:A \\
\Delta,x:A \cb \Gamma \types M \\
}{
\Delta,\unsigned\Psi \cb \Gamma[a/\hat x,\dual{a}/\check x] \types M[a/\hat x,\dual{a}/\check x]
}
\end{mathpar}
Of course, by quadraticality, each of $\hat x$ and $\check x$ appears exactly once in $\Gamma$ and $M$ combined.
We may abbreviate a substitution $M[a/\hat x,\dual{a}/\check x]$ by $M[a/x]$.
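For instance, given a covariant term $u\pcol B \types f(u):A$, substituting it for $y$ in the composition judgment from the introduction should yield composition across an object in the image of $f$:
\[ x:A, z:A, u:B \cb \mor A(\hat x, f(\check u)), \mor A(f(\hat u), \check z) \types \mor A(\check x, \hat z) \]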
\subsection{Cut rule}
\label{sec:cut-rule}
The basic cut rule looks like this:
\begin{mathpar}
\inferrule{
\Delta_1 \cb \Gamma_1 \types M' \\
\Delta_2 \cb \Gamma_2, M \types N\\\\
\text{$\dual M$ loop-free-unifies with $M'$ reducing $\Delta_1,\Delta_2$ to $\Delta$ by $\theta$}
}{
\Delta \cb \Gamma_1[\theta,\dual\theta],\Gamma_2[\theta,\dual\theta] \types N[\theta,\dual\theta]
}
\end{mathpar}
As usual, we assume at the outset that the variables in $\Delta_1$ and $\Delta_2$ are disjoint.
Then $\theta$ is a substitution for some of the signed variables in $\Delta_1,\Delta_2$, assigning each of them to a term depending on signed versions of other variables in $\Delta_1,\Delta_2$ (i.e.\ with hats and/or checks), that is supposed to be produced by ``loop-free unification'' (to be described).
As usual, we write $\dual M$ and $\dual\theta$ for the results of reversing all hats and checks (here a purely syntactic operation); and $\Delta$ is obtained by omitting from $\Delta_1,\Delta_2$ those variables that are substituted for in $\theta$.
The short description of loop-free unification is that it is like ordinary first-order unification, except that
\begin{enumerate}
\item after deciding on a substitution for a variable, we alter the remaining equations not just by that substitution but also by its dual (reversing hats and checks); and
\item an ``identity'' equation such as $\hat x \unif \hat x$ is a failure rather than simply deleted (the loop-free condition).
\end{enumerate}
To explain this, we begin with simple examples.
The simplest case is when neither $M'$ nor $M$ contains any duplicated variables.
For instance, we can cut like this:
\begin{mathpar}
\inferrule{
x:A \cb \Gamma_1 \types M(\hat x)\\
y:A \cb \Gamma_2, M(\check y) \types N
}{
x:A \cb \Gamma_1, \Gamma_2[\hat x/\hat y] \types N[\hat x/\hat y]
}
\end{mathpar}
where unifying $M(\hat x)$ with $\dual{M(\check y)} = M(\hat y)$ produces $\hat y\mapsto \hat x$.
(If it instead produced $\hat x\mapsto \hat y$, then the conclusion would be the $\alpha$-variant $y:A \cb \Gamma_1[\check y/\check x], \Gamma_2 \types N$.)
We can also cut like this:
\begin{mathpar}
\inferrule{
x:A \cb \Gamma_1 \types M(f(\hat x))\\
y:B \cb \Gamma_2, M(\hat y) \types N
}{
x:A \cb \Gamma_1, \Gamma_2[f(\hat x)/\check y] \types N[f(\hat x)/\check y]
}
\end{mathpar}
Here the unification produces $\check y\mapsto f(\hat x)$, and it must be that $f$ is contravariant, i.e.\ that $u \mcol A \types f(u) :B$.
It may also happen that there are variable duplications in the type being cut.
For instance, we can cut:
\begin{mathpar}
\inferrule{
x:A,y:A \cb \Gamma_1 \types M(\check x, \hat y)\\
z:A \cb \Gamma_2, M(\hat z, \check z) \types N
}{
z:A \cb \Gamma_1[\hat z/\hat x, \check z/\check y], \Gamma_2 \types N
}
\end{mathpar}
Here we have unified $M(\check x,\hat y)$ with $M(\check z, \hat z)$ to get $(\check x \mapsto \check z,\hat y \mapsto \hat z)$.
A different path of unification could start by choosing $\check z \mapsto \check x$, and then the dual of this equation would alter the remaining equation $\hat y \unif \hat z$ to $\hat y \unif \hat x$.
This would result in an eventual substitution such as $(\check z \mapsto \check x, \hat y \mapsto \hat x)$, giving the $\alpha$-variant conclusion $x:A \cb \Gamma_1[\check x/\check y], \Gamma_2 \types N$.
A more complicated allowable cut is
\begin{mathpar}
\inferrule{
x:A, y:A \cb \Gamma_1 \types M(\hat x, \check y, \hat y)\\
z:A, w:A \cb \Gamma_2, M(\check z, \hat z, \check w) \types N
}{
z:A \cb \Gamma_1[\check z/\check x], \Gamma_2[\hat z/\hat w] \types N[\hat z/\hat w]
}
\end{mathpar}
Here unifying $M(\hat x, \check y, \hat y)$ with $M(\hat z, \check z, \hat w)$ has produced $(\hat x \mapsto\hat z, \check y \mapsto \check z, \hat w \mapsto \hat z)$.
Finally, here is an example where the loop-free condition fails:
\begin{mathpar}
\inferrule{
x:A \cb \Gamma_1 \types M(\check x, \hat x) \\
y:A \cb \Gamma_2, M(\hat y, \check y) \types N
}{
???
}
\end{mathpar}
Here the unification of $M(\check x, \hat x)$ with $M(\check y,\hat y)$ may begin by choosing $\hat x \mapsto \hat y$, but then the remaining equation is substituted to become $\check y \unif \check y$, which fails the loop-free condition.
\section{Type formers}
\label{sec:type-formers}
Now we are ready to start giving the rules for some type formers.
Since we want substitution into category-variables to be admissible, many rules must incorporate a substitution; but they are easier to understand before that substitution has been incorporated.
Thus, we often give both versions.
In the version without substitutions, we also generally omit the judgments of well-formedness of the types and contexts.
Note that the version with substitutions included can usually be derived fairly automatically from the other.
\subsection{Morphism types}
\label{sec:morphism-types}
We begin with the morphism types.
The formation and right rules are straightforward given the expected variance:
\begin{mathpar}
\inferrule{A\cat}{x\mcol A, y\pcol A \types \mor A(x,y) \type}\and
\inferrule{A\cat}{x: A \cb \ec \types \mor A(\check x,\hat x)}\and
\end{mathpar}
When we incorporate substitutions, these become:
\begin{mathpar}
\inferrule{A\cat \\ \Psi_1\types a:A \\ \Psi_2 \types b:A}{\flip{\Psi_1},\Psi_2 \types \mor A(a,b) \type}\and
% \inferrule{\Psi \types a:A \\ \Delta\psplit \Psi,\Psi'}{\Delta \cb \ec \types \mor A(a,a)}\and
\inferrule{
\Psi \types a:A \\
}{
\unsigned\Psi \cb \ec \types \mor A(\dual a,a)
}
\end{mathpar}
There are three versions of the left rule.
The first ``two-sided'' one is a ``directed'' analogue of the elimination rule for equality due to Lawvere and Martin-L\"of:
\begin{mathpar}
\inferrule{
\Delta,x:A \cb \Gamma[\hat x/\hat y] \types M[\hat x/\hat y]
}{
\Delta,x:A,y:A\cb \Gamma,\mor A(\hat x, \check y) \types M
}\and
\inferrule{
\Psi,\dual\Psi,\hat x\pcol A,\check x \mcol A \types \Gamma\ctx, M\type\\
\Psi_a \types a:A \\
\Psi_b \types b:A \\
\unsigned\Psi,x:A \cb \Gamma \types M
}{
\unsigned{\Psi},\unsigned{\Psi_a},\unsigned{\Psi_b}\cb \Gamma[\dual{a}/\check x,b/\hat{x}],\mor A(a,\dual b)
\types M[\dual{a}/\check x,b/\hat{x}]
}\and
\end{mathpar}
Note that although we write substitutions in both $\Gamma$ and $M$, the linearity means that each variable such as $\hat x$ or $\check x$ can only occur in one of the two.
For instance, we can use this rule to define composition of morphisms:
\begin{equation}
\inferrule{\inferrule{\inferrule{x\pcol A\types x:A}{x:A\cb \ec \types \mor A(\check x, \hat x)}}
{x:A,y:A \cb \mor A(\hat x, \check y) \types \mor A(\check x, \hat y)}}
{x:A, y:A, z:A \cb \mor A(\hat x, \check y), \mor A(\hat y, \check z) \types \mor A(\check x, \hat z)}\label{eq:composition}
\end{equation}
Starting from the bottom we have the left rule on $y,z$, then the left rule again on $x,y$, then the right rule on $x$.
Note that ${x:A,y:A \cb \mor A(\hat x, \check y) \types \mor A(\check x, \hat y)}$ is an instance of the identity rule, so if we had that rule postulated we could stop there; but if identity is to be admissible then it would reduce to the above complete derivation.
Similarly, we have the functorial action of any judgment $x\pcol A \types f(x):B$:
\begin{equation}
\label{eq:functor}
\inferrule{\inferrule{x\pcol A \types f(x):B}
{x:A \cb \ec \types \mor B(f(\check x),f(\hat x))}}
{x:A, y:A \cb \mor A(\hat x, \check y) \types \mor B(f(\check x),f(\hat y))}
\end{equation}
And the action of morphisms on types (directed transport):
\begin{equation}
\label{eq:transport}
\inferrule{\inferrule{
\Delta,u\pcol A \types M\type
}{
\Delta,x:A \cb M[\check x/u] \types M[\hat x/u]
}}{
\Delta,x:A,y:A \cb M[\check x/u], \mor A(\hat x,\check y) \types M[\hat y/u]
}
\end{equation}
We also consider ``one-sided'' left rules for morphism types (directed analogues of the Paulin-Mohring rule for identity types):
\begin{mathpar}
\inferrule{\Delta\cb \Gamma[\dual b/\check x] \types M[\dual b/\check x]}{\Delta,x:A\cb \Gamma,\mor A(\hat x, \dual b) \types M}\and
\inferrule{\Delta\cb \Gamma[a/\hat x] \types M[a/\hat x]}{\Delta,x:A\cb \Gamma,\mor A(a, \check x) \types M}\and
\end{mathpar}
And with substitutions:
\begin{mathpar}
\inferrule{
\Psi,\dual\Psi,\Psi_b,u\mcol A \types \Gamma\ctx, M\type\\
\Psi_a \types a:A\\
\unsigned\Psi, \unsigned{\Psi_b} \cb \Gamma[\dual b/ u] \types M[\dual b/ u]
}{
\unsigned{\Psi},\unsigned{\Psi_b},\unsigned{\Psi_a}\cb \Gamma[\dual a/ u],\mor A(a, \dual b) \types M[\dual a/ u]
}\and
\inferrule{
\Psi,\dual\Psi,\dual{\Psi_a},v\pcol A \types \Gamma\ctx, M\type\\
\Psi_b \types b:A\\
\unsigned\Psi, \unsigned{\Psi_a}\cb \Gamma[a/v] \types M[a/v]
}{
\unsigned\Psi,\unsigned{\Psi_a},\unsigned{\Psi_b}\cb \Gamma[b/v],\mor A(a, \dual b) \types M[b/v]
}\and
\end{mathpar}
Note that these can be regarded as the two Yoneda lemmas, one for covariant functors and one for contravariant functors.
Either one-sided rule makes the two-sided rule admissible, just as the Paulin-Mohring rule implies the Martin-L\"of one.
For instance, here is a derivation of the two-sided rule from the second one-sided rule, using substitution for category-variables:
\begin{mathpar}
\inferrule{
\Psi_b \types b:A\\
% \inferrule{
% \Psi_a \types a:A \\
% \Psi,\dual\Psi,\hat x\pcol A,\check x \mcol A \types \Gamma\ctx, M\type\\
% }{
% \Psi,\dual\Psi,\dual{\Psi_a},\hat x\pcol A \types \Gamma[\dual{a}/\check x]\ctx, M[\dual{a}/\check x]\type\\
% }\\
\inferrule*{
\Psi_a \types a:A \\
\unsigned\Psi,x:A \cb \Gamma \types M
}{
\unsigned\Psi, \unsigned{\Psi_a}\cb \Gamma[\dual{a}/\check x][a/\hat x] \types M[\dual{a}/\check x][a/\hat x]
}
}{
\unsigned{\Psi},\unsigned{\Psi_a},\unsigned{\Psi_b}\cb \Gamma[\dual{a}/\check x][b/\hat{x}],\mor A(a,\dual b)
\types M[\dual{a}/\check x][b/\hat{x}]
}
\end{mathpar}
Conversely, here is a proof of the second one-sided rule when the variable is on the right (i.e.\ $v$ occurs in $M$ rather than $\Gamma$):
\begin{mathpar}
\scriptsize
\inferrule{
% \Psi,\dual\Psi,\dual{\Psi_a},v\pcol A \types \Gamma\ctx, M\type\\
\unsigned\Psi, \unsigned{\Psi_a}\cb \Gamma \types M[a/v]\\
\inferrule*{
\Psi_b \types b:A\\
\inferrule*{
\inferrule{ }{\unsigned{\Psi_M},x:A \cb M[\check x/v] \types M[\hat x/v]}\\
}{
\unsigned{\Psi_M},x:A,y:A \cb M[\check x/v], \mor A(\hat x,\check y) \types M[\hat y/v]
}
}{
\unsigned{\Psi_M},x:A,\unsigned{\Psi_b} \cb M[\check x/v], \mor A(\hat x,\dual b) \types M[b/v]
}
}{
\unsigned\Psi,\unsigned{\Psi_a},\unsigned{\Psi_b}\cb \Gamma,\mor A(a, \dual b) \types M[b/v]
}\and
\end{mathpar}
We've left the division of $\Psi$ into $\Psi_M$ and $\Psi_\Gamma$ implicit; the unification at the cut gives $\hat x \mapsto a$ and matches up all the other variables in $\Psi_M$ with their counterparts in $\Psi$, so that $\Psi_M$ disappears in the final conclusion.
(Can we do something analogous when $v$ appears in $\Gamma$? At least if we assume $\Gamma$ is a single type, we can. I (Patrick) think this could probably be made to work in general.)
\begin{mathpar}
\scriptsize
\inferrule{
% \Psi,\dual\Psi,\dual{\Psi_a},v\pcol A \types \Gamma\ctx, M\type\\
\inferrule*{
\Psi_b \types b:A\\
\inferrule*{
\inferrule{ }{\unsigned{\Psi_N},x:A \cb N[\hat x/v] \types N[\check x/v]}\\
}{
\unsigned{\Psi_N},x:A,y:A \cb N[\hat y/v], \mor A(\hat x,\check y) \types N[\check x/v]
}
}{
\unsigned{\Psi_N},x:A,\unsigned{\Psi_b} \cb N[b/v], \mor A(\hat x,\dual b) \types N[\check x/v]
}\\
\unsigned\Psi, \unsigned{\Psi_a}\cb N[a/v] \types M
}{
\unsigned\Psi,\unsigned{\Psi_a},\unsigned{\Psi_b}\cb N[b/v],\mor A(a, \dual b) \types M
}\and
\end{mathpar}
Finally, note that with morphism types, we can make the categories into a 2-category: the morphisms are judgments $x\pcol A \types f(x):B$, and the 2-cells from $f$ to $g$ are the judgments $x:A \cb \ec \types \mor B(f(\check x),g(\hat x))$.
The identity 2-cell is the $\mor B$-right rule, while the composite of 2-cells (along a morphism) is obtained by cutting with~\eqref{eq:composition}.
Prewhiskering of a 2-cell is a substitution, while postwhiskering is a cut with~\eqref{eq:functor}.
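For instance, the composite along $g$ of 2-cells $x:A \cb \ec \types \mor B(f(\check x),g(\hat x))$ and $x:A \cb \ec \types \mor B(g(\check x),h(\hat x))$ should come out, after performing the two cuts, as
\[ x:A \cb \ec \types \mor B(f(\check x),h(\hat x)). \]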
\subsection{Tensor}
\label{sec:tensor}
The rules for the plain (or ``non-binding'') tensor are very similar to those in ordinary linear logic.
\begin{mathpar}
\inferrule{
\Psi_M \types M\type \\ \Psi_N \types N\type
}{
\Psi_M,\Psi_N \types M\otimes N\type
}\and
\inferrule{
\Psi,\dual\Psi \types (\Gamma,M,N)\ctx, C\type \\
\unsigned\Psi \cb \Gamma,M,N \types C
}{
\unsigned\Psi \cb \Gamma, M\otimes N \types C
}\and
\inferrule{
\Psi_M,\dual{\Psi_M} \types \Gamma_M\ctx, M\type \\
\Psi_N,\dual{\Psi_N} \types \Gamma_N\ctx, N\type \\\\
\unsigned{\Psi_M} \cb \Gamma_M \types M \\
\unsigned{\Psi_N} \cb \Gamma_N \types N
}{
\unsigned{\Psi_M},\unsigned{\Psi_N} \cb \Gamma_M,\Gamma_N \types M\otimes N
}\and
\end{mathpar}
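For example, omitting the well-formedness premises, the right rule tensors together two instances of the $\mor$-right rule:
\begin{mathpar}
\inferrule{
x:A \cb \ec \types \mor A(\check x,\hat x)\\
y:B \cb \ec \types \mor B(\check y,\hat y)
}{
x:A, y:B \cb \ec \types \mor A(\check x,\hat x) \otimes \mor B(\check y,\hat y)
}
\end{mathpar}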
\subsection{Coend type}
\label{sec:coend}
The coend type is significantly trickier than either of the examples we've considered so far.
It can be thought of as a sort of ``quadratic $\Sigma$-type'' that binds a category-variable $x$ in both covariant and contravariant positions at the same time.
This intuition is sufficient for the formation and left rules.
\begin{mathpar}
\inferrule{
\Psi, \hat x\pcol A, \check x\mcol A \types M \type \\
}{
\Psi \types \coend^{x:A} M \type
}\and
\inferrule{
\Psi,\dual\Psi,\hat x\pcol A,\check x\mcol A \types (\Gamma,M) \ctx, C\type\\
\unsigned\Psi, x:A \cb \Gamma, M \types C
}{
\unsigned\Psi \cb \Gamma, \coend^{x:A} M \types C
}\and
\end{mathpar}
However, the right rule is trickier to state correctly.
In order to get the necessary generality, we need to allow the right rule to ``decouple'' the two occurrences of $x$ that are bound in the coend.
Our first approximation to the rule might therefore be:
\begin{equation}\label{eq:coend-right-firsttry}
\inferrule{
\Delta, x : A, y : A \cb \Gamma[\check x / \check z, \hat y / \hat z] \types M[\check x / \check z , \hat x / \hat w, \check y / \check w , \hat y / \hat z]
}{
\Delta, z : A \cb \Gamma \types \coend^{w:A} M
}
\end{equation}
% or equivalently
% \begin{equation}
% \inferrule{
% \Delta, x : A, y : A \cb \Gamma \types M
% }{
% \Delta, z : A \cb \Gamma[\check z / \check x, \hat z / \hat y] \types \coend^{w:A} M[\check z / \check x , \hat w / \hat x, \check w / \check y , \hat z / \hat y]
% }
% \end{equation}
That is, to construct an element of $\coend^{w:A} M$, we choose some other variable $z$ that may appear in $M$ or in the context, and we alter the pairings
\[
\begin{array}{ccc}
\hat w &\leftrightarrow& \check w\\\\
\check z &\leftrightarrow& \hat z
\end{array}
\quad\text{to become instead}\quad
\begin{array}{ccc}
\hat x && \check y\\
\updownarrow && \updownarrow \\
\check x && \hat y
\end{array}
\]
The simplest case of this is when $\hat z$ and $\check z$ both occur in the context $\Gamma$.
In this case, it can be used to derive a functoriality result for coends:
\begin{mathpar}
\inferrule{
\inferrule{
x:A,y:A \cb M(\hat y, \check x) \types N(\check y, \hat x)
}{
z:A \cb M(\hat z, \check z) \types \coend^{w:A} N(\check w, \hat w)
}}{
\ec \cb \coend^{z:A} M(\hat z, \check z) \types \coend^{w:A} N(\check w, \hat w)
}
\end{mathpar}
(Hmm, should the \emph{bound} occurrences of $z$ on the final line still have their variances reversed because of being in contravariant position in the context? That seems wrong\dots)
The case when $\hat z$ or $\check z$ appears in $M$ instead is required, for instance, to derive the backwards direction of the co-Yoneda lemma.
The forwards direction is easy using the left rules for tensor and coend:
\begin{mathpar}
\inferrule{
\inferrule{
\inferrule{ }{x:A \cb M(\check x) \types M(\hat x)}
}{
x:A,y:A \cb M(\check y) \otimes \hom_A(\hat y,\check x) \types M(\hat x)
}
}{
x:A \cb \coend^{y:A} M(\check y) \otimes \hom_A(\hat y,\check x) \types M(\hat x)
}
\end{mathpar}
But the backwards direction requires the right rule for both, and in the case of the coend we have $\hat z$ appearing in the type being coended rather than in the context:
\begin{mathpar}
\inferrule{
\inferrule*{
\inferrule{ }{x:A \cb M(\check x) \types M(\hat x)}\\
\inferrule{ }{y:A\cb \cdot \types \hom_A(\check y,\hat y)}
}{
x:A,y:A \cb M(\check x) \types M(\hat x) \otimes \hom_A(\check y,\hat y)
}
}{
z:A \cb M(\check z) \types \coend^{w:A} M(\hat w)\otimes \hom_A(\check w,\hat z)
}\and
\end{mathpar}
To prove that these are inverses requires either extensive manipulation of cut-eliminations or a term calculus, so we postpone it until later.
However,~\eqref{eq:coend-right-firsttry} is insufficient (even before we worry about adding substitutions).
For one thing, it doesn't seem to suffice to prove a more general functoriality of coends in which the category also varies along a functor $f:C\to A$; we would like to argue as follows:
\begin{mathpar}
\inferrule{
\inferrule{
x:C,y:C \cb M(\hat y, \check x) \types N(f(\check y), f(\hat x))
}{
z:C \cb M(\hat z, \check z) \types \coend^{w:A} N(\check w, \hat w)
}
}{
\ec \cb \coend^{z:C} M(\hat z, \check z) \types \coend^{w:A} N(\check w, \hat w)
}
\end{mathpar}
but the first step is not justified by the right rule we have now, since $\check w$ and $\hat w$ have been replaced not by $\check y$ and $\hat x$ but by functions of them.
A rule that allows this is
\begin{equation}\label{eq:coend-right-secondtry}
\inferrule{
u\pcol C \types a:A\\\\
\Delta, x : C, y : C \cb \Gamma[\check x / \check z, \hat y / \hat z] \types M[\check x / \check z , a[\hat x/u] / \hat w, a[\check y/u] / \check w , \hat y / \hat z]
}{
\Delta, z : C \cb \Gamma \types \coend^{w:A} M
}
\end{equation}
Of course, in general we want to allow $a$ to depend on more than one variable, in which case $C$ needs to be replaced everywhere by a whole context.
\begin{equation}\label{eq:coend-right}
\inferrule{
\Psi_1,\dual{\Psi_1},\Psi_2,\dual{\Psi_2} \cong \Psi_\Gamma, \Psi_M\\
\flip{\Psi_\Gamma} \types \Gamma \ctx\\
\Psi_M, \hat w\pcol A, \check w \mcol A \types M \type \\\\
\Psi_2 \types a:A\\
\Psi_x\cong \Psi_y\cong\Psi_2\\\\
\unsigned{\Psi_1}, \unsigned\Psi_x, \unsigned{\Psi_y} \cb \Gamma[\dual\Psi_x / \dual{\Psi_2}, \Psi_y / \Psi_2] \types M[\dual{\Psi_x} / \dual{\Psi_2} , a[\Psi_x/\Psi_2] / \hat w, \dual{a[\Psi_y/\Psi_2]} / \check w , \Psi_y / \Psi_2]
}{
\unsigned{\Psi_1}, \unsigned{\Psi_2} \cb \Gamma \types \coend^{w:A} M
}
\end{equation}
(We are unable to use the single-judgment version~\eqref{eq:ctx-type} of the preconditions, since we need to ensure that $\hat w$ and $\check w$ appear in $M$ rather than in $\Gamma$.)
Note that an extra generalization has also happened automatically: unlike $\hat z$ and $\check z$, each of which can appear in only one of $\Gamma$ and $M$, the variables in $\Psi_2$ and $\dual{\Psi_2}$ could each be distributed with some in $\Gamma$ and some in $M$.
Furthermore, this rule now includes substitutions already, so we don't need to incorporate them by a further step.
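In particular, taking $\unsigned{\Psi_1} = \Delta$ and $\Psi_2 = (u\pcol C)$ a single covariant variable (with $\Psi_x = (x\pcol C)$ and $\Psi_y = (y\pcol C)$) should recover~\eqref{eq:coend-right-secondtry}, with $u$ playing the role of $z$ there.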
% It seems to me that this rule~\eqref{eq:coend-right} is basically equivalent to Patrick's rule, below, if we (1) identify $\Psi_2$ with $\Psi_x$, so that only half of the substitutions need to happen, (2) make the substitutions not involving $a$ happen in the conclusion rather than the premises, and (3) break up the contexts by where the variables appear so that we aren't writing no-op substitutions.
% \begin{mathpar}
% \inferrule{
% \Delta_a \iso \Delta'_a \\
% \Delta_a \psplit (\flip{\Phi_\Gamma},\Phi_M),\Psi_a \\
% \Delta'_a \psplit \flip{\Psi_a'},(\Phi_\Gamma',\flip{\Phi_M'}) \\
% \Delta \splits \Psi_M,\flip{\Psi_{\Gamma}} \\\\
% \Psi_a \types a:A \\
% \Psi_{\Gamma},\Phi_\Gamma,\Phi_\Gamma' \types \Gamma\ctx \\
% \Psi_M,\Phi_M,\Phi_M',\hat x\pcol A,\check x\mcol A \types M\type \\\\
% \Delta,\Delta_a,\Delta'_a \cb \Gamma \types M[a/\hat x,a[\flip{\Psi_a'}/\Psi_a]/\check x]
% }{
% \Delta,\Delta_a \cb \Gamma[\Phi_\Gamma/\Phi_\Gamma'] \types \coend^{x:A} M[\Phi_M/\Phi_M']
% }\and
% \inferrule{
% \Phi_\Gamma \iso \Phi_\Gamma' \\
% \Phi_M \iso \Phi_M' \\
% \Phi_\Gamma, \flip{\Phi_M} \types a:A \\\\
% \Psi_{\Gamma},\Phi_\Gamma,\flip{{\Phi_\Gamma'}} \types \Gamma\ctx \\\\
% \Psi_M,\Phi_M,\flip{{\Phi_M'}},\hat x\pcol A,\check x\mcol A \types M\type \\\\
% \joinvar{\Psi_M, \flip{\Psi_\Gamma}},\unsigned{\Phi_\Gamma}, \unsigned{\Phi_M},\unsigned{{\Phi_\Gamma'}}, \unsigned{{\Phi_M'}} \cb \Gamma \types M[a/\hat x,a[\Phi_\Gamma'/\Phi_\Gamma, \Phi_M'/\Phi_M]/\check x]
% }{
% \joinvar{\Psi_M, \flip{\Psi_\Gamma}},\unsigned{\Phi_\Gamma}, \unsigned{\Phi_M} \cb \Gamma[\Phi_\Gamma/\Phi_\Gamma'] \types \coend^{x:A} M[\Phi_M/\Phi_M']
% }\and
% \end{mathpar}
\subsection{Binding tensor types}
\label{sec:tensor-types}
The binding tensor $M\otimes_{x:A} N$ can be defined as $\coend^{x:A} M\otimes N$.
Its derived rules, with their derivations, are:
\begin{mathpar}
\inferrule{
\inferrule{
\Psi_M, \hat x\pcol A \types M\type\\
\Psi_N, \check x\mcol A \types N\type
}{
\Psi_M, \Psi_N, \hat x\pcol A, \check x \mcol A \types M \otimes N \type
}
}{
\Psi_M, \Psi_N \types \coend^{x:A} M\otimes N \type
}\and
\inferrule{
\inferrule{
\unsigned{\Psi}, x:A \cb \Gamma, M, N \types C
}{
\unsigned{\Psi}, x:A \cb \Gamma, M\otimes N \types C
}
}{
\unsigned{\Psi} \cb \Gamma, \coend^{x:A} M\otimes N \types C
}\and
\scriptsize
\inferrule{
% \Psi_1,\dual{\Psi_1},\Psi_2,\dual{\Psi_2} \cong \Psi_\Gamma, \Psi_{MN}\\
% \flip{\Psi_\Gamma} \types \Gamma \ctx\\
% \Psi_{MN}, \hat w\pcol A, \check w \mcol A \types M\otimes N \type \\\\
% \Psi_2 \types a:A\\
% \Psi_x\cong \Psi_y\cong\Psi_2\\\\
\inferrule{
\unsigned{\Psi_M}, \unsigned{\Psi_2} \cb \Gamma_M \types M[a / \hat w]\\
\unsigned{\Psi_N}, \unsigned{\Psi_2} \cb \Gamma_N \types N[\dual a / \check w]
}{
\unsigned{\Psi_M}, \unsigned{\Psi_N}, \unsigned{\Psi_x}, \unsigned{\Psi_y} \cb \Gamma_M[\dual\Psi_x / \dual{\Psi_2}], \Gamma_N[\Psi_y / \Psi_2] \types (M\otimes N)[\dual{\Psi_x} / \dual{\Psi_2} , a[\Psi_x/\Psi_2] / \hat w, \dual{a[\Psi_y/\Psi_2]} / \check w , \Psi_y / \Psi_2]
}
}{
\unsigned{\Psi_M}, \unsigned{\Psi_N}, \unsigned{\Psi_2} \cb \Gamma_M, \Gamma_N \types \coend^{w:A} M\otimes N
}
\end{mathpar}
(In the right rule, we have renamed $\Psi_x$ and $\Psi_y$ back to $\Psi_2$ on the top line, for readability.)
More generally, for a context $\Phi=(x_1:A_1,\dots,x_n:A_n)$ we can write $M\otimes_{\Phi} N$ for $\coend^{x_1:A_1} \cdots \coend^{x_n:A_n} M\otimes N$.
Its derived rules should be the following:
\begin{mathpar}
\inferrule{
\Psi_M, \Phi \types M\type \\
\Psi_N, \dual{\Phi} \types N\type
}{
\Psi_M, \Psi_N \types M \otimes_\Phi N\type
}\and
\inferrule{
\unsigned{\Psi}, \unsigned{\Phi} \cb \Gamma, M, N \types C
}{
\unsigned{\Psi} \cb \Gamma, M\otimes_\Phi N \types C
}\and
\inferrule{
\Psi_2 \types \alpha : \Phi\\
\unsigned{\Psi_M}, \unsigned{\Psi_2} \cb \Gamma_M \types M[\alpha/\Phi]\\
\unsigned{\Psi_N}, \unsigned{\Psi_2} \cb \Gamma_N \types N[\dual \alpha/\dual \Phi]
}{
\unsigned{\Psi_M}, \unsigned{\Psi_N}, \unsigned{\Psi_2} \cb \Gamma_M, \Gamma_N \types M\otimes_\Phi N
}
\end{mathpar}
In the other direction, the non-binding tensor is of course recoverable from the binding one by tensoring over the empty context.
Less obviously, the coend should be derivable by tensoring over $(\hat x\pcol A,\check x\mcol A)$ with $\mor A(\check x, \hat x)$, but we should write out the details.
\subsection{End type}
\label{sec:end-type}
The end type is dual to the coend type.
In particular, its formation rule looks the same, and its right rule looks like the left rule for the coend type.
\begin{mathpar}
\inferrule{
\Psi,\hat x\pcol A,\check x\mcol A \types M \type \\
}{
\Psi \types \tend_{x:A} M \type
}\and
\inferrule{
\Psi,\dual\Psi, \hat x \pcol A, \check x \mcol A \types \Gamma\ctx, M\type\\
\unsigned\Psi, x:A \cb \Gamma \types M
}{
\unsigned\Psi \cb \Gamma \types \tend_{x:A} M
}\and
\end{mathpar}
Its left rule similarly looks like the right rule for the coend; first without substitutions:
\begin{mathpar}
\inferrule{
\Delta, x : A, y : A \cb \Gamma[\check x / \check z , \hat y / \hat z], M[\check x / \check z , \hat x / \hat w , \check y / \check w , \hat y / \hat z] \types C[\check x / \check z , \hat y / \hat z]
}{
\Delta, z : A \cb \Gamma, \tend_{w : A} M \types C
}
\end{mathpar}
and then with them:
\begin{mathpar}
\scriptsize
\inferrule{
\Psi_1,\dual{\Psi_1},\Psi_2,\dual{\Psi_2} \cong \Psi_\Gamma, \Psi_M\\
\flip{\Psi_\Gamma} \types \Gamma \ctx\\
\Psi_M, \hat w\pcol A, \check w \mcol A \types M \type \\\\
\Psi_2 \types a:A\\
\Psi_x\cong \Psi_y\cong\Psi_2\\\\
\unsigned{\Psi_1}, \unsigned\Psi_x, \unsigned{\Psi_y} \cb \Gamma[\dual\Psi_x / \dual{\Psi_2}, \Psi_y / \Psi_2], M[\dual{\Psi_x} / \dual{\Psi_2} , a[\Psi_x/\Psi_2] / \hat w, \dual{a[\Psi_y/\Psi_2]} / \check w , \Psi_y / \Psi_2] \types N[\dual\Psi_x / \dual{\Psi_2}, \Psi_y / \Psi_2]
}{
\unsigned{\Psi_1}, \unsigned{\Psi_2} \cb \Gamma, \tend_{w:A} M \types N
}
\end{mathpar}
% \begin{mathpar}
% \inferrule{
% \Delta_a \iso \Delta'_a \\\\
% \Delta_a \psplit (\flip{\Psi_1},\flip{\Psi_2},\Psi_3),\Psi_a \\
% \Delta'_a \psplit \flip{\Psi_a'},(\Psi_1',\Psi_2',\flip{\Psi_3'}) \\
% \Delta \splits \flip{\Psi_{\Gamma}},\flip{\Psi_M},\Psi_C \\\\
% \Psi_a \types a:A \\ \Psi_{\Gamma},\Psi_1,\Psi_1' \types \Gamma\ctx \\
% \Psi_M,\Psi_2,\Psi'_2,\hat x\pcol A,\check x\mcol A \types M\type \\
% \Psi_C,\Psi_3,\Psi'_3 \types C\type \\\\
% \Delta,\Delta_a,\Delta'_a \cb \Gamma, M[a/\hat x,a[\flip{\Psi_a'}/\Psi_a]/\check x] \types C
% }{
% \Delta,\Delta_a \cb \Gamma[\Psi_1/\Psi'_1], \tend_{x:A}M[\Psi_2/\Psi'_2] \types C[\Psi_3/\Psi'_3]
% }\and
% \end{mathpar}
\subsection{Function types}
\label{sec:function-types}
The non-binding function type acts mostly like a linear function type, except that it reverses the variance of category-variables in its domain.
\begin{mathpar}
\inferrule{
\Psi_M \types M\type \\ \Psi_N \types N\type
}{
\flip{\Psi_M},\Psi_N \types M\multimap N\type
}\and
\inferrule{
\Psi, \dual\Psi \types (\Gamma,M)\ctx, N\type\\
\unsigned\Psi \cb \Gamma, M \types N
}{
\unsigned\Psi \cb \Gamma \types M\multimap N
}\and
\inferrule{
\Psi_1, \dual{\Psi_1} \types \Gamma_1 \ctx, M\type\\
\Psi_2, \dual{\Psi_2} \types (\Gamma_2, N)\ctx, C \type\\\\
\unsigned{\Psi_1} \cb \Gamma_1 \types M \\
\unsigned{\Psi_2} \cb \Gamma_2,N \types C
}{
\unsigned{\Psi_1},\unsigned{\Psi_2} \cb \Gamma_1,\Gamma_2,M\multimap N \types C
}
\end{mathpar}
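For instance, applying the right rule to the composition judgment~\eqref{eq:composition} should internalize composition in its second argument:
\begin{mathpar}
\inferrule{
x:A,y:A,z:A \cb \mor A(\hat x,\check y), \mor A(\hat y,\check z) \types \mor A(\check x,\hat z)
}{
x:A,y:A,z:A \cb \mor A(\hat x,\check y) \types \mor A(\hat y,\check z) \multimap \mor A(\check x,\hat z)
}
\end{mathpar}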
\section{Category formers}
\label{sec:category-formers}
\subsection{Opposites}
\label{sec:opposites}
\subsection{Tensor products}
\label{sec:tensor-products}
\subsection{Exponentials}
\label{sec:exponentials}
\subsection{Collages}
\label{sec:collages}
\subsection{The type classifier}
\label{sec:type-classifier}
\section{Without terms}
\label{sec:without-terms}
We can do some category theory without a term calculus.
All we need is the fact that the left/right rules for each type former express a universal property.
In other words, for a positive type like $\mor A$, tensor, and coend:
\begin{itemize}
\item Applying the left rule, then cutting with the right rule, gives back what we started with.
This should be essentially the principal cut corresponding to this type former in the proof of cut admissibility.
\item Cutting with a right rule, then applying the left rule, also gives back what we started with.
Equivalently, two sequents containing such a type on the left are equal if they become equal upon cutting with the right rule.
This should dually follow from the proof of admissibility for identity.
\end{itemize}
The situation with negative types like hom and end is the same but with the roles of right and left switched.
(We also need the fact that cut is associative, unital, and commutes with things like substitution.)
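For instance, instantiated at the tensor, the first principle says that cutting a right-rule conclusion $\unsigned{\Psi_M},\unsigned{\Psi_N} \cb \Gamma_M,\Gamma_N \types M\otimes N$ against a left-rule conclusion $\unsigned\Psi \cb \Gamma, M\otimes N \types C$ should reduce to cutting the premises $\unsigned{\Psi_M} \cb \Gamma_M \types M$ and $\unsigned{\Psi_N} \cb \Gamma_N \types N$ directly into $\unsigned\Psi \cb \Gamma, M, N \types C$.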
\subsection{The co-Yoneda lemma}
\label{sec:co-yoneda-noterms}
Recall that in \cref{sec:morphism-types} we defined the sequents going in both directions of the co-Yoneda lemma:
\begin{mathpar}
\inferrule{
\inferrule{
\inferrule{ }{x:A \cb M(\check x) \types M(\hat x)}
}{
x:A,y:A \cb M(\check y) \otimes \hom_A(\hat y,\check x) \types M(\hat x)
}
}{
x:A \cb \coend^{y:A} M(\check y) \otimes \hom_A(\hat y,\check x) \types M(\hat x)
}\and
\inferrule{
\inferrule*{
\inferrule{ }{x:A \cb M(\check x) \types M(\hat x)}\\
\inferrule{ }{y:A\cb \cdot \types \hom_A(\check y,\hat y)}
}{
x:A,y:A \cb M(\check x) \types M(\hat x) \otimes \hom_A(\check y,\hat y)
}
}{
z:A \cb M(\check z) \types \coend^{w:A} M(\hat w)\otimes \hom_A(\check w,\hat z)
}\and
\end{mathpar}
If we cut these together in the order
\[ M(\check z) \types \coend^{w:A} M(\hat w)\otimes \hom_A(\check w,\hat z) \types M(\hat z) \]
we have a right-then-left for the coend, and upon reducing that we have a right-then-left for the tensor, which reduces back to the identity.
And if we cut them in the other order
\[ \coend^{w:A} M(\hat w)\otimes \hom_A(\check w,\hat z) \types M(\hat z) \types \coend^{w:A} M(\hat w)\otimes \hom_A(\check w,\hat z) \]
then we can test its equality with the identity by cutting with the two right rules on the left that form the map $M(\check z) \types \coend^{w:A} M(\hat w)\otimes \hom_A(\check w,\hat z)$; thus this direction follows from the other.
\section{Terms}
For now, the rules for terms are written without substitutions incorporated.
We just take care that for variables mentioned explicitly in the contexts of the conclusion, it would make sense to substitute any reasonable term for them wherever they occur in the term being defined.
\subsection{Morphism types}
\begin{mathpar}
\inferrule{ }{x:A \cb \ec \types \id(x):\mor A(\check x,\hat x)}\and
\inferrule{
\Delta,z:A \cb \Gamma[\hat z/\hat y,\check z/\check x] \types m:M[\hat z/\hat y,\check z/\check x]
}{
\Delta,x:A,y:A \cb \Gamma,f:\mor A(\hat x, \check y) \types J(x.y.M,z,m; x, y, f):M
}\textsc{(2-sided)}\and
J(x.y.M,z,m;x,x,\id(x))\jdeq m[x/z]\and
J(x.y.M,z,m[z/x,z/y,\id(z)/f];x,y,f)\jdeq m
\end{mathpar}
We will show that the following one-sided elimination rules are actually admissible.
\begin{mathpar}
\inferrule{\Delta\cb \Gamma \types m:M[a/\hat x]}
{\Delta,x:A\cb \Gamma,f:\mor A(a, \check x) \types \hat J(m;x,f) :M}\textsc{(covariant)}\and
\inferrule{\Delta\cb \Gamma \types m:M[b/\check x]}
{\Delta,x:A\cb \Gamma,f:\mor A(\hat x, b) \types \check J(m;x,f):M}\textsc{(contravariant)}\\
\hat J(m;a,\id(a)) \jdeq m\and
\hat J(m[a/x,\id(a)/f];x,f) \jdeq m \\
\check J(m;b,\id(b)) \jdeq m \and
\check J(m[b/x,\id(b)/f];x,f) \jdeq m
\end{mathpar}
The derivation of the covariant rule is as follows:
\begin{mathpar}
\inferrule{
\Delta \cb \Gamma \types t : M[a/\hat x] \and
\inferrule*{
x:A \cb m:M \types m:M
}{
x:A,y:A \cb m:M, f:\mor A(\hat x,\check y) \types J(x.y.M,x,m;x,y,f) : M[\hat y/\hat x]
}
}{
\Delta,y:A \cb \Gamma, f:\mor A(a,\check y) \types J(x.y.M,a,t;a,y,f) : M[\hat y/\hat x]
}
\end{mathpar}
Thus we define $\hat J(m;x,f) \jdeq J(x.y.M,a,m;a,x,f)$. The computation rules are easy to check.
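For instance, taking the premise to be a variable, the covariant rule should give a term for directed transport~\eqref{eq:transport}:
\[ \Delta,x:A,y:A \cb m:M[\check x/u],\; f:\mor A(\hat x,\check y) \types \hat J(m;y,f) : M[\hat y/u] \]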
\subsection{The rest}
Tensor types:
\begin{mathpar}
\inferrule{
\Delta_M \cb \Gamma_M \types t_m:M \\
\Delta_N \cb \Gamma_N \types t_n:N
}{
\Delta_M,\Delta_N \cb \Gamma_M,\Gamma_N \types \tpair{t_m}{t_n} : M\otimes N
}\and
\inferrule{
\Delta \cb \Gamma,m:M,n:N \types c:C
}{
\Delta \cb \Gamma,t:M\otimes N \types (\tlet m,n := t in c) : C
}\and
(\tlet m,n := \tpair{t_m}{t_n} in c) \jdeq c[t_m/m,t_n/n]\and
(\tlet m,n := t in c[\tpair{m}{n}/z])\jdeq c[t/z]
\end{mathpar}
Coend types:
\begin{mathpar}
% \inferrule{
% \Delta,x:A,x':A \cb \Gamma \types t_m : M[\check x'/\check x]
% }{
% \Delta,y:A \cb \Gamma[y/x'] \types \cpair{y}{t_m[y/x,y/x']} : \coend^{x:A} M[y/x']
% }\and
% (\clet x,m := \cpair{y}{t_m} in c) \jdeq c[y/x,t_m/m]\and
% (\clet x,m := t in c[\cpair{x}{m}/z]) \jdeq c[t/z]
\inferrule{
\Delta, x : A, y : A \cb \Gamma[\check x / \check z, \hat y / \hat z] \types m: M[\check x / \check z , \hat x / \hat w, \check y / \check w , \hat y / \hat z]
}{
\Delta, z : A \cb \Gamma \types (\mix x,y with z in m): \coend^{w:A} M
}\and
\inferrule{
\Psi_C \types C \type \\
\Delta, w:A \cb \Gamma, m:M \types c : C
}{
\Delta \cb \Gamma, t:\coend^{w:A} M \types (\clet w,m := t in c) : C
}\and
\end{mathpar}
Note that both the left rule and the right rule bind variables!
The term $(\mix x,y with z in m)$ binds $x:A$ and $y:A$ in $m$, while the term $(\clet w,m := t in c)$ binds $w:A$ and $m:M$ in $c$.
Also there is some sort of ``renaming in the context'' going on as we pass across the \textsf{mix}, since the $\Gamma$ above the line is also a substitution instance.
However, I (Mike) can't figure out what the computation rule should say, for reasons that make me wonder whether a term calculus is doomed to failure.
Going back to the principal cut for the coend type, there are actually several different possible such cuts:
\begin{mathpar}
\inferrule{\inferrule{
\Delta', x : A, y : A \cb \Gamma'[\check x / \check z, \hat y / \hat z] \types M[\check x / \check z , \hat x / \hat w, \check y / \check w , \hat y / \hat z]
}{
\Delta', z : A \cb \Gamma' \types \coend^{w:A} M
} \\ \inferrule{
\Delta, w:A \cb \Gamma, M \types C
}{
\Delta \cb \Gamma, \coend^{w:A} M \types C
}}{\Delta,\Delta',(z:A)? \cb \Gamma,\Gamma' \types C}
\end{mathpar}
depending on how the occurrences of $\hat z$ and $\check z$ are distributed through $\Gamma'$ and $M$.
\begin{itemize}
\item If both are in $\Gamma'$, then $z\notin\Delta$, so $z:A$ remains in the context of the conclusion separate from $\Delta,\Delta'$.
\item If both are in $M$, then we must have $z\in \Delta$, and the cut is disallowed because it forms a variable loop.
\item If one is in $M$ and one in $\Gamma'$, then one must also be in $\Gamma$ and we have $z\in \Delta$, so $z:A$ doesn't need to appear separately in the context of the conclusion.
\end{itemize}
In each case I can see how to reduce the cut, namely to the obvious
\begin{mathpar}
\inferrule{\Delta', x : A, y : A \cb \Gamma'[\check x / \check z, \hat y / \hat z] \types M[\check x / \check z , \hat x / \hat w, \check y / \check w , \hat y / \hat z]
\\
\Delta, w:A \cb \Gamma, M \types C}
{\Delta,\Delta',(z:A)? \cb \Gamma,\Gamma' \types C}
\end{mathpar}
but this latter cut involves nontrivial category-variable manipulation because it's ``contracting a string'', and I can't figure out how to represent that by substitution.
In particular, the category-variables $x,y$ appearing in one of the premises disappear in the conclusion and I don't see how to represent that disappearance in terms of ``substituting'' something for them.
But possibly I'm just being dense.
Perhaps the solution is that the notion of ``substitution'' has to involve a more careful ``matching up of variables'' according to a loop-free string picture.
See the example below.
Proof of co-Yoneda lemma, starting with one direction:
\begin{mathpar}
\inferrule{
\inferrule{
\inferrule{
\inferrule{ }{
x:A \cb n:N(\check x) \types n:N(\hat x)
}
}{
x:A,y:A \cb n:N(\check x), t:\hom_A(\hat x,\check y) \types \hat J(n;y,t):N(\hat y)
}
}{