\renewcommand{\bar}{\overline}
\renewcommand{\ddx}{\frac{\partial}{\partial x}}
\newcommand{\dfdx}{\frac{\partial f}{\partial x}}
\newcommand{\dfdy}{\frac{\partial f}{\partial y}}
\newcommand{\dfdzbar}{\frac{\partial f}{\partial \bar z}}
\newcommand{\dfdz}{\frac{\partial f}{\partial z}}
\renewcommand{\dudx}{\frac{\partial u}{\partial x}}
\newcommand{\dudy}{\frac{\partial u}{\partial y}}
\newcommand{\dudz}{\frac{\partial u}{\partial z}}
\renewcommand{\dvdx}{\frac{\partial v}{\partial x}}
\newcommand{\dvdy}{\frac{\partial v}{\partial y}}
\newcommand{\dveczdx}{\frac{\partial \vec z}{\partial x}}
\newcommand{\dveczdy}{\frac{\partial \vec z}{\partial y}}
\newcommand{\dydzbar}{\frac{\partial y}{\partial \bar z}}
\newcommand{\dydz}{\frac{\partial y}{\partial z}}
\newcommand{\dzbar}{\frac{\partial}{\partial \bar z}}
\renewcommand{\ddy}{\frac{\partial}{\partial y}}
\newcommand{\ddz}{\frac{\partial}{\partial z}}
\newcommand{\dxdzbar}{\frac{\partial x}{\partial \bar z}}
\newcommand{\dxdz}{\frac{\partial x}{\partial z}}
\newcommand{\dddfzbarz}{\frac{\partial^2 f}{\partial \bar z \partial z}}
\newcommand{\dddxx}{\frac{\partial^2}{\partial x^2}}
\newcommand{\dddyy}{\frac{\partial^2}{\partial y^2}}
\newcommand{\dddzbarz}{\frac{\partial^2}{\partial \bar z \partial z}}
\newcommand{\ddfdxx}{\frac{\partial^2 f}{\partial x^2}}
\newcommand{\ddfdyy}{\frac{\partial^2 f}{\partial y^2}}
% - Berkeley Math 185 - Complex Function Theory (Sarason)
\section*{Useful results}
\textbf{Geometric series} $a + aw + \ldots + aw^n = \frac{a(1 - w^{n+1})}{1-w}$ (for $w \neq 1$).
\section{Complex Numbers}
\begin{description}
% \begin{comment}
\exercise{I.2.1}{Prove that $\C$ obeys the associative law for multiplication
and the distributive law.}
Let $u, v, w \in \C$ with $u = a + bi$, $v = c + di$, and $w = f + gi$.
Multiplication is commutative since
\begin{align*}
uv &= (ac - bd) + (ad + bc)i \\
&= (ca - db) + (cb + da)i = vu,
\end{align*}
and associative since
\begin{align*}
(uv)w &= \big((ac - bd) + (ad + bc)i\big)(f + gi) \\
&= (acf - bdf - adg - bcg) + (acg - bdg + adf + bcf)i \\
&= (a + bi)\big((cf - dg) + (cg + df)i\big) = u(vw).
\end{align*}
Multiplication is left-distributive over addition since
\begin{align*}
u(v + w)
&= (a + bi)\big((c + f) + (d + g)i\big) \\
&= (ac + af - bd - bg) + (ad + ag + bc + bf)i \\
&= (ac - bd) + (ad + bc)i + (af - bg) + (ag + bf)i \\
&= (a + bi)(c + di) + (a + bi)(f + gi) \\
&=uv + uw.
\end{align*}
Since multiplication is commutative, multiplication is also right-distributive over addition.
\exercise{I.2.2}{Find the multiplicative inverses of the complex numbers $(0, 1)$ and $(1, 1)$.}
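A quick computation, using $\frac{1}{z} = \frac{\bar z}{|z|^2}$: the inverse of
$(0, 1) = i$ is $-i = (0, -1)$, since $i \cdot (-i) = 1$; and the inverse of
$(1, 1) = 1 + i$ is $\frac{1 - i}{2}$, i.e. $\big(\tfrac{1}{2}, -\tfrac{1}{2}\big)$, since
$(1 + i)\frac{1 - i}{2} = \frac{1 - i^2}{2} = 1$.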
\exercise{I.2.3}{Think of $\C$ as a vector space over $\R$. Let $c = (a,b)$ be
in $\C$, and regard multiplication by $c$ as a real linear transformation
$T_c$. Find the matrix $M_c$ for $T_c$ with respect to the basis
$(1, 0), (0, 1)$. Observe that the map $c \mapsto M_c$ preserves addition and
multiplication. Conclude that the algebra of two-by-two matrices over $\R$
contains a replica of $\C$.}
Background:
What does ``$\C$ as a vector space over $\R$'' mean? Concretely, we can describe
a finite-dimensional vector space as a set of tuples whose entries come from the
field ($\R$ in this case). Vector spaces support addition and scalar
multiplication, where the scalars come from the field. So this means that $\C$
is a set of ordered pairs of reals, supporting addition of pairs and
multiplication of a pair by a real scalar. What it does \textit{not} imply is
that pairs can be multiplied, although, in the case of $\C$, they can, since
$\C$ is a field.
A linear transformation is a function from one vector space $U$ to another,
$W$, such that $f(u + w) = f(u) + f(w)$ and $f(au) = af(u)$ for $u, w \in U$
and $a$ in the field. In other words, the linear transformation
preserves the two vector space operations, addition and scalar multiplication;
it is a homomorphism of vector spaces.
OK, so the operation that was ignored when conceiving of $\C$ as a vector space
over $\R$ (multiplication by the fixed complex number $c$) we're going to regard as a ``real
linear transformation'', i.e. a linear map from $\R^2$ to $\R^2$. Find the matrix for it with
respect to the basis $((1, 0), (0, 1))$. The first basis vector ($1$) is
transformed as $(1, 0) \mapsto (a, b)(1, 0) = (a, b)$. The second basis vector
($i$) is transformed as $(0, 1) \mapsto (a, b)(0, 1) = (-b, a)$. Therefore the
matrix of $T_c$ is
$$M_c = \matMMxNN{a}{-b}
{b}{a},$$
which is a rotation + scaling transformation of $\R^2$.
\textbf{Observe that the map $c \mapsto M_c$ preserves addition and
multiplication.}
Let $f: \R^2 \rightarrow \text{(2x2 matrices)}$ denote the map $c \mapsto M_c$. Then
$$f(c_1 + c_2) = \matMMxNN{a_1 + a_2}{-b_1 - b_2}
{b_1 + b_2}{a_1 + a_2} = f(c_1) + f(c_2),$$
and
$$f(c_1c_2) = \matMMxNN{a_1a_2 - b_1b_2}{-a_1b_2 - a_2b_1}
                       {a_1b_2 + a_2b_1}{a_1a_2 - b_1b_2},$$
while
$$f(c_1)f(c_2)
= \matMMxNN{a_1}{-b_1}
           {b_1}{a_1} \matMMxNN{a_2}{-b_2}
                               {b_2}{a_2}
= \matMMxNN{a_1a_2 - b_1b_2}{-a_1b_2 - a_2b_1}
           {a_2b_1 + a_1b_2}{a_1a_2-b_1b_2},
$$
which is the same matrix, so the map preserves multiplication as well.
\textbf{Conclude that the algebra of two-by-two matrices over $\R$
contains a replica of $\C$}
The ``algebra of two-by-two matrices over $\R$'' refers to the fact that
$2 \times 2$ matrices can be added, multiplied by a scalar from the field $\R$ (they
form a vector space), and can also be multiplied with each other. Since the map
$c \mapsto M_c$ is injective and preserves addition, scalar multiplication, and
multiplication, its image, the set of matrices of the form $\smat{a}{-b}{b}{a}$,
is a subalgebra isomorphic to $\C$: a replica of $\C$ inside the algebra of
two-by-two matrices over $\R$.
% \end{comment}
\exercise{I.4.3}{Prove that if a polynomial with real coefficients has the
complex root $z$, then it also has $\bar z$ as a root.}
Let $P:\C \rightarrow \C$, defined by $P(c) = r_0 + r_1c + \ldots + r_kc^k$, be a degree-$k$
polynomial of a complex variable $c$ with real coefficients $r_0, \ldots, r_k$, and let
$z = a + bi$ be a root, i.e. $P(z) = 0$. The claim is that
$P(\bar z) = 0$.
To show this, take the complex conjugate of both sides of the equation
$P(z) = 0$:
$$
\bar{P(z)} = \bar{r_0 + r_1z^1 + \ldots + r_kz^k} = \bar 0.
$$
Then, since $\bar{z_1 + z_2} = \bar z_1 + \bar z_2$, $\bar{rz} = r\bar{z}$ for
real $r$, and $\bar{z^j} = \bar{z}^{\,j}$ (conjugation is multiplicative),
$$
r_0 + r_1\bar{z} + \ldots + r_k\bar{z}^{\,k} = P(\bar z) = 0,
$$
proving that if $z$ is a root then $\bar z$ is a root also.
\exercise{I.7.4}{Prove that the distinct complex numbers $z_1, z_2, z_3$ are
the vertices of an equilateral triangle if and only if
$$z_1^2 + z_2^2 + z_3^2 = z_1z_2 + z_2z_3 + z_3z_1$$}
The condition can be rewritten as
$$
(z_1 - z_2)^2 + (z_2 - z_3)^2 + (z_3 - z_1)^2 = 0,
$$
since expanding the left side gives $2(z_1^2 + z_2^2 + z_3^2) - 2(z_1z_2 + z_2z_3 + z_3z_1)$.
Each of the three terms on the left side is the square of a complex number
which, when viewed as a vector in $\R^2$, forms one side of a triangle.
\includegraphics[width=100pt]{img/equilateral-1.png}
So the original claim is equivalent to the claim that the three vectors
$$
(z_1 - z_2)^2, ~ (z_2 - z_3)^2, ~ (z_3 - z_1)^2
$$
sum to 0 if and only if the triangle is equilateral.
First let's prove that if the triangle is equilateral, then the three vectors
sum to 0. Translate each of the unsquared vectors
$$
(z_1 - z_2), ~ (z_2 - z_3), ~ (z_3 - z_1)
$$
so that they originate at the origin; they are of equal magnitude and they
divide the circle into 3 sectors of equal angle $\frac{2\pi}{3}$. Let
$\theta$ be the angle between one of the vectors and
the positive real axis. Interpreted as complex numbers, their
arguments are (up to relabeling) $\theta$, $\frac{2\pi}{3} + \theta$, and
$\frac{4\pi}{3} + \theta$.
\includegraphics[width=100pt]{img/equilateral-2.png}
Now we form their squares
$$
(z_1 - z_2)^2, ~ (z_2 - z_3)^2, ~ (z_3 - z_1)^2.
$$
Since $(z_1 - z_2)$, $(z_2 - z_3)$, and $(z_3 - z_1)$ are of equal magnitude,
so are their squares. And the arguments of their squares are $2\theta$,
$\frac{4\pi}{3} + 2\theta$, and
$\frac{8\pi}{3} + 2\theta \equiv \frac{2\pi}{3} + 2\theta$ mod
$2\pi$. Therefore the three squared side vectors, when translated so that
they originate at the origin, also divide up the circle into sectors of equal
angle $\frac{2\pi}{3}$: the geometrical picture differs from the previous one
only by a uniform scaling, a rotation, and a relabeling of the vectors, and we conclude that
these squared vectors also sum to zero (return to the origin when placed
head-to-tail). I.e. the equilaterality assumption implies
$$
(z_1 - z_2)^2 + (z_2 - z_3)^2 + (z_3 - z_1)^2 = 0,
$$
proving one direction of the equivalence.
To prove the other direction, we need to show that if
$$
z_1^2 + z_2^2 + z_3^2 = z_1z_2 + z_2z_3 + z_3z_1,
$$
or equivalently,
$$
(z_1 - z_2)^2 + (z_2 - z_3)^2 + (z_3 - z_1)^2 = 0,
$$
then the triangle is equilateral. For example, it would suffice to show that
$$
|z_1 - z_2| = |z_2 - z_3| = |z_3 - z_1|,
$$
but I haven't found a direct way to do so; one possible route, using elementary
symmetric polynomials, is sketched below.
% The left side of this equation obeys the following triangle inequality
% $$
% |z_1^2 + z_2^2 + z_3^2|
% \le |z_1|^2 + |z_2|^2 + |z_3|^2,
% $$
% and the right side obeys
% $$
% |z_1z_2 + z_2z_3 + z_3z_1|
% \le |z_1||z_2| + |z_2||z_3| + |z_3||z_1|.
% $$
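A sketch of the remaining direction, via elementary symmetric polynomials: let
$a = z_1 - z_2$, $b = z_2 - z_3$, $c = z_3 - z_1$, so that $a + b + c = 0$ and,
by hypothesis, $a^2 + b^2 + c^2 = 0$. Squaring the first relation,
$$
0 = (a + b + c)^2 = a^2 + b^2 + c^2 + 2(ab + bc + ca),
$$
so $ab + bc + ca = 0$ as well. Therefore $a, b, c$ are the three roots of
$$
t^3 - (a + b + c)t^2 + (ab + bc + ca)t - abc = t^3 - abc,
$$
i.e. each of $a, b, c$ is a cube root of the same number $abc$, which is nonzero
because the points are distinct. Cube roots of a fixed nonzero number all have
the same modulus, hence $|z_1 - z_2| = |z_2 - z_3| = |z_3 - z_1|$ and the
triangle is equilateral.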
\exercise{I.10.1}{Use de Moivre's formula to find expressions for
$\cos 5\theta$ and $\sin 5\theta$ as polynomials in $\cos \theta$ and
$\sin \theta$.}
From de Moivre's formula we have
$(\cos\theta + i\sin\theta)^5 = \cos 5\theta + i\sin 5\theta$. The left hand
side expands as
\begin{align*}
(\cos\theta + i\sin\theta)^5 =& \cos^5\theta \\
&+ 5i\cos^4\theta\sin\theta \\
&- 10\cos^3\theta\sin^2\theta \\
&- 10i\cos^2\theta\sin^3\theta \\
&+ 5\cos\theta\sin^4\theta \\
&+ i\sin^5\theta.
\end{align*}
Equating real and imaginary components from the right side and the expansion of
the left side we have
\begin{align*}
\cos 5\theta &= \cos^5\theta - 10\cos^3\theta\sin^2\theta + 5\cos\theta\sin^4\theta \\
\sin 5\theta &= \sin^5\theta - 10\cos^2\theta\sin^3\theta + 5\cos^4\theta\sin\theta
\end{align*}
We can write these as polynomials in $\cos\theta$ and $\sin\theta$
respectively by using the identity $\cos^2\theta = 1 - \sin^2\theta$:
\begin{align*}
\cos 5\theta
&= \cos^5\theta - 10\cos^3\theta(1 - \cos^2\theta) + 5\cos\theta(1 - 2\cos^2\theta + \cos^4\theta) \\
&= 16\cos^5\theta - 20\cos^3\theta + 5\cos\theta, \\
\sin 5\theta
&= \sin^5\theta - 10(1 - \sin^2\theta)\sin^3\theta + 5(1 - 2\sin^2\theta + \sin^4\theta)\sin\theta \\
&= 16\sin^5\theta - 20\sin^3\theta + 5\sin\theta.
\end{align*}
\exercise{I.11.4}{Prove that the sum of the $n$-th roots of 1 equals 0,
($n > 1$).}
Let $w = \cos \frac{2\pi}{n} + i\sin \frac{2\pi}{n}$ be the $n$-th root of 1
with smallest argument, other than 1 itself. Then the sum of the roots is
$1 + w + w^2 + \ldots + w^{n-1}$. This is the first $n$ terms of a geometric
series with constant ratio $w$, and is therefore equal to
$\frac{1 - w^{n}}{1 - w} = \frac{1 - 1}{1 - w} = 0$.
\exercise{I.11.5}{Let $w$ be an $n$-th root of 1 different from 1
itself. Establish the formulas
$$
1 + 2w + 3w^2 + \ldots + nw^{n-1} = \frac{n}{w -1},
$$
$$
1 + 4w + 9w^2 + \ldots + n^2w^{n-1} = \frac{n^2}{w-1} - \frac{2n}{(w-1)^2}.
$$
}
[Note: my answers to this question appear to be wrong.]
The sum of the first $n+1$ terms of a geometric series with first term $1$ and
constant ratio $w$ is
$$
1 + w + w^2 + \ldots + w^{n} = \frac{1 - w^{n+1}}{1 - w} = \frac{1 - w}{1 - w} = 1,
$$
since $w^{n+1} = w$.
\textcolor{red}{This equation is true when $w$ is an $n$-th root of unity, but not for arbitrary $w$.}
Taking derivatives of both sides gives
$$
1 + 2w + 3w^2 + \ldots + nw^{n-1} = 0,
$$
which does not agree with the given formula, so something's wrong.
\textcolor{red}{To differentiate both sides one needs an identity between
functions of $w$, not an identity between two particular numbers.}
Nevertheless, if it were the case that
$$
1 + 2w + 3w^2 + \ldots + nw^{n-1} = \frac{n}{w -1}
$$
then we could multiply by $w$, giving
$$
w + 2w^2 + 3w^3 + \ldots + nw^n = \frac{nw}{w -1},
$$
and differentiate with respect to $w$ again, giving
$$
1 + 4w + 9w^2 + \ldots + n^2w^{n-1} = \frac{(w-1)n - nw}{(w - 1)^2} = \frac{n}{w - 1} - \frac{nw}{(w - 1)^2},
$$
which also doesn't agree with the given formula.
\textcolor{red}{Again, to take a derivative, one needs a function on the RHS that
agrees with the LHS for all $w$, not just at a single point.}
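A derivation that avoids differentiating a numerical identity (a sketch, using
the standard trick of multiplying the sum by $w$ and subtracting): let
$S = 1 + 2w + 3w^2 + \ldots + nw^{n-1}$. Then
$$
(1 - w)S = 1 + w + w^2 + \ldots + w^{n-1} - nw^n = 0 - n,
$$
since $1 + w + \ldots + w^{n-1} = \frac{1 - w^n}{1 - w} = 0$ and $w^n = 1$.
Hence $S = \frac{-n}{1 - w} = \frac{n}{w - 1}$, the first formula. Similarly,
for $T = 1 + 4w + 9w^2 + \ldots + n^2w^{n-1}$,
$$
(1 - w)T = \sum_{k=1}^{n} \big(k^2 - (k-1)^2\big)w^{k-1} - n^2w^n
         = \sum_{k=1}^{n} (2k - 1)w^{k-1} - n^2
         = 2S - n^2,
$$
using again that the plain geometric sum vanishes. Therefore
$$
T = \frac{2S - n^2}{1 - w} = \frac{n^2}{w - 1} - \frac{2n}{(w - 1)^2},
$$
the second formula.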
% \begin{comment}
\exercise{I.13.1}{Stereographic projection}
The stereographic projection maps $z$ onto the surface of a sphere according to
$$
z \mapsto \frac{(2 \Re z, 2 \Im z, |z|^2 - 1)}{|z|^2 + 1}.
$$
% It's 1-1 and the inverse map is
% $$
% (\xi, \eta, \zeta) \mapsto (s + 1)\frac{\xi}{2} + (s + 1)\frac{\eta}{2}i
% $$
\exercise{I.13.1}{Establish the following formula for the spherical metric
$$
\rho(z_1, z_2) = \frac{2|z_1 - z_2|}{\sqrt{|z_1|^2 + 1}\sqrt{|z_2|^2 + 1}}
$$}
$\rho(z_1, z_2)$
is the Euclidean distance between the image points of $z_1$ and $z_2$ on the
Riemann sphere, therefore
\begin{align*}
\rho(z_1, z_2)
&=
\left|
\frac{(2 \Re z_1, 2 \Im z_1, |z_1|^2 - 1)}{|z_1|^2 + 1} -
\frac{(2 \Re z_2, 2 \Im z_2, |z_2|^2 - 1)}{|z_2|^2 + 1}
\right| \\
&=
\left|
\frac{
(2 \Re z_1, 2 \Im z_1, |z_1|^2 - 1)(|z_2|^2 + 1) -
(2 \Re z_2, 2 \Im z_2, |z_2|^2 - 1)(|z_1|^2 + 1)
}
{(|z_1|^2 + 1)(|z_2|^2 + 1)}
\right| \\
\end{align*}
Meanwhile,
$$
|z_1 - z_2| = \sqrt{(\Re z_1 - \Re z_2)^2 + (\Im z_1 - \Im z_2)^2}
$$
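One way to finish the computation (a sketch that uses the fact that both image
points lie on the unit sphere, rather than expanding the difference of fractions
directly): writing $P(z)$ for the image of $z$ on the sphere, $|P(z_1)| = |P(z_2)| = 1$, so
$$
\rho(z_1, z_2)^2 = |P(z_1) - P(z_2)|^2 = 2 - 2\,P(z_1) \cdot P(z_2),
$$
where $\cdot$ is the Euclidean dot product. Now
$$
P(z_1) \cdot P(z_2)
= \frac{4\Re z_1 \Re z_2 + 4\Im z_1 \Im z_2 + (|z_1|^2 - 1)(|z_2|^2 - 1)}
       {(|z_1|^2 + 1)(|z_2|^2 + 1)},
$$
and the numerator of $2 - 2\,P(z_1) \cdot P(z_2)$, over the common denominator, is
$$
2\big[(|z_1|^2 + 1)(|z_2|^2 + 1) - (|z_1|^2 - 1)(|z_2|^2 - 1)\big]
- 8\Re z_1 \Re z_2 - 8\Im z_1 \Im z_2
= 4|z_1|^2 + 4|z_2|^2 - 8\Re(z_1 \bar z_2) = 4|z_1 - z_2|^2.
$$
Therefore $\rho(z_1, z_2) = \frac{2|z_1 - z_2|}{\sqrt{|z_1|^2 + 1}\sqrt{|z_2|^2 + 1}}$, as claimed.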
% \end{comment}
\exercise{I.14.1}{Establish the formula
$$
\rho(z, \infty) = \frac{2}{\sqrt{|z|^2 + 1}}
$$
}
$\rho(z, \infty)$ is the Euclidean distance between the image point of $z$ and
the north pole $(0, 0, 1)$:
\begin{align*}
\rho(z, \infty)
&= \sqrt{
\(\frac{2 \Re z}{|z|^2 + 1} - 0\)^2 +
\(\frac{2 \Im z}{|z|^2 + 1} - 0\)^2 +
\(\frac{|z|^2 - 1}{|z|^2 + 1} - 1\)^2
} \\
&= \frac
{\sqrt{4 \(\Re z\)^2 + 4 \(\Im z\)^2 + 4}}
{|z|^2 + 1} \\
&= \frac
{2}
{\sqrt{|z|^2 + 1}}.
\end{align*}
\end{description}
\section{Complex Differentiation}
Consider $z$ approaching $z_0$. $z - z_0$ is a vector pointing from $z_0$ to
$z$, and $f(z) - f(z_0)$ is a vector pointing between the image points for some
complex-valued function $f$. The derivative of $f$ at $z_0$ is the rotation +
scaling linear transformation (i.e. the complex number $c$) that takes
$z - z_0$ to $f(z) - f(z_0)$ up to an error that vanishes faster than
$|z - z_0|$. Note that the transformation
must be the \textit{same} regardless of the path taken by $z$ as it approaches
$z_0$. In other words, the action of $f$ on \textit{all} vectors in an
infinitesimal disc around $z_0$ is approximately multiplication by a single complex number
$c$.
The transformation $f$ can be described by two surfaces over the complex plane:
$u(x,y)$ and $v(x,y)$, so that $f: \cvec{x}{y} \mapsto
\cvec{u(x,y)}{v(x,y)}$. If $f$ is differentiable at $(x_0, y_0)$ then it has a
local linear approximation with sublinear error. That linear approximation is
$$
\cvec{u(x, y)}
{v(x, y)} \approx \cvec{u(x_0, y_0)}
{v(x_0, y_0)} + \cvec{(x - x_0)\dudx + (y - y_0)\dudy}
{(x - x_0)\dvdx + (y - y_0)\dvdy}
$$
This is more succinctly expressed using the Jacobian:
$$
\cvec{u(x, y)}
{v(x, y)} \approx \cvec{u(x_0, y_0)}
{v(x_0, y_0)} + \matMMxNN{u_x}{u_y}
{v_x}{v_y} \cvec{x - x_0}
{y - y_0}.
$$
Note that this "linear approximation" form
$$
y \approx y_0 + y'(x - x_0)
$$
could just as well be written
$$
y - y_0 \approx y'(x - x_0)
$$
showing that one way of describing the derivative is ``whatever you have to
multiply a small displacement in the input space by to get the displacement in
the output space''.
Recall that the derivative of a complex function $f$ is defined to be a complex
number,
$$
f'\(\cvec{x_0}
         {y_0}\) = \lim_{(x,y) \rightarrow (x_0,y_0)}
\frac{
\cvec{u(x, y)}
     {v(x, y)} - \cvec{u(x_0, y_0)}
                      {v(x_0, y_0)}
}
{
\cvec{x}
     {y} - \cvec{x_0}
               {y_0}
},
$$
i.e.
$$
f'(z_0) = \lim_{z \rightarrow z_0} \frac{f(z) - f(z_0)}{z - z_0},
$$
i.e. the derivative is whatever complex number you multiply the vector $z-z_0$
by to get its image vector $f(z) - f(z_0)$, in the limit as $z \rightarrow z_0$.
The partial derivatives of the complex-valued $f$ in the real and imaginary
directions are the complex numbers
\begin{align*}
f_x &= u_x + iv_x\\
f_y &= u_y + iv_y\\
\end{align*}
or
\begin{align*}
f_x &= \cvec{u_x}{v_x}\\
f_y &= \cvec{u_y}{v_y}\\
\end{align*}
The geometric interpretation of these is that they describe how the image vector
$f(z)$ changes in response to a small purely real or purely imaginary change to
$z$, respectively.
$u$ can be approximated by a local tangent plane. That's what $u_x$ and $u_y$
do. And so can $v$; that's what $v_x$ and $v_y$ do. But when we consider the
effect of a small displacement in the 2D input space on the 2D output space, we
describe the two tangent plane approximations jointly as a linear
transformation of the input plane, defined by the Jacobian. The point is that,
for $f$ to be complex-differentiable, this linear transformation must have the
same effect as multiplication by a single complex number.
The derivative is "what you have to multiply the input displacement by to get
the output displacement". That's true for a single-variable function
$\R \rightarrow \R$
$$
u(x) - u(x_0) = f'(x_0) \cdot (x - x_0)
$$
and it's true for a surface over the plane $(\R^2 \rightarrow \R$)
$$
u(x, y) - u(x_0, y_0) = \dudx \cdot (x - x_0) + \dudy \cdot (y - y_0)
$$
so something analogous holds for a linear transformation of the
plane ($\R^2 \rightarrow \R^2$), i.e.
$$
\vec z - \vec z_0 \approx \dveczdx \cdot (x - x_0) + \dveczdy \cdot (y - y_0),
$$
or
$$
\cvec{u(x, y)}{v(x, y)} - \cvec{u(x_0, y_0)}{v(x_0, y_0)} \approx
\cvec{u_x}{v_x} \cdot (x - x_0) + \cvec{u_y}{v_y} \cdot (y - y_0).
$$
That's exactly the same as the equation involving the Jacobian above
$$
\cvec{u(x, y)}
{v(x, y)} - \cvec{u(x_0, y_0)}
{v(x_0, y_0)} \approx \matMMxNN{u_x}{u_y}
{v_x}{v_y} \cvec{x - x_0}
{y - y_0}.
$$
So how are we to make sense of the equation relating $f'$ and the partial
derivatives $\dfdx$ and $\dfdy$? Clearly in some sense the Jacobian \textit{is}
$f'$, or at least, the complex number that does what the Jacobian does is
$f'$. And
\begin{align*}
\dfdx &= \cvec{u_x}
{v_x} = u_x + iv_x \\
\dfdy &= \cvec{u_y}
{v_y} = u_y + iv_y,
\end{align*}
and so from the Cauchy-Riemann constraint
\begin{align*}
\dfdy &= -v_x + iu_x = i\dfdx,
\end{align*}
i.e. the partial derivative w.r.t. $y$ points at 90° to the $x$ partial
derivative.
% $f_x$ is the complex number that transforms a small real displacement vector
% $\epsilon$ to its image, and $f_y$ is the complex number that transforms a
% small imaginary displacement vector $i\delta$ to its image. [But shouldn't
% these be the same and equal to $f'$, seeing as $f'$ rotates and scales all
% displacement vectors in an infinitesimal disc uniformly? Instead, C-R says that
% $f_x = -if_y$.]
% One notation for the "directional derivative" of $f$ in the direction of
% $\vec z$ is $\frac{\partial f}{\partial \vec z}$. Is this an entirely separate
% notion from the partial derivatives $\frac{\partial f}{\partial z}$ and
% $\frac{\partial f}{\partial \bar z}$ considered in the context of complex
% analysis / Wirtinger derivatives?
So if the local linear approximation to the transformation $f$ behaves exactly as
multiplication by a complex number, then the Jacobian must have the form of a
rotation+scale matrix, $\smat{a}{-b}
{b}{a}$. Therefore the Jacobian must satisfy the
Cauchy-Riemann equations
$$
\begin{cases}
u_x = v_y \\
v_x = -u_y. \\
\end{cases}
$$
The Jacobian that effects the local linear rotation+scale transformation, together with
the equivalent complex number, is
$$
\matMMxNN{u_x}{-v_x}
{v_x}{u_x} ~~~~ u_x + iv_x
$$
or
$$
\matMMxNN{v_y}{u_y}
{-u_y}{v_y} ~~~~ v_y - iu_y.
$$
So we can write
\begin{align*}
f'
&= u_x + iv_x = f_x \\
&= v_y - iu_y = -if_y,
\end{align*}
therefore as above, another expression of the Cauchy-Riemann criterion is
$$
f_x = -if_y.
$$
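As a quick concrete check, take $f(z) = z^2$, so $u = x^2 - y^2$ and $v = 2xy$. Then
$$
u_x = 2x = v_y, \qquad u_y = -2y = -v_x,
$$
so the Cauchy-Riemann equations hold everywhere, and
$$
f' = u_x + iv_x = 2x + 2iy = 2z, \qquad
f_y = u_y + iv_y = -2y + 2ix = i(2x + 2iy) = if_x,
$$
consistent with $f_x = -if_y$.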
Question: what is the intuition for the fact that the complex number
representing the partial derivative with respect to $x$ is the \textit{same} as
the complex number that effects the full linear transformation? (and at 90° to
the partial with respect to $y$) And what's the intuition for the fact that
$\dfdz = \dfdx$, while $\dfdzbar = 0$?
% The partial derivatives of the complex-valued $f$ in the real and imaginary
% directions are
% \begin{align*}
% \dfdx &= \dudx + i\dvdx\\
% \dfdy &= \dudy + i\dvdy\\
% \end{align*}
% So we can write
% \begin{align*}
% f'(z_0)
% &= \dudx(z_0) + i \dvdx(z_0) = \dfdx(z_0) \\
% &= \dvdy(z_0) - i \dudy(z_0) = -i\dfdy(z_0),
% \end{align*}
% or
% \begin{align*}
% f'\(\cvec{x_0}{y_0}\)
% &= \cvec{\dudx(z_0)}{\dvdx(z_0)} = \dfdx(z_0) \\\\
% &= \cvec{\dvdy(z_0)}{-\dudy(z_0)} = -i\dfdy(z_0).
% \end{align*}
$f$ is complex-differentiable at $(x_0, y_0)$ iff the error in the linear approximation is
sublinear in the displacement as $(x,y) \rightarrow (x_0,y_0)$ (i.e. $u$ and $v$
are differentiable as real functions, which is slightly stronger than the mere
existence of their partial derivatives) and the partial derivatives satisfy the
Cauchy-Riemann equations.
\subsection*{Partial derivatives in the $z$ and $\bar z$ directions}
The coordinates $x, y$ and $z, \bar z$ are related by
\begin{align*}
x &= (z + \bar z)/2 \\
y &= (z - \bar z)/2i.
\end{align*}
So by the chain rule (and using $\dfdy = i\dfdx$, which holds when $f$ is holomorphic),
\begin{align*}
\dfdz
&= \dfdx \dxdz + \dfdy \dydz \\
&= (u_x + iv_x)\frac{1}{2} + i(u_x + iv_x)\frac{1}{2i} \\
&= u_x + iv_x \\
&= \dfdx
\end{align*}
and
\begin{align*}
\dfdzbar
&= \dfdx \dxdzbar + \dfdy \dydzbar \\
&= (u_x + iv_x)\frac{1}{2} + i(u_x + iv_x)\frac{-1}{2i} \\
&= \frac{1}{2}\Big((u_x - u_x) + i(v_x - v_x)\Big) \\
&= 0.
\end{align*}
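For contrast, a short worked example with a non-holomorphic function. Applying
the same chain rule without assuming holomorphy gives the general formulas
$\dfdzbar = \frac{1}{2}\(\dfdx + i\dfdy\)$ and $\dfdz = \frac{1}{2}\(\dfdx - i\dfdy\)$.
For $f(z) = \bar z$ we have $u = x$, $v = -y$, so $\dfdx = 1$ and $\dfdy = -i$, and
$$
\dfdzbar = \frac{1}{2}\big(1 + i(-i)\big) = 1 \neq 0,
\qquad
\dfdz = \frac{1}{2}\big(1 - i(-i)\big) = 0,
$$
so the nonvanishing of $\dfdzbar$ detects the failure of the Cauchy-Riemann
equations ($u_x = 1 \neq -1 = v_y$).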
\begin{description}
\exercise{II.8.1(b,d)}{
Let the function $f$ be holomorphic in the open disc $D$. Prove that each of
the following conditions forces $f$ to be constant:\\\\
\textnormal{Let $f(z) = u(z) + iv(z)$.}
\begin{description}
\exercise{(a)}{$f' = 0$ throughout $D$} \\\\
\textnormal{ \textbfit{Informally:} $f' = 0$ throughout $D$ means that the
best linear approximation of $f(z) - f(z_0)$ is $0(z - z_0)$ which
implies that $f(z) = f(z_0)$ everywhere, so $f$ is constant.\\
~\\
\textbfit{Formally:} Since $f$ is holomorphic, $f' = u_x + iv_x =
0$. Equating real and imaginary parts shows that $u_x = v_x = 0$ and
therefore that $v_y = u_x = 0$ and $u_y = -v_x = 0$. Since the Jacobian
of $f$ is the zero matrix, $f$ is constant.}\\
\exercise{(b)}{$f$ is real-valued in $D$} \\\\
\textnormal{
% \textbfit{Informally:}
$f$ is real-valued, so $f(z) - f(z_0)$ is real-valued. Therefore the
local linear approximation $c(z - z_0)$ collapses the plane onto the real
axis, i.e. the Jacobian matrix has the form $\matMMxNN{a}{b} {0}{0}$. But $f$
is holomorphic, so the Jacobian must also have the form
$\matMMxNN{a}{-b}
{b}{a}$. Therefore the Jacobian is the zero matrix, i.e.
all partial derivatives are zero, $u_x = u_y = v_x = v_y = 0$, so $f$ is constant.\\
% \textbfit{Formally:} $f$ is real-valued means that $v(z) = 0$
% everywhere. Therefore the partial derivatives $v_x = v_y = 0$. Since $f$
% is holomorphic the partial derivatives satisfy the Cauchy-Riemann
% equations, therefore we have $u_x = v_y = 0$ and $u_y = -v_x = 0$. The
% Jacobian is the zero matrix hence $f$ is constant.
% \\
}
\exercise{(c)}{$|f|$ is constant in $D$} \\\\
\textnormal{ \textbfit{Informally:} $|f|$ is constant means that it collapses
all points in the open disc $D$ onto a circle. Therefore the Jacobian of
$f$ has determinant $0 = u_x^2 + v_x^2 = v_y^2 + u_y^2$. Therefore the
Jacobian
is the zero matrix and the function $f$ is constant.\\
~\\
\textbfit{Formally:} $|f|$ is constant, therefore
$|f|^2 = f\bar f = u^2 + v^2$ is constant. Therefore its two
partial derivatives vanish:
\begin{align*}
\begin{cases}
\ddx |f|^2 = 2uu_x + 2vv_x = 0 \\
\ddy |f|^2 = 2uu_y + 2vv_y = 0.
\end{cases}
\end{align*}
Since $f$ is holomorphic, $u_x = v_y$ and $u_y = -v_x$, so
\begin{align*}
\begin{cases}
uu_x - vu_y = 0 \\
uu_y + vu_x = 0,
\end{cases}
\end{align*}
% i.e.
% $$
% \matMMxNN{u}{-v}
% {u}{v} \cvec{u_x}{u_y} = \cvec{0}{0}.
% $$
% The determinant of this system is $2uv$ which is not in general zero. But
% the equations hold throughout the disc $D$, therefore
Multiplying the first equation by $u$ and the second by $v$ we have
$$
\begin{cases}
u^2u_x - uvu_y = 0 \\
uvu_y + v^2u_x = 0,
\end{cases}
$$
and summing these gives
$$
u_x(u^2 + v^2) = 0,
$$
which proves that either $u_x = 0$ or $u^2 + v^2 = 0$; in the latter case $|f| = 0$ throughout $D$ (because $|f|$ is constant), so $f \equiv 0$ and again $u_x = 0$.\\
Similarly, multiplying the first equation by $v$ and the second by $u$ gives
$$
\begin{cases}
uvu_x - v^2u_y = 0 \\
u^2u_y + uvu_x = 0,
\end{cases}
$$
and subtracting the first from the second gives
$$
u_y(u^2 + v^2) = 0.
$$
We conclude that $u_x = u_y = 0$; by the Cauchy-Riemann equations $v_x = v_y = 0$ as well, and $f$ is therefore constant. }
\exercise{(d)}{$\arg f$ is constant in $D$} \\\\
\textnormal{
% \textbfit{Informally:}
Let $\arg f = \theta$, constant throughout
$D$. Then every value of $f$ lies on the ray with angle $\theta$, so
$f(z) - f(z_0)$ lies on the line through the origin with angle $\theta$
(its argument is $\theta$ or $\theta + \pi$) whenever $f(z) \neq f(z_0)$.
Therefore the best local linear approximation to $f$ is a linear
transformation that collapses the plane onto a line with angle $\theta$.
The Jacobian determinant is therefore zero. Since $f$
is holomorphic the Jacobian is of the form $\matMMxNN{a}{-b}
{b}{a}$ and therefore we have
$a^2 + b^2 = 0$, so $a = b = 0$. Therefore the Jacobian is the zero
matrix, i.e. $f' = 0$ throughout $D$, so $f$ is constant.\\
% ~\\
% \textbfit{Formally:}
% The polar form of the Cauchy-Riemann equations is
% \begin{align*}
% v_\theta &= ru_r \\
% u_\theta &= -rv_r.
% \end{align*}
% Since $\arg f$ is constant, $v_\theta$
}
\end{description}
}
\exercise{II.8.2}{
Let the function $f$ be holomorphic in the open set $G$. Prove that the
function $g(z) = \bar{f(\bar z)}$ is holomorphic in the set
$G^* = \{\bar z: z \in G\}$.
} \\\\
Let
\begin{align*}
f: x + iy &\mapsto s(x, y) + i t(x,y). \\
g: x + iy &\mapsto u(x, y) + i v(x,y) \\
\end{align*}
We want to show that the Jacobian of $g$ exists and satisfies the
Cauchy-Riemann equations. We have
\begin{align*}
g(x + iy)
&= \bar{s(x, -y) + i t(x, -y)} \\
&= s(x, -y) - i t(x, -y),
\end{align*}
and therefore
\begin{align*}
u(x,y) &= s(x, -y) \\
v(x, y) &= -t(x, -y).
\end{align*}
Now $f = s + it$ is holomorphic, so $s_x=t_y$ and $s_y = -t_x$. Therefore the
partial derivatives of $g$ are
\begin{align*}
u_x &= \ddx s(x, -y) = s_x \\
u_y &= \ddy s(x, -y) = -s_y = t_x \\
v_x &= -\ddx t(x, -y) = -t_x \\
v_y &= -\ddy t(x, -y) = t_y = s_x. \\
\end{align*}
Therefore $u_x = v_y$ and $v_x = -u_y$, showing that the Jacobian of $g$
satisfies the Cauchy-Riemann equations, and therefore that $g$ is holomorphic
in its domain.
\exercise{II.16.4}{
Prove that, if $u$ is a real-valued harmonic function in an open disk $D$,
then any two harmonic conjugates of $u$ in $D$ differ by a constant.
}
Let $v$ and $w$ be harmonic conjugates of $u$, so that
$$
\begin{cases}
u_x = v_y = w_y\\
u_y = -v_x = -w_x.
\end{cases}
$$
We want to show that $q = v-w$ is constant, i.e. that $q_x = q_y = 0$,
throughout $D$. From the Cauchy-Riemann equalities above, we have
$q_x = v_x - w_x = 0$ and $q_y = v_y - w_y = 0$ as required.
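For instance, with $u = x^2 - y^2$ (harmonic, since $u_{xx} + u_{yy} = 2 - 2 = 0$),
a harmonic conjugate $v$ must satisfy $v_y = u_x = 2x$ and $v_x = -u_y = 2y$, so
$v = 2xy + C$; any two such conjugates differ by the constant $C$, as the
exercise asserts.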
% \exercise{II.16.5}{
% [Not in homework.] Suppose that $u$ is a real-valued harmonic function in an open disk $D$, and
% suppose that $u^2$ is also harmonic. Prove that $u$ is constant.
% }
\exercise{II.16.7}{
Prove (assuming equality of second-order mixed partial derivatives) that
$$
\dddzbarz =
\frac{1}{4}\(\frac{\partial^2}{\partial x^2} +
\frac{\partial^2}{\partial y^2}\)
$$
Thus, Laplace's equation can be written as $\frac{\partial^2 f}{\partial \bar z \partial z} = 0$.
}
First note that $x$ and $y$ are related to $z$ and $\bar z$ via
\begin{align*}
x = \frac{z + \bar z}{2} \\
y = \frac{z - \bar z}{2i}, \\
\end{align*}
therefore by the chain rule
$$
\dzbar
= \ddx \dxdzbar + \ddy \dydzbar
= \frac{1}{2}\ddx - \frac{1}{2i}\ddy
= \frac{1}{2}\(\ddx + i\ddy\).
$$
Now $\ddz$ is defined by
$$
\ddz = \frac{1}{2}\(\ddx - i\ddy\),
$$
and applying $\dzbar$ to this gives
\begin{align*}
\dddzbarz
&= \frac{1}{2}\(\dzbar \ddx - i \dzbar \ddy\) \\
&= \frac{1}{4}\(\(\ddx + i\ddy\)\ddx - i\(\ddx + i\ddy\)\ddy\) \\
&= \frac{1}{4}\(\dddxx + i\frac{\partial^2}{\partial y \partial x}
   - i\frac{\partial^2}{\partial x \partial y} + \dddyy\) \\
&= \frac{1}{4}\(\dddxx + \dddyy\),
\end{align*}
where the last step uses the assumed equality of the mixed partial derivatives.
Laplace's equation is $\ddfdxx + \ddfdyy = 0$, which can also be written as
$$
\(\dddxx + \dddyy\)f = 0,
$$
and therefore
$$
4 \dddzbarz f = 0,
$$
i.e.
$$
\dddfzbarz = 0.
$$
\exercise{II.16.8}{
Prove that if $u$ is a real-valued harmonic function then the function
$\frac{\partial u}{\partial z}$ is holomorphic.
}
As above, first note that $x$ and $y$ are related to $z$ and $\bar z$ via
\begin{align*}
x &= \frac{z + \bar z}{2} \\
y &= \frac{z - \bar z}{2i} = -i\frac{z - \bar z}{2}. \\
\end{align*}
By the chain rule
\begin{align*}
\dudz
&= \dudx \dxdz + \dudy \dydz \\
&= \frac{1}{2} \dudx - \frac{i}{2} \dudy. \\
\end{align*}
Switching notation, we write this as $\dudz = \frac{1}{2} u_x - \frac{i}{2} u_y$.
Define a complex-valued function
\begin{align*}
w(x + iy)
= \dudz &= s(x,y) + it(x,y) \\
&= \frac{1}{2} u_x - \frac{i}{2} u_y.
\end{align*}
Then the Jacobian of $w$ is
\begin{align*}
\matMMxNN{s_x}{s_y}
{t_x}{t_y} = \frac{1}{2}\matMMxNN{u_{xx}}{u_{xy}}
{-u_{yx}}{-u_{yy}}.
\end{align*}
Since $u$ is harmonic, $u_{xx} = -u_{yy}$, i.e. $s_x = t_y$, and by equality of
the mixed partial derivatives $u_{xy} = u_{yx}$, i.e. $s_y = -t_x$. So the
Jacobian of $w$ satisfies the Cauchy-Riemann equations and $w = \dudz$ is
holomorphic.
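As a concrete check, take the harmonic function $u = x^2 - y^2$ again. Then
$$
\dudz = \frac{1}{2}u_x - \frac{i}{2}u_y = x + iy = z,
$$
which is indeed holomorphic (it is the derivative of $\frac{z^2}{2}$, and
$u = \Re z^2$).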
\end{description}
\section{Image of a curve under a transformation}
What is the effect of the inversion mapping $z \mapsto w = \frac{1}{z}$ on circles
and lines?
Let $z = x + iy$ with image $w = \frac{1}{z} = u + iv$ and note that
$\frac{1}{z} = \frac{\bar z}{|z|^2}$. Therefore the mapping is
\begin{align*}
x + iy \mapsto \frac{x}{x^2 + y^2} - i\frac{y}{x^2 + y^2} = u + iv.
\end{align*}
The general equation of a circle or line in the plane is
\begin{align*}
Ax^2 + Ay^2 + Bx + Cy + D = 0.
\end{align*}
We use the inverse mapping to establish an equation that holds in the
transformed complex plane. Since the inverse mapping is the same as the forward
mapping, we have
\begin{align*}
w = u + iv \mapsto \frac{u}{u^2 + v^2} - i\frac{v}{u^2 + v^2} = x + iy.
\end{align*}
So points $w = u + iv$ in the transformed complex plane satisfy
\begin{align*}
A\frac{u^2}{(u^2 + v^2)^2} + A\frac{v^2}{(u^2 + v^2)^2} + B\frac{u}{u^2 + v^2} - C\frac{v}{u^2 + v^2} + D = 0,
\end{align*}
i.e.
\begin{align*}
\frac{A}{u^2 + v^2} + B\frac{u}{u^2 + v^2} - C\frac{v}{u^2 + v^2} + D = 0,
\end{align*}
or
\begin{align*}
A + Bu - Cv + Du^2 + Dv^2 = 0.
\end{align*}
So we see that a circle or line in the pre-transformed plane (the curve
$Ax^2 + Ay^2 + Bx + Cy + D = 0$) maps to another curve of the same general form:
a circle when $D \neq 0$ and a line when $D = 0$. Since $D = 0$ exactly when the
original curve passes through the origin, inversion sends circles and lines not
through the origin to circles, and circles and lines through the origin to
lines.
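For example, the vertical line $x = 1$ corresponds to $A = 0$, $B = 1$, $C = 0$,
$D = -1$, so its image satisfies $u - (u^2 + v^2) = 0$, i.e.
$$
\(u - \tfrac{1}{2}\)^2 + v^2 = \tfrac{1}{4},
$$
the circle of radius $\frac{1}{2}$ centered at $\frac{1}{2}$, which passes
through the origin (the image of the line's point at infinity).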
\section{Linear-Fractional Transformations}
Complex projective space $\CP^1$ is a space of equivalence classes of nonzero vectors
in $\C^2$. Basically the elements of $\CP^1$ are analogs of lines through the
origin in $\R^2$ (one-dimensional subspaces): two nonzero vectors are equivalent if
one is a complex scalar multiple of the other, i.e. if the ratios of their
components agree. That ratio provides a
bijection between $\CP^1$ and $\bar \C$.
Since linear transformations of $\C^2$ map lines (in $\C^2$) to lines (in
$\C^2$), they induce a bijection on $\CP^1$ and therefore on $\bar \C$.
In fact linear-fractional transformations are induced by a two-by-two complex
matrix (an element of $\GLC{2}$) [Do I understand why?]. This makes
linear-fractional transformations closed under composition and gives them an
identity (the LFT corresponding to the identity matrix) and inverses (given by
the matrix inverse). So there is a group of LFTs which is the homomorphic image
of $\GLC {2}$, under the map which sends a two-by-two matrix to its induced
LFT. The kernel of the homomorphism consists of the nonzero scalar multiples of the identity
matrix $I_2$. I think that's basically because such uniform scaling matrices
leave each one-dimensional subspace
unchanged. Therefore the group of LFTs is isomorphic to the quotient group
$\GLC{2}/(\C\backslash\{0\})I_2$ (each coset is formed by taking a matrix and
scaling it by multiplying it with a scaled identity matrix from the kernel).
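A small sanity check of the matrix-to-LFT homomorphism: the matrix
$\smat{1}{1}{0}{1}$ induces the translation $z \mapsto z + 1$ and
$\smat{0}{1}{1}{0}$ induces the inversion $z \mapsto \frac{1}{z}$. Their product
$$
\matMMxNN{1}{1}{0}{1}\matMMxNN{0}{1}{1}{0} = \matMMxNN{1}{1}{1}{0}
$$
induces $z \mapsto \frac{z + 1}{z}$, which is exactly the composition
$z \mapsto \frac{1}{z} \mapsto \frac{1}{z} + 1 = \frac{z + 1}{z}$.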
\begin{description}
\exercise{III.5.2}{
~\\
Given four distinct points $z_1, z_2, z_3, z_4$ in $\bar \C$, their cross
ratio, which is denoted by $(z_1, z_2; z_3, z_4)$ is defined to be the
image of $z_4$ under the linear-fractional transformation that sends
$z_1,z_2,z_3$ to $\infty, 0, 1$, respectively. Prove that if $\phi$ is a
linear-fractional transformation then
$$\(\phi(z_1),\phi(z_2);\phi(z_3),\phi(z_4)\) = (z_1, z_2; z_3, z_4).$$
~\\
}\\
Let $f$ be the linear-fractional transformation that maps $z_1, z_2, z_3$ to
$\infty, 0, 1$ respectively, so that the cross-ratio is defined to be
$(z_1, z_2; z_3, z_4) = f(z_4)$. We want to show that the cross ratio,
defined in this way, is invariant under an arbitrary linear-fractional
transformation $\phi$.
First, let's find an explicit expression for $f(z)$ in terms of
$z_1,z_2,z_3$. We know that $f(z_1) = \infty$ and $f(z_2) = 0$, so perhaps
$f$ has the form $f(z) = c\frac{z_2 - z}{z_1 - z}$ for some constant $c$. We
also require $f(z_3) = 1$. One way to achieve that is to choose
$c = \frac{z_1 - z_3}{z_2 - z_3}$, so the definition of $f$ becomes
$$
f(z) = \frac{(z_2 - z)(z_1 - z_3)}{(z_2 - z_3)(z_1 - z)}.
$$
Defined like this, $f$ is a linear-fractional transformation, and it does
send $z_1,z_2,z_3$ to $\infty, 0, 1$, respectively. Furthermore, by theorem
III.5, this is the only linear-fractional transformation that does so.
So we have
$$
(z_1, z_2; z_3, z_4) = f(z_4) = \frac{(z_1 - z_3)(z_2 - z_4)}{(z_1 - z_4)(z_2 - z_3)},
$$
and we want to show that this quantity is invariant under an arbitrary
linear-fractional transformation $\phi$. Let
$\phi(z) = \frac{az + b}{cz + d}$, with $ad - bc = 1$ (for a linear-fractional
transformation $ad - bc \neq 0$, and scaling all of $a,b,c,d$ by
$\frac{1}{\sqrt{ad - bc}}$ leaves $\phi$ unchanged while normalizing the
determinant to 1). Now consider
\begin{align*}
\phi(z_i) - \phi(z_j)
&= \frac{(az_i + b)(cz_j + d) - (az_j + b)(cz_i + d)}{(cz_i + d)(cz_j + d)} \\
&= \frac{z_iz_j(ac - ac) + z_i(ad - bc) + z_j(bc - ad) + (bd - bd)}{(cz_i + d)(cz_j + d)} \\
&= \frac{z_i - z_j}{(cz_i + d)(cz_j + d)}.
\end{align*}
Letting $A_i = cz_i + d$, we see that the cross-ratio of the transformed
points is
\begin{align*}
\(\phi(z_1),\phi(z_2);\phi(z_3),\phi(z_4)\) = \frac{(z_1 - z_3)(z_2 - z_4)/A_1A_3A_2A_4}{(z_1 - z_4)(z_2 - z_3)/A_1A_4A_2A_3} = (z_1, z_2; z_3, z_4).
\end{align*}
% $$
% z_1, z_2, z_3 = -\frac{d}{c}, \frac{b}{a}, \frac{+d - b}{-c + a}
% $$
\exercise{III.6.3}{
~\\
Prove that a linear-fractional transformation with only one fixed point is
conjugate to a translation.
~\\
}\\
Let $\phi(z) = \frac{az + b}{cz + d}$, with $ad - bc = 1$ (justified in
III.5.2 above). The fixed points of this mapping are the solutions of
\begin{align*}
\frac{az + b}{cz + d} = z,
\end{align*}
which is a quadratic equation
$$
cz^2 + (d - a)z - b = 0,
$$
with solutions