% This file was created with JabRef 2.6.
% Encoding: Cp1252
@ARTICLE{Ambadar05,
author = {Zara Ambadar and J. Schooler and Jeffrey Cohn},
title = {{D}eciphering the {E}nigmatic {F}ace: the {I}mportance of {F}acial
{D}ynamics to {I}nterpreting {S}ubtle {F}acial {E}xpressions},
journal = {Psychological Science},
year = {2005}
}
@ARTICLE{Aung_TAC15,
author = {Min S. H. Aung and Sebastian Kaltwang and Bernardino Romera-Paredes
and Brais Martinez and Aneesha Singh and Matteo Cella and Michel
Valstar and Hongying Meng and Andrew Kemp and Moshen Shafizadeh and
Aaron C. Elkins and Natalie Kanakam and Amschel de Rothschild and
Nick Tyler and Paul J. Watson and Amanda C. de C. Williams and Maja
Pantic and Nadia Bianchi-Berthouze},
title = {The automatic detection of chronic pain-related expression: requirements,
challenges and a multimodal dataset},
  journal = {IEEE Transactions on Affective Computing},
year = {2015},
owner = {Songfan},
timestamp = {2015.07.21}
}
@INPROCEEDINGS{Baker_ICCV07,
author = {Baker, S. and Scharstein, D. and Lewis, J.P. and Roth, S. and Black,
M.J. and Szeliski, R.},
title = {{A} {D}atabase and {E}valuation {M}ethodology for {O}ptical {F}low},
booktitle = {Proc. ICCV},
year = {2007},
issn = {1550-5499},
keywords = {absolute flow endpoint error;average angular error;frame interpolation
error;hidden fluorescent texture;high frame-rate video;image database;nonrigid
motion;optical flow;optical tracking;statistics;stereo sequence;error
analysis;image sequences;image texture;interpolation;optical tracking;statistical
analysis;stereo image processing;visual databases;}
}
@INPROCEEDINGS{Baltrusaitis_FERA11,
author = {Tadas Baltrusaitis and Daniel McDuff and Ntombikayise Banda and Marwa
Mahmoud and Rana el Kaliouby and Peter Robinson and Rosalind Picard},
title = {{R}eal-time {I}nference of {M}ental {S}tates from {F}acial {E}xpressions
and {U}pper {B}ody {G}estures},
booktitle = {Proc. FG Workshop on FERA Challenge},
year = {2011},
owner = {Songfan},
timestamp = {2011.05.01}
}
@INPROCEEDINGS{Bartlett_FG06,
author = {Bartlett, M.S. and Littlewort, G. and Frank, M. and Lainscsek, C.
and Fasel, I. and Movellan, J.},
title = {{F}ully {A}utomatic {F}acial {A}ction {R}ecognition in {S}pontaneous
{B}ehavior},
booktitle = {Proc. FG},
year = {2006},
keywords = {AdaBoost classifiers;facial action coding system;fully automatic facial
action recognition;spontaneous behavior;support vector machines;emotion
recognition;face recognition;support vector machines;}
}
@INPROCEEDINGS{Bartlett03,
author = {M. S. Bartlett and G. Littlewort and B. Braathen and T. J. Sejnowski
and J. R. Movellan},
title = {A prototype for automatic recognition of spontaneous facial actions},
booktitle = {Proc. NIPS},
year = {2003}
}
@ARTICLE{Bassili79,
author = {Bassili, John N.},
title = {{E}motion {R}ecognition: {T}he {R}ole of {F}acial {M}ovement and
the {R}elative {I}mportance of {U}pper and {L}ower {A}reas of the
{F}ace},
  journal = {Journal of Personality and Social Psychology},
year = {1979},
owner = {songfan},
timestamp = {2011.02.28}
}
@ARTICLE{Beck09,
author = {Amir Beck and Marc Teboulle},
title = {A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse
Problems},
journal = {SIAM Journal on Imaging Sciences},
year = {2009},
owner = {Songfan},
timestamp = {2014.06.30}
}
@ARTICLE{Boker09,
author = {Steven M. Boker and Jeffrey F. Cohn and Iain Matthews and Timothy
R. Brick},
title = {{E}ffects of {D}amping {H}ead {M}ovement and {F}acial {E}xpression
in {D}yadic {C}onversation {U}sing {R}eal-time {F}acial {E}xpression
{T}racking and {S}ynthesized {A}vatars},
  journal = {Philosophical Transactions of the Royal Society B},
year = {2009}
}
@ARTICLE{opencv,
author = {Bradski, G.},
title = {{The OpenCV Library}},
journal = {Dr. Dobb's Journal of Software Tools},
year = {2000},
citeulike-article-id = {2236121},
keywords = {bibtex-import},
posted-at = {2008-01-15 19:21:54},
priority = {4}
}
@ARTICLE{Candes11,
author = {E. Cand\`{e}s and X. Li and Y. Ma and J. Wright},
title = {{R}obust {P}rincipal {C}omponent {A}nalysis?},
journal = {Journal of the {ACM}},
year = {2011}
}
@ARTICLE{Caspi_PAMI02,
author = {Caspi, Y. and Irani, M.},
title = {Spatio-temporal alignment of sequences},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {2002}
}
@ARTICLE{libsvm,
author = {Chang, Chih-Chung and Lin, Chih-Jen},
title = {{LIBSVM}: A Library for Support Vector Machines},
journal = {ACM Trans. Intell. Syst. Technol.},
year = {2011},
volume = {2},
pages = {27:1--27:27},
number = {3},
month = may,
acmid = {1961199},
address = {New York, NY, USA},
articleno = {27},
issn = {2157-6904},
issue_date = {April 2011},
numpages = {27},
publisher = {ACM}
}
@INPROCEEDINGS{Chew_FERA11,
author = {Sien Wei Chew and Patrick J. Lucey and Simon Lucey and Jason Saragih
and Jeffrey Cohn and Sridha Sridharan},
title = {{P}erson-independent {F}acial {E}xpression {D}etection {U}sing {C}onstrained
{L}ocal {M}odels},
booktitle = {Proc. FG Workshop on FERA Challenge},
year = {2011},
abstract = {{I}n automatic facial expression detection, very accurate registration
is desired which can be achieved via a deformable model approach
where a dense mesh of 60-70 points on the face is used, such as an
active appearance model ({AAM}). {H}owever, for applications where
manually labeling frames is prohibitive, {AAM}s do not work well
as they do not generalize well to unseen subjects. {A}s such, a more
coarse approach is taken for person-independent facial expression
detection, where just a couple of key features (such as face and
eyes) are tracked using a {V}iola-{J}ones type approach. {T}he tracked
image is normally post-processed to encode for shift and illumination
invariance using a linear bank of filters. {R}ecently, it was shown
that this preprocessing step is of no benefit when close to ideal
registration has been obtained. {I}n this paper, we present a system
based on the {C}onstrained {L}ocal {M}odel ({CLM}) which is a generic
or person-independent face alignment algorithm which gains high accuracy.
{W}e show these results against the {LBP} feature extraction on the
{CK}+ and {GEMEP} datasets.},
keywords = {facial expression recognition, constrained local models, local binary
patterns, fera2011 challenge}
}
@ARTICLE{Cootes_PAMI01,
author = {Cootes, T.F. and Edwards, G.J. and Taylor, C.J.},
title = {{A}ctive {A}ppearance {M}odels},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {2001},
abstract = {{W}e describe a new method of matching statistical models of appearance
to images. {A} set of model parameters control modes of shape and
gray-level variation learned from a training set. {W}e construct
an efficient iterative matching algorithm by learning the relationship
between perturbations in the model parameters and the induced image
errors},
issn = {0162-8828},
keywords = {active appearance models;deformable template;gray-level variation;iterative
method;learning;model matching;optimisation;shape matching;statistical
models;texture matching;image matching;image texture;iterative methods;learning
(artificial intelligence);optimisation;statistical analysis;}
}
@INPROCEEDINGS{Cox_CVPR08,
author = {Cox, M. and Sridharan, S. and Lucey, S. and Cohn, J.},
title = {{L}east {S}quares {C}ongealing for {U}nsupervised {A}lignment of
{I}mages},
booktitle = {Proc. CVPR},
year = {2008},
issn = {1063-6919},
keywords = {image alignment;least squares congealing;unsupervised learning;warp
parameter update estimation;image processing;least squares approximations;unsupervised
learning;}
}
@INPROCEEDINGS{Cruz_AVEC11,
author = {Albert Cruz and Bir Bhanu and Songfan Yang},
title = {{A} {P}sychologically-{I}nspired {M}atch-{S}core {F}usion {M}odel
for {V}ideo-{B}ased {F}acial {E}xpression {R}ecognition},
booktitle = {Proc. ACII workshop on AVEC},
year = {2011},
bibsource = {DBLP, http://dblp.uni-trier.de},
ee = {http://dx.doi.org/10.1007/978-3-642-24571-8_45}
}
@ARTICLE{Dahmane_TMM14,
author = {Mohamed Dahmane and Jean Meunier},
title = {Prototype-Based Modeling for Facial Expression Analysis},
journal = {IEEE Trans. Multimedia},
year = {2014},
owner = {chidi},
timestamp = {2015.04.15}
}
@INPROCEEDINGS{Dahmane_FERA11,
author = {Mohamed Dahmane and Jean Meunier},
title = {{E}motion {R}ecognition using {D}ynamic {G}rid-based {H}o{G} {F}eatures},
booktitle = {Proc. FG Workshop on FERA Challenge},
year = {2011},
owner = {Songfan},
timestamp = {2011.05.01}
}
@INPROCEEDINGS{Dalal_CVPR05,
author = {Dalal, N. and Triggs, B.},
title = {{H}istograms of {O}riented {G}radients for {H}uman {D}etection},
booktitle = {Proc. CVPR},
year = {2005}
}
@INBOOK{Torre11,
chapter = {Facial Expression Analysis},
title = {Guide to Visual Analysis of Humans: Looking at People},
publisher = {Springer},
year = {2011},
author = {Fernando {De la Torre} and Jeffrey F. Cohn}
}
@INPROCEEDINGS{Dhall_FERA11,
author = {Abhinav Dhall and Akshay Asthana and Roland Goecke and Tom Gedeon},
title = {{E}motion {R}ecognition {U}sing \uppercase{PHOG} and \uppercase{LPQ}
features},
booktitle = {Proc. FG Workshop on FERA Challenge},
year = {2011},
owner = {Songfan},
timestamp = {2011.05.01}
}
@ARTICLE{Donato_PAMI99,
author = {Donato, G. and Bartlett, M.S. and Hager, J.C. and Ekman, P. and Sejnowski,
T.J.},
title = {{C}lassifying {F}acial {A}ctions},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {1999},
  abstract = {{T}he facial action coding system ({FACS}) is an objective method
for quantifying facial movement in terms of component actions. {T}his
paper explores and compares techniques for automatically recognizing
facial actions in sequences of images. {T}hese techniques include:
analysis of facial motion through estimation of optical flow; holistic
spatial analysis, such as principal component analysis, independent
component analysis, local feature analysis, and linear discriminant
analysis; and methods based on the outputs of local filters, such
as {G}abor wavelet representations and local principal components.
{P}erformance of these systems is compared to naive and expert human
subjects. {B}est performances were obtained using the {G}abor wavelet
representation and the independent component representation, both
of which achieved 96 percent accuracy for classifying 12 facial actions
of the upper and lower face. {T}he results provide converging evidence
for the importance of using local filters, high spatial frequencies,
and statistical independence for classifying facial actions},
issn = {0162-8828},
keywords = {Gabor wavelet;computer vision;facial action coding system;facial expression
recognition;image sequences;independent component analysis;linear
discriminant analysis;local feature analysis;motion estimation;optical
flow;principal component analysis;computer vision;face recognition;image
sequences;motion estimation;principal component analysis;wavelet
transforms;}
}
@ARTICLE{Beltfast,
author = {Ellen Douglas-Cowie and Nick Campbell and Roddy Cowie and Peter Roach},
title = {{E}motional {S}peech: {T}owards {A} {N}ew {G}eneration of {D}atabases},
journal = {Speech Communication},
year = {2003}
}
@INPROCEEDINGS{Douglas-Cowie00,
author = {Ellen Douglas-Cowie and Roddy Cowie and Marc Schröder},
title = {{A} {N}ew {E}motion {D}atabase: {C}onsiderations, {S}ources and {S}cope},
booktitle = {Proc. the ISCA Workshop on Speech and Emotion},
year = {2000}
}
@BOOK{Ekman78,
title = {{F}acial {A}ction {C}oding {S}ystem: {A} {T}echnique for the {M}easurement
of {F}acial {M}ovement},
publisher = {Consulting Psychologists Press},
year = {1978},
author = {Ekman, P. and Friesen, W.},
owner = {Songfan},
timestamp = {2012.02.20}
}
@BOOK{Ekman2005,
title = {What the Face Reveals: Basic and Applied Studies of Spontaneous Expression
Using the Facial Action Coding System (FACS)},
publisher = {Oxford University Press},
year = {2005},
author = {Paul Ekman and Erika Rosenberg},
owner = {Songfan},
timestamp = {2014.04.04}
}
@INPROCEEDINGS{Kaliouby_SMC04,
author = {El Kaliouby, R. and Robinson, P.},
title = {{M}ind {R}eading {M}achines: {A}utomated {I}nference of {C}ognitive
{M}ental {S}tates from {V}ideo},
booktitle = {Proc. SMC},
year = {2004},
abstract = {{M}ind reading encompasses our ability to attribute mental states
to others, and is essential for operating in a complex social environment.
{T}he goal in building mind reading machines is to enable computer
technologies to understand and react to people's emotions and mental
states. {T}his paper describes a system, for the automated inference
of cognitive mental states from observed facial expressions and head
gestures in video. {T}he system is based on a multilevel dynamic
{B}ayesian network classifier which models cognitive mental states
as a number of interacting facial and head displays. {E}xperimental
results yield an average recognition rate of 87.4% for 6 mental states
groups: agreement, concentrating, disagreement, interested, thinking
and unsure. {R}eal time performance, unobtrusiveness and lack of
preprocessing make our system particularly suitable for user-independent
human computer interaction},
issn = {1062-922X},
keywords = {automated inference;cognitive mental states;complex social environment;head
gestures;mind reading machines;multilevel dynamic Bayesian network
classifier;observed facial expressions;user-independent human computer
interaction;Bayes methods;belief networks;cognition;emotion recognition;human
computer interaction;inference mechanisms;}
}
@ARTICLE{Essa_PAMI97,
author = {Essa, I.A. and Pentland, A.P.},
title = {{C}oding, {A}nalysis, {I}nterpretation, and {R}ecognition of {F}acial
{E}xpressions},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {1997},
abstract = {{W}e describe a computer vision system for observing facial motion
by using an optimal estimation optical flow method coupled with geometric,
physical and motion-based dynamic models describing the facial structure.
{O}ur method produces a reliable parametric representation of the
face's independent muscle action groups, as well as an accurate estimate
of facial motion. {P}revious efforts at analysis of facial expression
have been based on the facial action coding system ({FACS}), a representation
developed in order to allow human psychologists to code expression
from static pictures. {T}o avoid use of this heuristic coding scheme,
we have used our computer vision system to probabilistically characterize
facial motion and muscle activation in an experimental population,
thus deriving a new, more accurate, representation of human facial
expressions that we call {FACS}+. {F}inally, we show how this method
can be used for coding, analysis, interpretation, and recognition
of facial expressions},
issn = {0162-8828},
keywords = {FACS;FACS+;computer vision system;facial action coding system;facial
expression recognition;facial motion;facial structure;image analysis;image
coding;image interpretation;independent muscle action groups;muscle
activation;optimal estimation optical flow method;probabilistic characterization;face
recognition;image coding;image sequences;motion estimation;}
}
@ARTICLE{LIBLINEAR,
author = {Rong-En Fan and Kai-Wei Chang and Cho-Jui Hsieh and Xiang-Rui Wang
and Chih-Jen Lin},
title = {{LIBLINEAR}: {A} {L}ibrary for {L}arge {L}inear {C}lassification},
journal = {JMLR},
year = {2008}
}
@ARTICLE{Fanelli_TMM10,
author = {Gabriele Fanelli and Juergen Gall and Harald Romsdorfer and Thibaut
Weise and Luc Van Gool},
title = {A 3-D Audio-Visual Corpus of Affective Communication},
journal = {IEEE Trans. Multimedia},
year = {2010},
owner = {songfan},
timestamp = {2015.04.15}
}
@ARTICLE{Freeman_IJCV00,
author = {Freeman, William T. and Pasztor, Egon C. and Carmichael, Owen T.},
title = {{L}earning {L}ow-{L}evel {V}ision},
  journal = {Int. J. Comput. Vis.},
year = {2000},
issue = {1}
}
@INPROCEEDINGS{Fridlund_87,
author = {A.J. Fridlund and P. Ekman and H. Oster},
title = {Facial Expressions of Emotion: Review Literature 1970-1983},
booktitle = {In A. W. Siegman \& S. Feldstein (Eds.), Nonverbal behavior and communication},
year = {1987},
owner = {chidi},
timestamp = {2015.06.11}
}
@INPROCEEDINGS{Gehrig_CVPRW11,
author = {Gehrig, T. and Ekenel, H.K.},
title = {{A} {C}ommon {F}ramework for {R}eal-time {E}motion {R}ecognition
and {F}acial {A}ction {U}nit {D}etection},
booktitle = {Proc. CVPR Workshops},
year = {2011},
issn = {2160-7508},
keywords = {FG 2011 facial expression analysis;FG 2011 facial expression recognition;discrete
cosine transform;facial action unit detection;local appearance-based
face representation approach;real-time emotion recognition;support
vector machine classifiers;discrete cosine transforms;emotion recognition;face
recognition;image classification;support vector machines;}
}
@ARTICLE{Han_PAMI06,
author = {Han, J. and Bhanu, B.},
title = {{I}ndividual {R}ecognition {U}sing {G}ait {E}nergy {I}mage},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {2006},
abstract = {{I}n this paper, we propose a new spatio-temporal gait representation,
called {G}ait {E}nergy {I}mage ({GEI}), to characterize human walking
properties for individual recognition by gait. {T}o address the problem
of the lack of training templates, we also propose a novel approach
for human recognition by combining statistical gait features from
real and synthetic templates. {W}e directly compute the real templates
from training silhouette sequences, while we generate the synthetic
templates from training sequences by simulating silhouette distortion.
{W}e use a statistical approach for learning effective features from
real and synthetic templates. {W}e compare the proposed {GEI}-based
gait recognition approach with other gait recognition approaches
on {USF} {H}uman{ID} {D}atabase. {E}xperimental results show that
the proposed {GEI} is an effective and efficient gait representation
for individual recognition, and the proposed approach achieves highly
competitive performance with respect to the published gait recognition
approaches},
doi = {10.1109/TPAMI.2006.38},
issn = {0162-8828},
keywords = {distortion analysis;feature fusion;gait energy image;human recognition;human
walking properties;individual recognition;spatio-temporal gait representation;statistical
gait features;gait analysis;image recognition;}
}
@INPROCEEDINGS{Hu_IVC06,
author = {Changbo Hu and Ya Chang and Feris, R. and Turk, M.},
title = {{M}anifold {B}ased {A}nalysis of {F}acial {E}xpression},
booktitle = {Image and Vision Computing},
year = {2006}
}
@ARTICLE{Huang11,
author = {Di Huang and Caifeng Shan and Ardabilian, M. and Yunhong Wang and
Liming Chen},
title = {Local Binary Patterns and Its Application to Facial Image Analysis:
A Survey},
journal = {IEEE Trans. Syst., Man, Cybern. C, Appl. Rev.},
year = {2011}
}
@INPROCEEDINGS{Huang_ICCV07,
author = {Huang, G.B. and Jain, V. and Learned-Miller, E.},
title = {{U}nsupervised {J}oint {A}lignment of {C}omplex {I}mages},
booktitle = {Proc. ICCV},
year = {2007},
issn = {1550-5499},
keywords = {canonical pose recognition;class-specialized learning algorithm;face
detector;face recognition;image recognition algorithm;unsupervised
joint alignment;face recognition;pose estimation;unsupervised learning;}
}
@ARTICLE{Huang_TMM10,
author = {Yizhen Huang and Ying Li and Na Fan},
title = {Robust Symbolic Dual-View Facial Expression Recognition With Skin
Wrinkles: Local Versus Global Approach},
journal = {IEEE Trans. Multimedia},
year = {2010},
owner = {songfan},
timestamp = {2015.04.15}
}
@BOOK{Huber81,
title = {{R}obust {S}tatistics},
publisher = {John Wiley \& Sons, Inc.},
year = {1981},
author = {Huber, P. J.},
address = {Hoboken, NJ},
owner = {Songfan},
timestamp = {2012.02.22}
}
@ARTICLE{Irani91,
author = {Irani, Michal and Peleg, Shmuel},
title = {{I}mproving {R}esolution by {I}mage {R}egistration},
journal = {Graph. Models Image Process.},
year = {1991},
issue = {3}
}
@INPROCEEDINGS{Jiang_FG11,
  author = {B. Jiang and M. Valstar and M. Pantic},
title = {Action unit detection using sparse appearance descriptors in space-time
video volumes},
booktitle = {Proc. FG},
year = {2011},
owner = {chidi},
timestamp = {2015.06.10}
}
@ARTICLE{Josephine2003,
  author = {Josephine L.C.M. Woltman Elpers and Michel Wedel and Rik G.M. Pieters},
title = {Why Do Consumers Stop Viewing Television Commercials? Two Experiments
on the Influence of Moment-to-Moment Entertainment and Information
Value},
journal = {Journal of Marketing Research},
year = {2003},
owner = {Songfan},
timestamp = {2014.04.14}
}
@INPROCEEDINGS{Kanade_FG00,
author = {Kanade, T. and Cohn, J.F. and Yingli Tian},
title = {{C}omprehensive {D}atabase for {F}acial {E}xpression {A}nalysis},
booktitle = {Proc. FG},
year = {2000},
abstract = {{W}ithin the past decade, significant effort has occurred in developing
methods of facial expression analysis. {B}ecause most investigators
have used relatively limited data sets, the generalizability of these
various methods remains unknown. {W}e describe the problem space
for facial expression analysis, which includes level of description,
transitions among expressions, eliciting conditions, reliability
and validity of training and test data, individual differences in
subjects, head orientation and scene complexity image characteristics,
and relation to non-verbal behavior. {W}e then present the {CMU}-{P}ittsburgh
{AU}-{C}oded {F}ace {E}xpression {I}mage {D}atabase, which currently
includes 2105 digitized image sequences from 182 adult subjects of
varying ethnicity, performing multiple tokens of most primary {FACS}
action units. {T}his database is the most comprehensive testbed to
date for comparative studies of facial expression analysis},
keywords = {CMU-Pittsburgh AU-Coded Face Expression Image Database;FACS action
units;description level;digitized image sequences;eliciting conditions;expression
transitions;facial expression analysis;head orientation;image characteristics;non-verbal
behavior;reliability;scene complexity;subject differences;validity;face
recognition;image sequences;reliability;visual databases;}
}
@INPROCEEDINGS{Keren_CVPR88,
author = {Keren, D. and Peleg, S. and Brada, R.},
title = {{I}mage {S}equence {E}nhancement {U}sing {S}ub-pixel {D}isplacements},
booktitle = {Proc. CVPR},
year = {1988}
}
@ARTICLE{Kotsia_IP07,
author = {Kotsia, I. and Pitas, I.},
title = {{F}acial {E}xpression {R}ecognition in {I}mage {S}equences {U}sing
{G}eometric {D}eformation {F}eatures and {S}upport {V}ector {M}achines},
journal = {IEEE Trans. Image Process.},
year = {2007},
issn = {1057-7149},
keywords = {Candide grid nodes;SVM;facial action units;facial expression;facial
expression recognition;geometric deformation features;grid-tracking;image
sequences;support vector machines;video frames;face recognition;image
sequences;support vector machines;Algorithms;Artificial Intelligence;Face;Facial
Expression;Humans;Image Enhancement;Image Interpretation, Computer-Assisted;Information
Storage and Retrieval;Pattern Recognition, Automated;Subtraction
Technique;Video Recording;}
}
@ARTICLE{LearnedMiller_PAMI06,
author = {Learned-Miller, E.G.},
title = {{D}ata {D}riven {I}mage {M}odels {T}hrough {C}ontinuous {J}oint {A}lignment},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {2006},
issn = {0162-8828},
keywords = {bias removal;congealing;continuous joint alignment;data driven image
models;handwritten character recognition;handwritten digit classifier;magnetic
resonance images;nuisance variables;computer vision;handwritten character
recognition;image resolution;magnetic resonance imaging;nonparametric
statistics;Algorithms;Artificial Intelligence;Automatic Data Processing;Computer
Simulation;Documentation;Handwriting;Image Enhancement;Image Interpretation,
Computer-Assisted;Information Storage and Retrieval;Models, Theoretical;Pattern
Recognition, Automated;Subtraction Technique;}
}
@INPROCEEDINGS{Levi_CVPR04,
  author = {K. Levi and Y. Weiss},
title = {Learning object detection from a small number of examples: The importance
of good features},
booktitle = {Proc. CVPR},
year = {2004},
owner = {chidi},
timestamp = {2015.06.10}
}
@ARTICLE{Lin12,
author = {Yuxu Lin and Mingli Song and Dao Thi Phuong Quynh and Ying He and
Chun Chen},
title = {Sparse Coding for Flexible, Robust 3D Facial-Expression Synthesis},
journal = {IEEE Computer Graphics and Applications},
year = {2012}
}
@INPROCEEDINGS{Lin09,
author = {Zhouchen Lin and Arvind Ganesh and John Wright and Leqin Wu and Minming
Chen and Yi Ma},
title = {Fast convex optimization algorithms for exact recovery of a corrupted
low-rank matrix},
booktitle = {UIUC Technical Report UILU-ENG-09-2214},
year = {2009}
}
@INPROCEEDINGS{Littlewort_IVC06,
author = {Littlewort, G. and Bartlett, M.S. and Fasel, I. and Susskind, J.
and Movellan, J.},
title = {{D}ynamics of {F}acial {E}xpression {E}xtracted {A}utomatically from
{V}ideo},
booktitle = {Image and Vision Computing},
year = {2006},
abstract = { {W}e present a systematic comparison of machine learning methods
applied to the problem of fully automatic recognition of facial expressions,
including {A}da{B}oost, support vector machines, and linear discriminant
analysis. {E}ach video-frame is first scanned in real-time to detect
approximately upright-frontal faces. {T}he faces found are scaled
into image patches of equal size, convolved with a bank of {G}abor
energy filters, and then passed to a recognition engine that codes
facial expressions into 7 dimensions in real time: neutral, anger,
disgust, fear, joy, sadness, surprise. {W}e report results on a series
of experiments comparing spatial frequency ranges, feature selection
techniques, and recognition engines. {B}est results were obtained
by selecting a subset of {G}abor filters using {A}da{B}oost and then
training {S}upport {V}ector {M}achines on the outputs of the filters
selected by {A}da{B}oost. {T}he generalization performance to new
subjects for a 7-way forced choice was 93% or more correct on two
publicly available datasets, the best performance reported so far
on these datasets. {S}urprisingly, registration of internal facial
features was not necessary, even though the face detector does not
provide precisely registered images. {T}he outputs of the classifier
change smoothly as a function of time and thus can be used for unobtrusive
motion capture. {W}e developed an end-to-end system that provides
facial expression codes at 24 frames per second and animates a computer
generated character. {I}n real-time this expression mirror operates
down to resolutions of 16 pixels from eye to eye. {W}e also applied
the system to fully automated facial action coding.},
doi = {10.1109/CVPR.2004.53}
}
@INPROCEEDINGS{Bartlett_FG11,
  author = {Littlewort, G. and Whitehill, J. and Wu, T. and Fasel, I. and Frank, M.
and Movellan, J. and Bartlett, M.},
title = {{C}omputer {E}xpression {R}ecognition {T}oolbox},
booktitle = {Proc. FG},
year = {2011},
abstract = {{W}e present a live demo of the {C}omputer {E}xpression {R}ecognition
{T}oolbox ({CERT}) developed at {U}niversity of {C}alifornia, {S}an
{D}iego. {CERT} measures facial expressions in real-time, and codes
them with respect to expressions of basic emotion, as well as over
  20 facial actions from the {F}acial {A}ction {C}oding {S}ystem ({E}kman
  \& {F}riesen, 1978). {H}ead pose (yaw, pitch, and roll) is also
  detected using an algorithm presented at this conference ({W}hitehill
  \& {M}ovellan, 2008). {A} sample output is shown in {F}igure 1.},
doi = {10.1109/AFGR.2008.4813406},
keywords = {computer expression recognition toolbox;emotion recognition;facial
action coding system;facial expression;pose estimation;emotion recognition;face
recognition;image coding;pose estimation;}
}
@INPROCEEDINGS{Littlewort_CERT_FG2011,
author = {Littlewort, G. and Whitehill, J. and Tingfan Wu and Fasel, I. and
Frank, M. and Movellan, J. and Bartlett, M.},
title = {{T}he {C}omputer {E}xpression {R}ecognition {T}oolbox ({CERT})},
booktitle = {Proc. FG},
year = {2011},
keywords = {3D orientation;CERT;FACS;automatic real-time facial expression recognition;computer
expression recognition toolbox;dual core laptop;extended Cohn-Kanade;facial
action unit coding system;facial expression dataset;software tool;two-alternative
forced choice task;emotion recognition;face recognition;image coding;software
tools;}
}
@INPROCEEDINGS{Littlewort_FERA11,
author = {Gwen Littlewort and Jacob Whitehill and Ting-Fan Wu and Nicholas
Butko and Paul Ruvolo and Javier Movellan and Marian Bartlett},
  title = {{T}he {M}otion in {E}motion - {A} {CERT} {B}ased {A}pproach to the
{FERA} {E}motion {C}hallenge},
booktitle = {Proc. FG Workshop on FERA Challenge},
year = {2011},
owner = {Songfan},
timestamp = {2011.04.29}
}
@ARTICLE{Liu_PAMI11,
author = {Liu, C. and Yuen, J. and Torralba, A.},
title = {\uppercase{SIFT} \uppercase{F}low: {D}ense {C}orrespondence across
{S}cenes and its {A}pplications},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {2011},
doi = {http://doi.ieeecomputersociety.org/10.1109/TPAMI.2010.147},
issn = {0162-8828}
}
@INPROCEEDINGS{Lowe_ICCV99,
author = {Lowe, D.G.},
title = {{O}bject {R}ecognition from {L}ocal {S}cale-invariant {F}eatures},
booktitle = {Proc. ICCV},
year = {1999},
doi = {10.1109/ICCV.1999.790410},
keywords = {3D projection;blurred image gradients;candidate object matches;cluttered
partially occluded images;computation time;inferior temporal cortex;local
geometric deformations;local image features;local scale-invariant
features;low residual least squares solution;multiple orientation
planes;nearest neighbor indexing method;primate vision;robust object
recognition;staged filtering approach;unknown model parameters;computational
geometry;feature extraction;image matching;least squares approximations;object
recognition;}
}
@INPROCEEDINGS{CKplus,
author = {Lucey, P. and Cohn, J.F. and Kanade, T. and Saragih, J. and Ambadar,
  Z. and Matthews, I.},
title = {The Extended Cohn-Kanade Dataset ({CK}+): A complete dataset for
action unit and emotion-specified expression},
booktitle = {CVPR Workshop},
year = {2010}
}
@INPROCEEDINGS{Lucey_FG06,
author = {Lucey, S. and Matthews, I. and Changbo Hu and Ambadar, Z. and de
la Torre, F. and Cohn, J.},
title = {\uppercase{AAM} {D}erived {F}ace {R}epresentations for {R}obust {F}acial
{A}ction {R}ecognition},
booktitle = {Proc. FG},
year = {2006},
doi = {10.1109/FGR.2006.17},
keywords = {active appearance model;face representations;normalization methods;robust
facial action recognition;face recognition;image representation;}
}
@ARTICLE{Lyons_PAMI99,
author = {M. J. Lyons and J. Budynek and S. Akamatsu},
title = {Automatic classification of single facial images},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {1999},
owner = {chidi},
timestamp = {2015.06.10}
}
@ARTICLE{Mao_TMM14,
  author = {Qirong Mao and Ming Dong and Zhengwei Huang and Yongzhao Zhan},
title = {Learning Salient Features for Speech Emotion Recognition Using Convolutional
Neural Networks},
journal = {IEEE Trans. Multimedia},
year = {2014},
owner = {songfan},
timestamp = {2015.04.15}
}
@ARTICLE{Martinez_PAMI13,
author = {Brais Martinez and Michel Valstar and Xavier Binefa and Maja Pantic},
title = {Local Evidence Aggregation for Regression Based Facial Point Detection},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {2013},
owner = {Songfan},
timestamp = {2014.04.04}
}
@ARTICLE{Matthews_IJCV03,
author = {Iain Matthews and Simon Baker},
title = {{A}ctive {A}ppearance {M}odels {R}evisited},
journal = {IJCV},
year = {2003},
volume = {60},
pages = {135-164}
}
@ARTICLE{McDuff07,
author = {McDuff, D. and Kaliouby, R.E. and Picard, R.W.},
title = {Crowdsourcing Facial Responses to Online Videos},
journal = {IEEE Trans. Affective Computing},
year = {2012},
doi = {10.1109/T-AFFC.2012.19},
issn = {1949-3045},
keywords = {Internet;advertising;face recognition;statistics;video databases;video
signal processing;Cohn-Kanade database;Internet;MMI database;commercial;facial
behavior;facial region luminance;facial region movement;facial region
pose;facial region position;facial region scale;facial response analysis;facial
response collection;facial response crowdsourcing;head movement;media
content;online video;smile response;statistics;trackable face video;Advertising;Content
awareness;Ethics;Face recognition;Human factors;Internet;Media;Videos;Crowdsourcing;advertising;facial
expressions;market research;nonverbal behavior},
owner = {chidi},
timestamp = {2015.06.10}
}
@ARTICLE{McDuff_IVC14,
author = {McDuff, D. and Kaliouby, R. and Senechal, T. and Demirdjian, D. and
Picard, R},
title = {Automatic Measurement of Ad Preferences from Facial Responses Gathered
Over the Internet},
journal = {Image and Vision Computing},
year = {2014},
owner = {Songfan},
timestamp = {2014.04.14}
}
@INPROCEEDINGS{McDuff13,
author = {Daniel McDuff and Rana El Kaliouby and Evan Kodra and Rosalind W.
Picard},
title = {Measuring Voter's Candidate Preference Based on Affective Responses
to Election Debates},
booktitle = {ACII},
year = {2013},
bibsource = {dblp computer science bibliography, http://dblp.org},
biburl = {http://dblp.uni-trier.de/rec/bib/conf/acii/McDuffKKP13},
owner = {chidi},
timestamp = {2015.06.10}
}
@INPROCEEDINGS{amfed,
author = {Daniel McDuff and Rana El Kaliouby and Thibaud Senechal and May Amr
and Jeffrey F. Cohn and Rosalind W. Picard},
title = {Affectiva-MIT Facial Expression Dataset (AM-FED): Naturalistic and
Spontaneous Facial Expressions Collected In-the-Wild},
booktitle = {CVPR Workshops},
year = {2013},
pages = {881-888},
owner = {chidi},
timestamp = {2015.06.10}
}
@INPROCEEDINGS{Meng_FERA11,
author = {Hongying Meng and Bernardino Romera-Paredes and Nadia Bianchi-Berthouze},
title = {{E}motion {R}ecognition by {T}wo {V}iew \uppercase{SVM} 2\uppercase{K}
{C}lassifier on {D}ynamic {F}acial {E}xpression {F}eatures},
booktitle = {Proc. FG Workshop on FERA Challenge},
year = {2011},
owner = {Songfan},
timestamp = {2011.05.01}
}
@INCOLLECTION{Meng05,
author = {Meng, Hongying and Shawe-Taylor, John and Szedmak, Sandor and Farquhar,
Jason},
title = {{S}upport {V}ector {M}achine to {S}ynthesise {K}ernels},
booktitle = {Deterministic and Statistical Methods in Machine Learning},
publisher = {Springer Berlin / Heidelberg},
year = {2005},
editor = {Winkler, Joab and Niranjan, Mahesan and Lawrence, Neil},
affiliation = {School of Electronics and Computer Science, University of Southampton,
Southampton, SO17 1BJ UK}
}
@INPROCEEDINGS{Negri06,
author = {Negri, P. and Clady, X. and Milgram, M. and Poulenard, R.},
title = {An Oriented-Contour Point Based Voting Algorithm for Vehicle Type
Classification},
booktitle = {Proc. ICPR},
year = {2006}
}
@ARTICLE{Ojala_PAMI02,
author = {Ojala, T. and Pietik\"ainen, M. and M\"aenp\"a\"a, T.},
title = {{M}ultiresolution {G}ray-scale and {R}otation {I}nvariant {T}exture
{C}lassification with {L}ocal {B}inary {P}atterns},
journal = {IEEE Trans. Pattern Anal. Mach. Intell.},
year = {2002},
abstract = {{P}resents a theoretically very simple, yet efficient, multiresolution
approach to gray-scale and rotation invariant texture classification
based on local binary patterns and nonparametric discrimination of
sample and prototype distributions. {T}he method is based on recognizing
that certain local binary patterns, termed "uniform," are fundamental
properties of local image texture and their occurrence histogram
is proven to be a very powerful texture feature. {W}e derive a generalized
gray-scale and rotation invariant operator presentation that allows
for detecting the "uniform" patterns for any quantization of the
angular space and for any spatial resolution and presents a method
for combining multiple operators for multiresolution analysis. {T}he
proposed approach is very robust in terms of gray-scale variations
since the operator is, by definition, invariant against any monotonic
transformation of the gray scale. {A}nother advantage is computational
simplicity as the operator can be realized with a few operations
in a small neighborhood and a lookup table. {E}xperimental results
demonstrate that good discrimination can be achieved with the occurrence
statistics of simple rotation invariant local binary patterns },
doi = {10.1109/TPAMI.2002.1017623},
issn = {0162-8828},
keywords = {angular space;computational simplicity;gray-scale variations;local
binary patterns;local image texture;multiresolution analysis;multiresolution
gray-scale texture classification;nonparametric discrimination;occurrence
histogram;prototype distributions;rotation invariant texture classification;sample
distributions;spatial resolution;uniform patterns;image classification;image
texture;invariance;nonparametric statistics;}
}
@INPROCEEDINGS{LPQ,
author = {Ojansivu, V. and Heikkil\"a, J.},
title = {{B}lur {I}nsensitive {T}exture {C}lassification {U}sing {L}ocal {P}hase
{Q}uantization},
booktitle = {Proc. ICISP},
year = {2008},
acmid = {1426636},
doi = {http://dx.doi.org/10.1007/978-3-540-69905-7_27},
isbn = {978-3-540-69904-0},
location = {Cherbourg-Octeville, France},
numpages = {8}
}
@INPROCEEDINGS{LPQ-TOP,
author = {P\"aiv\"arinta, J. and Rahtu, E. and Heikkil\"a, J.},
title = {Volume Local Phase Quantization for Blur-Insensitive Dynamic Texture
Classification},
booktitle = {Proc. Scandinavian Conference on Image Analysis (SCIA)},
year = {2011},
owner = {chidi},
timestamp = {2015.05.13}
}
@ARTICLE{Pantic_SMCB06,
author = {Pantic, M. and Patras, I.},
title = {{D}ynamics of {F}acial {E}xpression: {R}ecognition of {F}acial {A}ctions
and {T}heir {T}emporal {S}egments from {F}ace {P}rofile {I}mage {S}equences},
journal = {IEEE Trans. Syst. Man, Cybern. B, Cybern.},
year = {2006},
abstract = {{A}utomatic analysis of human facial expression is a challenging problem
with many applications. {M}ost of the existing automated systems
for facial expression analysis attempt to recognize a few prototypic
emotional expressions, such as anger and happiness. {I}nstead of
representing another approach to machine analysis of prototypic facial
expressions of emotion, the method presented in this paper attempts
to handle a large range of human facial behavior by recognizing facial
muscle actions that produce expressions. {V}irtually all of the existing
vision systems for facial muscle action detection deal only with
frontal-view face images and cannot handle temporal dynamics of facial
actions. {I}n this paper, we present a system for automatic recognition
of facial action units ({AU}s) and their temporal models from long,
profile-view face image sequences. {W}e exploit particle filtering
to track 15 facial points in an input face-profile sequence, and
we introduce facial-action-dynamics recognition from continuous video
input using temporal rules. {T}he algorithm performs both automatic
segmentation of an input video into facial expressions pictured and
recognition of temporal segments (i.e., onset, apex, offset) of 27
{AU}s occurring alone or in a combination in the input face-profile
video. {A} recognition rate of 87% is achieved.},
issn = {1083-4419},
keywords = {automatic facial action unit recognition;automatic human facial expression
analysis;automatic video segmentation;computer vision system;emotional
expression recognition;face profile image sequences;facial muscle
action recognition;facial-action-dynamics recognition;human facial
behavior;particle filtering;temporal model;temporal segment recognition;computer
vision;emotion recognition;face recognition;image segmentation;image
sequences;particle filtering (numerical methods);Algorithms;Artificial
Intelligence;Cluster Analysis;Face;Facial Expression;Humans;Image
Enhancement;Image Interpretation, Computer-Assisted;Information Storage
and Retrieval;Movement;Pattern Recognition, Automated;Photography;Reproducibility
of Results;Sensitivity and Specificity;Subtraction Technique;Time
Factors;Video Recording;}
}
@ARTICLE{Pantic_SMCB04,
author = {Pantic, M. and Rothkrantz, L.J.M.},
title = {{F}acial {A}ction {R}ecognition for {F}acial {E}xpression {A}nalysis
from {S}tatic {F}ace {I}mages},
journal = {IEEE Trans. Syst. Man, Cybern. B, Cybern.},
year = {2004},
abstract = {{A}utomatic recognition of facial gestures (i.e., facial muscle activity)
is rapidly becoming an area of intense interest in the research field
of machine vision. {I}n this paper, we present an automated system
that we developed to recognize facial gestures in static, frontal-
and/or profile-view color face images. {A} multidetector approach
to facial feature localization is utilized to spatially sample the
profile contour and the contours of the facial components such as
the eyes and the mouth. {F}rom the extracted contours of the facial
features, we extract ten profile-contour fiducial points and 19 fiducial
points of the contours of the facial components. {B}ased on these,
32 individual facial muscle actions ({AU}s) occurring alone or in
combination are recognized using rule-based reasoning. {W}ith each
scored {AU}, the utilized algorithm associates a factor denoting
the certainty with which the pertinent {AU} has been scored. {A}
recognition rate of 86% is achieved.},
issn = {1083-4419},
keywords = {facial action recognition;facial component contour sampling;facial
expression analysis;facial feature localization;facial gesture recognition;facial
muscle action units;image processing;profile contour sampling;profile-contour
fiducial points;rule-based reasoning;spatial reasoning;static face
images;face recognition;feature extraction;gesture recognition;image
colour analysis;knowledge based systems;spatial reasoning;uncertainty
handling;Algorithms;Artificial Intelligence;Face;Facial Expression;Humans;Image
Interpretation, Computer-Assisted;Pattern Recognition, Automated;Photography;Posture;Reproducibility