diff --git a/README.md b/README.md
index 53f918f..b8433d9 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,16 @@ Presentation at PyCode Conference 2019 in Gdansk.
Video recording. Coming, maybe in November.
+## SenseCamp2019: Classification of Environmental Sound using IoT sensors
+
+
+
+
+
+Presentation at SenseCamp 2019 hosted by FORCE Technology Senselab.
+Slides: [web](https://jonnor.github.io/machinehearing/sensecamp2019/slides.html),
+[.PDF](./sensecamp2019/slides.pdf)
+
## NMBU lecture on Audio Classification
diff --git a/sensecamp2019/Makefile b/sensecamp2019/Makefile
new file mode 100644
index 0000000..cee7a60
--- /dev/null
+++ b/sensecamp2019/Makefile
@@ -0,0 +1,6 @@
+
+slides:
+	pandoc -t revealjs -s presentation.md -o slides.html --slide-level=2 --mathml -V theme=white
+
+slides.pdf:
+	pandoc -t beamer -s presentation.md -o slides.pdf --slide-level=2 --mathml
diff --git a/sensecamp2019/README.md b/sensecamp2019/README.md
new file mode 100644
index 0000000..84652a0
--- /dev/null
+++ b/sensecamp2019/README.md
@@ -0,0 +1,216 @@
+
+# Context
+
+https://forcetechnology.com/en/events/2019/sensecamp-2019
+"Opportunities with Machine Learning in audio”
+
+
+09.45 - 10.25 Deep audio - data, representations and interactivity
+Lars Kai Hansen, Professor, DTU Compute - Technical University of Denmark
+10.25 - 11.05 On applying AI/ML in Audiology/Hearing aids
+Jens Brehm Bagger Nielsen, Architect, Machine Learning & Data Science, Widex
+11.05 - 11.45 Data-driven services in hearing health care
+Niels H. Pontoppidan, Research Area Manager, Augmented Hearing Science - Eriksholm Research Center
+
+
+## Format
+
+30 minutes, 10 minutes QA
+
+# TODO
+
+- Add some Research Projects at the end
+
+Make it pretty:
+
+- Add Soundsensing logo to frontpage
+- Add Soundsensing logo to ending page
+- Add Soundsensing logo at bottom of each page
+
+
+# Goals
+
+From our POV
+
+1. Attract partners for Soundsensing
+Research institutes. Public or private.
+Joint technology development?
+2. Attract pilot projects for Soundsensing
+(3. Attract contacts for consulting on ML+audio+embedded )
+
+From audience POV
+
+> you, as audio professionals, understand:
+>
+> possibilities of on-sensor ML
+>
+> how Soundsensing applies this to Noise Monitoring
+
+> basics of machine learning for audio
+
+
+
+## Partnerships
+
+Research
+
+What do we want to get out of a partnership?
+How can someone be of benefit to us?
+
+- Provide funding from their existing R&D project budgets
+- Provide resources (students etc) to work on our challenges
+- Help secure funding in joint project
+
+
+
+## Calls to action
+
+Looking for 1-2 Data Science students in Spring 2020.
+
+Looking for pilot projects for Autumn 2020 (or maybe spring).
+
+Interested in machine learning (for audio) on embedded devices?
+Come talk to me!
+Send email.
+
+
+## Title
+Classification of environmental sound using IoT sensors
+
+
+## Audience
+
+Audio practitioners. Many technical, some management.
+
+- Familiar with Sound.
+Audio acquisition, Sampling rate, Frequency spectrum, Spectrograms
+- Not familiar with Machine Learning
+Supervised learning. Convolutional Neural Networks.
+- Not familiar with Internet of Things
+
+## Scope
+
+Style:
+Less training/tutorial/how-to compared to EuroPython/PyCode.
+More Research & Development oriented.
+More Soundsensing-focused.
+
+
+
+# Outline
+
+Introduction
+
+- About me
+- About Soundsensing
+- Noise Monitoring
+- Thesis
+
+- Environmental Sound Classification
+- Wireless sensor network constraints. IoT
+- On-edge classification
+- Future sneak peek: Neural accelerators for HW
+
+
+- Existing ESC work
+- SB-CNN model
+- Typical Audio classification pipeline
+- Performance vs compute landscape
+
+
+- How to get this to fit on a small device?
+Limiting input size
+Depthwise Convolutions
+
+Tricks
+
+- Unknown class
+- Merging to more high-level classes
+- Mapping over longer times
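+
+One of these tricks, merging to more high-level classes, can be as simple as a lookup table over the predicted labels. A minimal sketch in Python, using Urbansound8k class names but with an illustrative (not authoritative) grouping:
+
+```python
+# Map fine-grained Urbansound8k predictions onto coarser groups before reporting.
+# The grouping below is an example, not the one used by Soundsensing.
+GROUPS = {
+    'jackhammer': 'construction', 'drilling': 'construction',
+    'car_horn': 'traffic', 'engine_idling': 'traffic', 'siren': 'traffic',
+    'children_playing': 'human', 'street_music': 'human',
+}
+
+def to_group(fine_label):
+    return GROUPS.get(fine_label, 'other')
+```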
+
+## Out of scope
+
+On-edge challenges
+
+## Q
+
+Availability of
+
+- Low-power microcontroller. ARM Cortex M4F
+- FPGA.
+- ASIC.
+
+ST Orlando
+
+Cortex-M4 microcontroller (MCU) and 128 KB of memory
+6.2 mm x 5.5 mm die
+200 MHz
+41 mWatt
+2.9 TOPS/W
+AlexNet at 10 FPS.
+
+
+Microphone becomes the bottleneck.
+
+Vesper VM1010
+Wake on Sound
+18 uWatt
+
+PUI Audio PMM-3738-VM1010
+Wake on Sound
+9 μW of power
+
+https://www.digikey.com/en/product-highlight/p/pui-audio/wake-on-sound-piezoelectric-mems-microphone
+
+
+https://blog.st.com/orlando-neural-network-iot/
+
+
+What is the TOPS/watt for a current Cortex M4F?
+How does it compare with proposed milliwatt-scale accelerators?
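+
+A rough back-of-envelope, with the assumptions marked in the comments (estimates, not measured numbers):
+
+```python
+# Very rough compute-efficiency estimate for a Cortex-M4F class MCU.
+# Assumptions: ~1 MACC/cycle (2 ops) using DSP instructions at 80 MHz, ~10 mW system power.
+ops_per_second = 80e6 * 2          # ~0.16 GOPS
+watts = 10e-3
+tops_per_watt = ops_per_second / watts / 1e12
+print(tops_per_watt)               # ~0.016 TOPS/W, vs 2.9 TOPS/W claimed for Orlando
+```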
+
+
+Lattice sensAI stack
+FPGA
+1 mW-1W
+
+https://www.latticesemi.com/Blog/2019/05/17/18/25/sensAI
+
+Human presence detection. 5 FPS 64x64x3. 7 mW
+VGG8. 8 layer CNN.
+
+Lattice ICE40 UltraPlus CNN accelerator IP
+http://www.latticesemi.com/Products/DesignSoftwareAndIP/IntellectualProperty/IPCore/IPCores04/compactcnn
+
+TensorFlow Lite for microcontrollers
+https://www.tensorflow.org/lite/microcontrollers
+
+STM32Cube.AI
+STM32 X-CUBE-AI
+https://www.st.com/en/embedded-software/x-cube-ai.html
+
+
+emlearn
+
+
+
+
+
+## Takeaways
+Or talking points...
+
+- ML on audio close to human-level performance on some tasks
+(when not compute constrained)
+
+- On-edge inference is desirable to keep data traffic down.
+Enable battery power / energy harvesting - cheaper installation costs - denser networks.
+Lower data traffic - cheaper wireless costs.
+
+- ML-accelerators for low-power sensor units are expected in 2020
+
+- Soundsensing has developed a low-power sensor unit for Noise Monitoring.
+- We are running pilot projects now.
+
+- Strong cross-pollination from bigger ML domains.
+Image and Natural Language Processing push Audio forward.
+CNNs. Sequence modelling (RNNs).
diff --git a/sensecamp2019/img/CMSIS-NN-functions.png b/sensecamp2019/img/CMSIS-NN-functions.png
new file mode 100644
index 0000000..82fcaf1
Binary files /dev/null and b/sensecamp2019/img/CMSIS-NN-functions.png differ
diff --git a/sensecamp2019/img/SONYC-CPS.png b/sensecamp2019/img/SONYC-CPS.png
new file mode 100644
index 0000000..ae981f1
Binary files /dev/null and b/sensecamp2019/img/SONYC-CPS.png differ
diff --git a/sensecamp2019/img/ST-Orlando-SoC.png b/sensecamp2019/img/ST-Orlando-SoC.png
new file mode 100644
index 0000000..cbc5b78
Binary files /dev/null and b/sensecamp2019/img/ST-Orlando-SoC.png differ
diff --git a/sensecamp2019/img/STM32F103VGT6-LD.jpg b/sensecamp2019/img/STM32F103VGT6-LD.jpg
new file mode 100644
index 0000000..2e4a0dd
Binary files /dev/null and b/sensecamp2019/img/STM32F103VGT6-LD.jpg differ
diff --git a/sensecamp2019/img/activation-functions.png b/sensecamp2019/img/activation-functions.png
new file mode 100644
index 0000000..5bbed47
Binary files /dev/null and b/sensecamp2019/img/activation-functions.png differ
diff --git a/sensecamp2019/img/activation-functions.svg b/sensecamp2019/img/activation-functions.svg
new file mode 100644
index 0000000..753632d
--- /dev/null
+++ b/sensecamp2019/img/activation-functions.svg
@@ -0,0 +1,206 @@
+
+
+
+
diff --git a/sensecamp2019/img/analysis-windows.png b/sensecamp2019/img/analysis-windows.png
new file mode 100644
index 0000000..1f65943
Binary files /dev/null and b/sensecamp2019/img/analysis-windows.png differ
diff --git a/sensecamp2019/img/analysis-windows.svg b/sensecamp2019/img/analysis-windows.svg
new file mode 100644
index 0000000..d411d26
--- /dev/null
+++ b/sensecamp2019/img/analysis-windows.svg
@@ -0,0 +1,343 @@
+
+
+
+
diff --git a/sensecamp2019/img/artificial-neuron.png b/sensecamp2019/img/artificial-neuron.png
new file mode 100644
index 0000000..9f6ca2e
Binary files /dev/null and b/sensecamp2019/img/artificial-neuron.png differ
diff --git a/sensecamp2019/img/artificial-neuron.svg b/sensecamp2019/img/artificial-neuron.svg
new file mode 100644
index 0000000..f320412
--- /dev/null
+++ b/sensecamp2019/img/artificial-neuron.svg
@@ -0,0 +1,328 @@
+
+
+
+
diff --git a/sensecamp2019/img/artificial-neuron.svg.2019_05_10_17_19_22.0.svg b/sensecamp2019/img/artificial-neuron.svg.2019_05_10_17_19_22.0.svg
new file mode 100644
index 0000000..855332e
--- /dev/null
+++ b/sensecamp2019/img/artificial-neuron.svg.2019_05_10_17_19_22.0.svg
@@ -0,0 +1,331 @@
+
+
+
+
diff --git a/sensecamp2019/img/audio-aquisition.png b/sensecamp2019/img/audio-aquisition.png
new file mode 100644
index 0000000..1257342
Binary files /dev/null and b/sensecamp2019/img/audio-aquisition.png differ
diff --git a/sensecamp2019/img/audio-aquisition.svg b/sensecamp2019/img/audio-aquisition.svg
new file mode 100644
index 0000000..6c2f340
--- /dev/null
+++ b/sensecamp2019/img/audio-aquisition.svg
@@ -0,0 +1,828 @@
+
+
+
+
diff --git a/sensecamp2019/img/classification-pipeline.png b/sensecamp2019/img/classification-pipeline.png
new file mode 100644
index 0000000..9c38e8b
Binary files /dev/null and b/sensecamp2019/img/classification-pipeline.png differ
diff --git a/sensecamp2019/img/classification-pipeline.svg b/sensecamp2019/img/classification-pipeline.svg
new file mode 100644
index 0000000..af0e095
--- /dev/null
+++ b/sensecamp2019/img/classification-pipeline.svg
@@ -0,0 +1,788 @@
+
+
+
+
diff --git a/sensecamp2019/img/confusion_test.png b/sensecamp2019/img/confusion_test.png
new file mode 100644
index 0000000..71dc819
Binary files /dev/null and b/sensecamp2019/img/confusion_test.png differ
diff --git a/sensecamp2019/img/conv-block-effnet.png b/sensecamp2019/img/conv-block-effnet.png
new file mode 100644
index 0000000..018dbaa
Binary files /dev/null and b/sensecamp2019/img/conv-block-effnet.png differ
diff --git a/sensecamp2019/img/conv-block-mobilenet.png b/sensecamp2019/img/conv-block-mobilenet.png
new file mode 100644
index 0000000..fb61ee8
Binary files /dev/null and b/sensecamp2019/img/conv-block-mobilenet.png differ
diff --git a/sensecamp2019/img/conv-block-shufflenet.png b/sensecamp2019/img/conv-block-shufflenet.png
new file mode 100644
index 0000000..5c24f9f
Binary files /dev/null and b/sensecamp2019/img/conv-block-shufflenet.png differ
diff --git a/sensecamp2019/img/conv-blocks-imagenets.png b/sensecamp2019/img/conv-blocks-imagenets.png
new file mode 100644
index 0000000..11f0248
Binary files /dev/null and b/sensecamp2019/img/conv-blocks-imagenets.png differ
diff --git a/sensecamp2019/img/conv-blocks-imagenets.svg b/sensecamp2019/img/conv-blocks-imagenets.svg
new file mode 100644
index 0000000..9d11af4
--- /dev/null
+++ b/sensecamp2019/img/conv-blocks-imagenets.svg
@@ -0,0 +1,3370 @@
+
+
+
+
diff --git a/sensecamp2019/img/conv-depthwise-separable.png b/sensecamp2019/img/conv-depthwise-separable.png
new file mode 100644
index 0000000..046bdce
Binary files /dev/null and b/sensecamp2019/img/conv-depthwise-separable.png differ
diff --git a/sensecamp2019/img/conv-grouped-1x1-g3.png b/sensecamp2019/img/conv-grouped-1x1-g3.png
new file mode 100644
index 0000000..aca41c8
Binary files /dev/null and b/sensecamp2019/img/conv-grouped-1x1-g3.png differ
diff --git a/sensecamp2019/img/conv-grouped-3x3-g3.png b/sensecamp2019/img/conv-grouped-3x3-g3.png
new file mode 100644
index 0000000..76cbace
Binary files /dev/null and b/sensecamp2019/img/conv-grouped-3x3-g3.png differ
diff --git a/sensecamp2019/img/conv-standard.png b/sensecamp2019/img/conv-standard.png
new file mode 100644
index 0000000..15ffb65
Binary files /dev/null and b/sensecamp2019/img/conv-standard.png differ
diff --git a/sensecamp2019/img/convolution-2d.png b/sensecamp2019/img/convolution-2d.png
new file mode 100644
index 0000000..48e85e3
Binary files /dev/null and b/sensecamp2019/img/convolution-2d.png differ
diff --git a/sensecamp2019/img/convolution-2d.svg b/sensecamp2019/img/convolution-2d.svg
new file mode 100644
index 0000000..4315912
--- /dev/null
+++ b/sensecamp2019/img/convolution-2d.svg
@@ -0,0 +1,1692 @@
+
+
+
+
diff --git a/sensecamp2019/img/cortex-m4.jpeg b/sensecamp2019/img/cortex-m4.jpeg
new file mode 100644
index 0000000..61d01e2
Binary files /dev/null and b/sensecamp2019/img/cortex-m4.jpeg differ
diff --git a/sensecamp2019/img/cortexM4.png b/sensecamp2019/img/cortexM4.png
new file mode 100644
index 0000000..377c0a5
Binary files /dev/null and b/sensecamp2019/img/cortexM4.png differ
diff --git a/sensecamp2019/img/cortexM4.webp b/sensecamp2019/img/cortexM4.webp
new file mode 100644
index 0000000..a115e9d
Binary files /dev/null and b/sensecamp2019/img/cortexM4.webp differ
diff --git a/sensecamp2019/img/cover.png b/sensecamp2019/img/cover.png
new file mode 100644
index 0000000..f9a25c5
Binary files /dev/null and b/sensecamp2019/img/cover.png differ
diff --git a/sensecamp2019/img/cover.svg b/sensecamp2019/img/cover.svg
new file mode 100644
index 0000000..a249d02
--- /dev/null
+++ b/sensecamp2019/img/cover.svg
@@ -0,0 +1,587 @@
+
+
+
+
diff --git a/sensecamp2019/img/cpu-efficiency.png b/sensecamp2019/img/cpu-efficiency.png
new file mode 100644
index 0000000..3526933
Binary files /dev/null and b/sensecamp2019/img/cpu-efficiency.png differ
diff --git a/sensecamp2019/img/crossvalidation.png b/sensecamp2019/img/crossvalidation.png
new file mode 100644
index 0000000..9a64ce9
Binary files /dev/null and b/sensecamp2019/img/crossvalidation.png differ
diff --git a/sensecamp2019/img/crossvalidation.svg b/sensecamp2019/img/crossvalidation.svg
new file mode 100644
index 0000000..3c6c01e
--- /dev/null
+++ b/sensecamp2019/img/crossvalidation.svg
@@ -0,0 +1,954 @@
+
+
+
+
diff --git a/sensecamp2019/img/demo-tightcrop.jpg b/sensecamp2019/img/demo-tightcrop.jpg
new file mode 100644
index 0000000..dea74d4
Binary files /dev/null and b/sensecamp2019/img/demo-tightcrop.jpg differ
diff --git a/sensecamp2019/img/depthwise-separable-convolution.png b/sensecamp2019/img/depthwise-separable-convolution.png
new file mode 100644
index 0000000..37a696b
Binary files /dev/null and b/sensecamp2019/img/depthwise-separable-convolution.png differ
diff --git a/sensecamp2019/img/depthwise-separable-convolution.svg b/sensecamp2019/img/depthwise-separable-convolution.svg
new file mode 100644
index 0000000..c9a5455
--- /dev/null
+++ b/sensecamp2019/img/depthwise-separable-convolution.svg
@@ -0,0 +1,2100 @@
+
+
+
+
diff --git a/sensecamp2019/img/envnet.png b/sensecamp2019/img/envnet.png
new file mode 100644
index 0000000..6b5e7b0
Binary files /dev/null and b/sensecamp2019/img/envnet.png differ
diff --git a/sensecamp2019/img/fail-dropout.png b/sensecamp2019/img/fail-dropout.png
new file mode 100644
index 0000000..cb634ff
Binary files /dev/null and b/sensecamp2019/img/fail-dropout.png differ
diff --git a/sensecamp2019/img/fail-truncation.png b/sensecamp2019/img/fail-truncation.png
new file mode 100644
index 0000000..29af236
Binary files /dev/null and b/sensecamp2019/img/fail-truncation.png differ
diff --git a/sensecamp2019/img/framing.png b/sensecamp2019/img/framing.png
new file mode 100644
index 0000000..b627cb2
Binary files /dev/null and b/sensecamp2019/img/framing.png differ
diff --git a/sensecamp2019/img/frontpage.png b/sensecamp2019/img/frontpage.png
new file mode 100644
index 0000000..e773f3d
Binary files /dev/null and b/sensecamp2019/img/frontpage.png differ
diff --git a/sensecamp2019/img/grouped_confusion_test_foreground.png b/sensecamp2019/img/grouped_confusion_test_foreground.png
new file mode 100644
index 0000000..53a3a3d
Binary files /dev/null and b/sensecamp2019/img/grouped_confusion_test_foreground.png differ
diff --git a/sensecamp2019/img/iCE40UltraPlus.png b/sensecamp2019/img/iCE40UltraPlus.png
new file mode 100644
index 0000000..b138faa
Binary files /dev/null and b/sensecamp2019/img/iCE40UltraPlus.png differ
diff --git a/sensecamp2019/img/input-size.svg b/sensecamp2019/img/input-size.svg
new file mode 100644
index 0000000..d09cd63
--- /dev/null
+++ b/sensecamp2019/img/input-size.svg
@@ -0,0 +1,322 @@
+
+
+
+
diff --git a/sensecamp2019/img/lenet5.png b/sensecamp2019/img/lenet5.png
new file mode 100644
index 0000000..265d825
Binary files /dev/null and b/sensecamp2019/img/lenet5.png differ
diff --git a/sensecamp2019/img/maxpooling.png b/sensecamp2019/img/maxpooling.png
new file mode 100644
index 0000000..d7cc2e0
Binary files /dev/null and b/sensecamp2019/img/maxpooling.png differ
diff --git a/sensecamp2019/img/maxpooling.svg b/sensecamp2019/img/maxpooling.svg
new file mode 100644
index 0000000..cdaed86
--- /dev/null
+++ b/sensecamp2019/img/maxpooling.svg
@@ -0,0 +1,498 @@
+
+
+
+
diff --git a/sensecamp2019/img/models-list.png b/sensecamp2019/img/models-list.png
new file mode 100644
index 0000000..74c7f52
Binary files /dev/null and b/sensecamp2019/img/models-list.png differ
diff --git a/sensecamp2019/img/models.svg b/sensecamp2019/img/models.svg
new file mode 100644
index 0000000..ed849c3
--- /dev/null
+++ b/sensecamp2019/img/models.svg
@@ -0,0 +1,813 @@
+
+
+
+
diff --git a/sensecamp2019/img/models_accuracy.png b/sensecamp2019/img/models_accuracy.png
new file mode 100644
index 0000000..05cf651
Binary files /dev/null and b/sensecamp2019/img/models_accuracy.png differ
diff --git a/sensecamp2019/img/models_efficiency.png b/sensecamp2019/img/models_efficiency.png
new file mode 100644
index 0000000..e942c04
Binary files /dev/null and b/sensecamp2019/img/models_efficiency.png differ
diff --git a/sensecamp2019/img/multilayer-perceptron.png b/sensecamp2019/img/multilayer-perceptron.png
new file mode 100644
index 0000000..8879e0e
Binary files /dev/null and b/sensecamp2019/img/multilayer-perceptron.png differ
diff --git a/sensecamp2019/img/multilayer-perceptron.svg b/sensecamp2019/img/multilayer-perceptron.svg
new file mode 100644
index 0000000..4e2ce30
--- /dev/null
+++ b/sensecamp2019/img/multilayer-perceptron.svg
@@ -0,0 +1,789 @@
+
+
+
+
diff --git a/sensecamp2019/img/nmbu_logo_eng_rgb.jpg b/sensecamp2019/img/nmbu_logo_eng_rgb.jpg
new file mode 100644
index 0000000..6d6a8d7
Binary files /dev/null and b/sensecamp2019/img/nmbu_logo_eng_rgb.jpg differ
diff --git a/sensecamp2019/img/nmbu_logo_eng_rgb_trans.png b/sensecamp2019/img/nmbu_logo_eng_rgb_trans.png
new file mode 100644
index 0000000..d1c38b4
Binary files /dev/null and b/sensecamp2019/img/nmbu_logo_eng_rgb_trans.png differ
diff --git a/sensecamp2019/img/noise-monitoring.jpg b/sensecamp2019/img/noise-monitoring.jpg
new file mode 100644
index 0000000..37cb80d
Binary files /dev/null and b/sensecamp2019/img/noise-monitoring.jpg differ
diff --git a/sensecamp2019/img/noiseseverity.png b/sensecamp2019/img/noiseseverity.png
new file mode 100644
index 0000000..5c94219
Binary files /dev/null and b/sensecamp2019/img/noiseseverity.png differ
diff --git a/sensecamp2019/img/noiseseverity.svg b/sensecamp2019/img/noiseseverity.svg
new file mode 100644
index 0000000..e3309f9
--- /dev/null
+++ b/sensecamp2019/img/noiseseverity.svg
@@ -0,0 +1,355 @@
+
+
+
+
diff --git a/sensecamp2019/img/piczak-cnn.png b/sensecamp2019/img/piczak-cnn.png
new file mode 100644
index 0000000..a3f6983
Binary files /dev/null and b/sensecamp2019/img/piczak-cnn.png differ
diff --git a/sensecamp2019/img/results.csv b/sensecamp2019/img/results.csv
new file mode 100644
index 0000000..98cab63
--- /dev/null
+++ b/sensecamp2019/img/results.csv
@@ -0,0 +1,12 @@
+experiment,result_path,maccs_frame,flash_usage,ram_usage_max,ram_usage_min,test_acc_mean,foreground_test_acc_mean,background_test_acc_mean,model,conv_block,n_stages,conv_size,downsample_size,filters,modelcheck,nickname,classifications_per_second
+0,./data/results/20190501-0223/0.confusion.npz,10185806.0,415100.0,36290.0,36290.0,0.7311827956989249,0.8427230046948355,0.4956629491945476,sbcnn,conv,3,5x5,3x2,24,skip,Baseline,2.7777777777777777
+1,./data/results/20190501-0223/1.confusion.npz,2980798.0,381150.0,56720.0,56720.0,0.7185716182131953,0.8210093896713615,0.5022717885171417,strided,conv,3,5x5,2x2,22,,Stride-5x5,2.7777777777777777
+10,./data/results/20190501-0223/10.confusion.npz,468649.0,128750.0,48750.0,48750.0,0.6713128899508827,0.7546948356807511,0.49524989673688563,strided,effnet,3,5x5,2x2,22,,Stride-Effnet-5x5,2.7777777777777777
+2,./data/results/20190501-0223/2.confusion.npz,477236.0,184640.0,56250.0,56250.0,0.7254745785211735,0.8133802816901408,0.5398595621643948,strided,depthwise_separable,3,5x5,2x2,24,,Stride-DS-5x5,2.7777777777777777
+3,./data/results/20190501-0223/3.confusion.npz,318497.0,97650.0,56250.0,56250.0,0.7011814682065578,0.7938184663536776,0.5055762081784386,strided,depthwise_separable,4,3x3,2x2,24,,Stride-DS-3x3,2.7777777777777777
+4,./data/results/20190501-0223/4.confusion.npz,445688.0,81940.0,48750.0,48750.0,0.685517058276915,0.7767996870109547,0.49277158199091287,strided,bottleneck_ds,3,5x5,2x2,22,,Stride-BN-DS-5x5,2.7777777777777777
+5,./data/results/20190501-0223/5.confusion.npz,477236.0,184640.0,56250.0,56250.0,0.7145891411124385,0.812793427230047,0.5072284180090871,strided,depthwise_separable,3,5x5,2x2,24,,DS-5x5-24,2.7777777777777777
+6,./data/results/20190501-0223/6.confusion.npz,380749.0,152810.0,46880.0,46880.0,0.7285278109650869,0.8194444444444443,0.5365551425030979,strided,depthwise_separable,3,5x5,2x2,20,,DS-5x5-20,2.7777777777777777
+7,./data/results/20190501-0223/7.confusion.npz,291318.0,121590.0,37500.0,37500.0,0.7155183857692818,0.8092723004694835,0.5175547294506402,strided,depthwise_separable,3,5x5,2x2,16,,DS-5x5-16,2.7777777777777777
+8,./data/results/20190501-0223/8.confusion.npz,208943.0,90970.0,28130.0,28130.0,0.6998539758396389,0.7924491392801252,0.5043370508054523,strided,depthwise_separable,3,5x5,2x2,12,,DS-5x5-12,2.7777777777777777
+9,./data/results/20190501-0223/9.confusion.npz,1567280.0,98410.0,56350.0,56350.0,0.7265365724147085,0.8395931142410016,0.4878149524989674,sbcnn,depthwise_separable,3,5x5,3x2,24,,Baseline-DS,2.7777777777777777
diff --git a/sensecamp2019/img/results.png b/sensecamp2019/img/results.png
new file mode 100644
index 0000000..71df9da
Binary files /dev/null and b/sensecamp2019/img/results.png differ
diff --git a/sensecamp2019/img/sensornetworks.png b/sensecamp2019/img/sensornetworks.png
new file mode 100644
index 0000000..992fc12
Binary files /dev/null and b/sensecamp2019/img/sensornetworks.png differ
diff --git a/sensecamp2019/img/sensornetworks.svg b/sensecamp2019/img/sensornetworks.svg
new file mode 100644
index 0000000..ae671f0
--- /dev/null
+++ b/sensecamp2019/img/sensornetworks.svg
@@ -0,0 +1,6254 @@
+
+
+
+
diff --git a/sensecamp2019/img/sensortile-annotated.jpg b/sensecamp2019/img/sensortile-annotated.jpg
new file mode 100644
index 0000000..191f620
Binary files /dev/null and b/sensecamp2019/img/sensortile-annotated.jpg differ
diff --git a/sensecamp2019/img/sensortile-annotated.svg b/sensecamp2019/img/sensortile-annotated.svg
new file mode 100644
index 0000000..736e041
--- /dev/null
+++ b/sensecamp2019/img/sensortile-annotated.svg
@@ -0,0 +1,2149 @@
+
+
+
+
diff --git a/sensecamp2019/img/sensortile-devkit.jpg b/sensecamp2019/img/sensortile-devkit.jpg
new file mode 100644
index 0000000..bb3d3ea
Binary files /dev/null and b/sensecamp2019/img/sensortile-devkit.jpg differ
diff --git a/sensecamp2019/img/soundsensing-logo.png b/sensecamp2019/img/soundsensing-logo.png
new file mode 100644
index 0000000..8c41f81
Binary files /dev/null and b/sensecamp2019/img/soundsensing-logo.png differ
diff --git a/sensecamp2019/img/soundsensing-logo.xcf b/sensecamp2019/img/soundsensing-logo.xcf
new file mode 100644
index 0000000..f5d3ada
Binary files /dev/null and b/sensecamp2019/img/soundsensing-logo.xcf differ
diff --git a/sensecamp2019/img/soundsensing-sensor-metro.jpg b/sensecamp2019/img/soundsensing-sensor-metro.jpg
new file mode 100644
index 0000000..56a1c04
Binary files /dev/null and b/sensecamp2019/img/soundsensing-sensor-metro.jpg differ
diff --git a/sensecamp2019/img/soundsensing-withlogo.png b/sensecamp2019/img/soundsensing-withlogo.png
new file mode 100644
index 0000000..0050358
Binary files /dev/null and b/sensecamp2019/img/soundsensing-withlogo.png differ
diff --git a/sensecamp2019/img/soundsensing-withlogo.svg b/sensecamp2019/img/soundsensing-withlogo.svg
new file mode 100644
index 0000000..386e606
--- /dev/null
+++ b/sensecamp2019/img/soundsensing-withlogo.svg
@@ -0,0 +1,52247 @@
+
+
+
+
diff --git a/sensecamp2019/img/spatially-separable-convolution.png b/sensecamp2019/img/spatially-separable-convolution.png
new file mode 100644
index 0000000..22fb1e5
Binary files /dev/null and b/sensecamp2019/img/spatially-separable-convolution.png differ
diff --git a/sensecamp2019/img/spatially-separable-convolution.svg b/sensecamp2019/img/spatially-separable-convolution.svg
new file mode 100644
index 0000000..a3f267c
--- /dev/null
+++ b/sensecamp2019/img/spatially-separable-convolution.svg
@@ -0,0 +1,611 @@
+
+
+
+
diff --git a/sensecamp2019/img/spectrograms.svg b/sensecamp2019/img/spectrograms.svg
new file mode 100644
index 0000000..fce3261
--- /dev/null
+++ b/sensecamp2019/img/spectrograms.svg
@@ -0,0 +1,13106 @@
+
+
+
+
diff --git a/sensecamp2019/img/stm32cubeai.png b/sensecamp2019/img/stm32cubeai.png
new file mode 100644
index 0000000..2675d88
Binary files /dev/null and b/sensecamp2019/img/stm32cubeai.png differ
diff --git a/sensecamp2019/img/stoykart.png b/sensecamp2019/img/stoykart.png
new file mode 100644
index 0000000..c3311a9
Binary files /dev/null and b/sensecamp2019/img/stoykart.png differ
diff --git a/sensecamp2019/img/strided-convolution.png b/sensecamp2019/img/strided-convolution.png
new file mode 100644
index 0000000..fe478ac
Binary files /dev/null and b/sensecamp2019/img/strided-convolution.png differ
diff --git a/sensecamp2019/img/strided-convolution.svg b/sensecamp2019/img/strided-convolution.svg
new file mode 100644
index 0000000..9111228
--- /dev/null
+++ b/sensecamp2019/img/strided-convolution.svg
@@ -0,0 +1,868 @@
+
+
+
+
diff --git a/sensecamp2019/img/thesis.png b/sensecamp2019/img/thesis.png
new file mode 100644
index 0000000..7299af0
Binary files /dev/null and b/sensecamp2019/img/thesis.png differ
diff --git a/sensecamp2019/img/training-inference.png b/sensecamp2019/img/training-inference.png
new file mode 100644
index 0000000..db822d5
Binary files /dev/null and b/sensecamp2019/img/training-inference.png differ
diff --git a/sensecamp2019/img/training-inference.svg b/sensecamp2019/img/training-inference.svg
new file mode 100644
index 0000000..af5f59a
--- /dev/null
+++ b/sensecamp2019/img/training-inference.svg
@@ -0,0 +1,594 @@
+
+
+
+
diff --git a/sensecamp2019/img/training-settings.png b/sensecamp2019/img/training-settings.png
new file mode 100644
index 0000000..fbe24e4
Binary files /dev/null and b/sensecamp2019/img/training-settings.png differ
diff --git a/sensecamp2019/img/unknown-class-tradeoffs.png b/sensecamp2019/img/unknown-class-tradeoffs.png
new file mode 100644
index 0000000..d525746
Binary files /dev/null and b/sensecamp2019/img/unknown-class-tradeoffs.png differ
diff --git a/sensecamp2019/img/unknown-class.png b/sensecamp2019/img/unknown-class.png
new file mode 100644
index 0000000..3df8b79
Binary files /dev/null and b/sensecamp2019/img/unknown-class.png differ
diff --git a/sensecamp2019/img/urbansound8k-examples.png b/sensecamp2019/img/urbansound8k-examples.png
new file mode 100644
index 0000000..5ddb53c
Binary files /dev/null and b/sensecamp2019/img/urbansound8k-examples.png differ
diff --git a/sensecamp2019/img/urbansound8k-existing-models-logmel.png b/sensecamp2019/img/urbansound8k-existing-models-logmel.png
new file mode 100644
index 0000000..6552a2d
Binary files /dev/null and b/sensecamp2019/img/urbansound8k-existing-models-logmel.png differ
diff --git a/sensecamp2019/img/urbansound8k-existing-models-logmel.tex b/sensecamp2019/img/urbansound8k-existing-models-logmel.tex
new file mode 100644
index 0000000..caa3c73
--- /dev/null
+++ b/sensecamp2019/img/urbansound8k-existing-models-logmel.tex
@@ -0,0 +1,12 @@
+\begin{tabular}{lrrr}
+\toprule
+{} & Accuracy (\%) & MACC / second & Model parameters \\
+name & & & \\
+\midrule
+Dmix-CNN-mel & 82.6 & 298M & 1180k \\
+D-CNN & 81.9 & 458M & 33000k \\
+SB-CNN & 79.0 & 25M & 432k \\
+LD-CNN & 79.0 & 10M & 580k \\
+PiczakCNN & 75.0 & 88M & 25534k \\
+\bottomrule
+\end{tabular}
diff --git a/sensecamp2019/img/what-we-do.png b/sensecamp2019/img/what-we-do.png
new file mode 100644
index 0000000..c96d285
Binary files /dev/null and b/sensecamp2019/img/what-we-do.png differ
diff --git a/sensecamp2019/img/xcubeai.png b/sensecamp2019/img/xcubeai.png
new file mode 100644
index 0000000..0db1da0
Binary files /dev/null and b/sensecamp2019/img/xcubeai.png differ
diff --git a/sensecamp2019/notes.md b/sensecamp2019/notes.md
new file mode 100644
index 0000000..344b57e
--- /dev/null
+++ b/sensecamp2019/notes.md
@@ -0,0 +1,170 @@
+
+
+## Talk 1
+
+DTU. Technical University of Denmark
+
+Demo. DANSpeech system
+
+
+Safe AI.
+Trustworthy.
+Explainable AI.
+
+! New book. Explainable AI: Interpreting, Explaining and Visualizing Deep Learning
+
+DTU. Introduction to ML. 1000 engineering students per year.
+
+Close collaboration with Hearing Systems.
+ML used quite a bit.
+Early patenting.
+
+DABAI. Open source ML workflow.
+Danish language.
+
+
+Data and Representation
+
+- Division of labor.
+- Neural network. Simple units.
+Reused in different contexts.
+- Learning. Adaptivity, Plasticity
+- Attention
+
+What is it we attend to?
+Race to short term memory
+
+Looking for things that "move independently"
+
+
+Paper.
+Lewincki, 2002. Efficient Coding of natural sounds.
+Adapted ICA in primarly audidotury.
+Mel MFCC
+
+How to model this independence?
+
+Comparing generative/unsupervised with supervised learning
+
+Train independently. Are they similar?
+
+? how do they estimate the similarity. Mutual information?
+
+Ling Feng.
+
+! can predict height of person based on their speech
+
+ICLR2016.
+Convergent Learning.
+Do different neural networks learn the same representation?
+Authors suggest Yes!
+
+! Controversial paper
+
+Neural networks are permutation invariant.
+
+
+Podcast indexing based on audio
+- Audioburst
+
+Using Wikipedia as knowledge base of ML system
+
+Danspeech
+
+http://github.com/danspeech/danspeech
+"An open-source python package for Danish speech recognition"
+
+
+
+## Talk 2
+
+WS Audiology
+
+Started PhD in 2015
+Everyone talks about "Big Data"
+
+Dreams about future in 2017
+
+Rule-based system -> Data driven
+Opinion-based decision -> Fact-based
+Increasing system/use complexity -> Simple, intuitive
+
+Prerequisites
+
+- Data Collection
+- Data Warehousing
+- High performance Compute
+- Machine Learning
+
+
+### SoundSense Learn
+
+Mobile app.
+Allows playing around with personalized audio settings
+Primarily for hearing aid tuning
+Compares 2-and-2 settings
+
+Using Bayesian optimization for Active Learning
+
+Jensen et al., 2019
+
+
+### Data Infrastructure
+
+From Lake to Warehouse
+
+Go from lots of unstructured data. Independent.
+To a smaller amount of structured data. Inter-linked. Contextualized.
+
+
+
+## Talk 3
+
+Eriksholm Research Center
+
+```
+Audio Enhancement
++ Preferences
+= new services for hearing in healthcare
+```
+
+Hearing aids
+
+Source separation
+
+! want to do it on hearing aid hardware
+! with 8-10 millisecond latency
+
+Single microphone
+For more than 2 people need directionality
+
+Competition?
+Hearables. Will get more crowded
+
+Training?
+Audio-Visual correspondence
+
+EVOTION Consortium
+
+
+### Privacy preserving
+
+Temporal
+1 sample per minute
+
+SPL 3 bands
+Noise floor.
+
+Environment Classification.
+Quiet, Noise, Speech, Noisy Speech
+
+EVOTION Hearing Aid data
+
+51 million datapoints
+1000 users
+
+? is some of this dataset available
+
+5 M parameters
+
+
diff --git a/sensecamp2019/presentation.md b/sensecamp2019/presentation.md
new file mode 100644
index 0000000..4d1f8cf
--- /dev/null
+++ b/sensecamp2019/presentation.md
@@ -0,0 +1,673 @@
+
+---
+title: Classification of Environmental Sound using IoT sensors
+author: Jon Nordby
+date: November 19, 2019
+css: style.css
+width: 1920
+height: 1080
+margin: 0
+pagetitle: 'Sensecamp2019: Classification of Environmental Sound using IoT sensors'
+---
+
+
+# Introduction
+
+## Jon Nordby
+
+Internet of Things specialist
+
+- B.Eng in **Electronics**
+- 9 years as **Software** developer. **Embedded** + **Web**
+- M. Sc in **Data** Science
+
+Now:
+
+- CTO at Soundsensing
+- Machine Learning Consultant
+
+
+## Soundsensing
+
+![](./img/soundsensing-withlogo.png){width=100%}
+
+
+::: notes
+Provide **Noise Monitoring** and Audio **Condition Monitoring** solutions
+that are used in Real-Estate, Industry, and Smart Cities.
+
+Perform Machine Learning for sound classification **on sensor**.
+:::
+
+
+## Dashboard
+
+![Pilot projects with customers Now - 2020](img/what-we-do.png)
+
+
+## Thesis
+
+> Environmental Sound Classification
+> on Microcontrollers
+> using Convolutional Neural Networks
+
+![Report & Code: https://github.com/jonnor/ESC-CNN-microcontroller](./img/thesis.png){width=30%}
+
+
+## Wireless Sensor Networks
+
+- Want: Wide and dense coverage
+- Need: Sensors must be low-cost
+- **Opportunity**: Wireless reduces costs
+- **Challenge**: Power consumption
+
+::: notes
+
+* No network cabling, no power cabling
+* No site infrastructure needed
+* Less invasive
+* Fewer approvals needed
+* Temporary installs feasible
+* Mobile sensors possible
+
+Electrician is 750 NOK/hour
+
+Image: https://www.nti-audio.com/en/applications/noise-measurement/unattended-monitoring
+:::
+
+
+## Sensor Network Architectures
+
+![](img/sensornetworks.png){width=70%}
+
+
+# Audio Machine Learning on low-power sensors
+
+## What do you mean by low-power?
+
+Want: 1 year lifetime for palm-sized battery
+
+Need: `<1mW` system power
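+
+The `<1mW` figure follows from simple arithmetic, assuming a typical palm-sized Li-ion cell (the capacity here is an assumption, not a product spec):
+
+```python
+# Back-of-envelope: average power budget for 1 year on a single battery charge.
+battery_wh = 3.7 * 2.0             # e.g. a 2000 mAh Li-ion cell at 3.7 V ~ 7.4 Wh
+hours_per_year = 365 * 24          # 8760 hours
+budget_mw = battery_wh / hours_per_year * 1000
+print(budget_mw)                   # ~0.85 mW average -> hence "<1 mW" system power
+```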
+
+## General purpose microcontroller
+
+
+![](img/cortexM4.png){width=40%}
+
+STM32L4 @ 80 MHz. Approx **10 mW**.
+
+- TensorFlow Lite for Microcontrollers (Google)
+- ST X-CUBE-AI (ST Microelectronics)
+
+
+## FPGA
+
+![Lattice ICE40 UltraPlus with Lattice sensAI](img/iCE40UltraPlus.png){width=50%}
+
+Human presence detection. VGG8 on 64x64 RGB image, 5 FPS: 7 mW.
+
+Audio ML approx **1 mW**
+
+## Neural Network co-processors
+
+![Project Orlando (ST Microelectronics), expected 2020](img/ST-Orlando-SoC.png){width=25%}
+
+2.9 TOPS/W. AlexNet, 1000 classes, 10 FPS. 41 mWatt
+
+Audio models probably **< 1 mWatt**.
+
+::: notes
+
+https://www.latticesemi.com/Blog/2019/05/17/18/25/sensAI
+
+:::
+
+
+# On-edge Classification of Noise
+
+## Environmental Sound Classification
+
+> Given an audio signal of environmental sounds,
+>
+> determine which class it belongs to
+
+* Widely researched. 1000 hits on Google Scholar
+* Datasets. Urbansound8k (10 classes), ESC-50, AudioSet (632 classes)
+* 2017: Human-level performance on ESC-50
+
+::: notes
+
+https://github.com/karoldvl/ESC-50
+
+:::
+
+
+
+
+
+::: notes
+
+STM32L476
+
+ARM Cortex M4F
+Hardware floating-point unit (FPU)
+DSP SIMD instructions
+80 MHz CPU clock
+1024 kB of program memory (Flash)
+128 kB of RAM.
+
+25 mWatt max
+
+:::
+
+## Urbansound8k
+
+![](img/urbansound8k-examples.png){width=100%}
+
+::: notes
+
+Classes from an urban sound taxonomy,
+based on noise complains in New York city
+
+Most sounds around 4 seconds. Some classes around 1 second
+
+Foreground/background
+
+:::
+
+
+## Existing work
+
+- Convolutional Neural Networks dominate
+- Techniques come from image classification
+- Mel-spectrogram input standard
+- End2end models: getting close in accuracy
+- "Edge ML" focused on mobile-phone class HW
+- "Tiny ML" (sensors) just starting
+
+::: notes
+
+* Efficient Keyword-Spotting
+* Efficient (image) CNNs
+* Efficient ESC-CNN
+
+ESC-CNN
+
+* 23 papers reviewed in detail
+* 10 referenced in thesis
+* Only 4 consider computational efficiency
+
+:::
+
+## Model requirements
+
+With 50% of STM32L476 capacity:
+
+* 64 kB RAM
+* 512 kB FLASH memory
+* 4.5 M MACC/second
+
+::: notes
+
+* RAM: 1000x 64 MB
+* PROGMEM: 1000x 512 MB
+* CPU: 1000x 5 GFLOPS
+* GPU: 1000'000X 5 TFLOPS
+
+:::
+
+## Existing models
+
+![Green: Feasible region](img/urbansound8k-existing-models-logmel.png){width=100%}
+
+eGRU: running on ARM Cortex-M0 microcontroller, accuracy 61% with **non-standard** evaluation
+
+::: notes
+
+Assuming no overlap. Most models use very high overlap, 100X higher compute
+
+:::
+
+## Pipeline
+
+![](img/classification-pipeline.png){max-height=100%}
+
+
+## Models
+
+
+
+![](img/models.svg){width=70%}
+
+
+::: notes
+
+Baseline from SB-CNN
+
+Few modifications
+
+* Uses smaller input feature representation
+* Reduced downsample factor to accommodate
+
+CONV = entry point for trying different convolution operators
+
+:::
+
+
+
+
+# Strategies for shrinking Convolutional Neural Network
+
+
+## Reduce input dimensionality
+
+![](img/input-size.svg){width=70%}
+
+- Lower frequency range
+- Lower frequency resolution
+- Lower time duration in window
+- Lower time resolution
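+
+A minimal sketch of how these knobs appear in code, using librosa; the concrete values are illustrative, not necessarily the thesis settings:
+
+```python
+# Smaller mel-spectrogram input: lower sample rate, fewer mel bands, larger hop.
+import librosa
+
+y, sr = librosa.load('clip.wav', sr=16000)   # lower sample rate -> lower frequency range
+mels = librosa.feature.melspectrogram(
+    y=y, sr=sr,
+    n_mels=30,        # fewer mel bands -> lower frequency resolution
+    hop_length=512,   # larger hop -> lower time resolution
+)
+# Feeding the CNN ~0.7 s analysis windows instead of the whole 4 s clip
+# further shrinks the input (and the RAM needed by the first layers).
+```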
+
+::: notes
+
+Directly limits time and RAM use first few layers.
+
+Follow-on effects.
+A simpler input representation is (hopefully) easier to learn
+allowing for a simpler model
+
+TODO: make a picture illustrating this
+
+:::
+
+## Reduce overlap
+
+![](img/framing.png){width=80%}
+
+Models in literature use 95% overlap or more. 20x penalty in inference time!
+
+Often low performance benefit. Use 0% (1x) or 50% (2x).
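+
+The 20x figure is just the ratio of hop sizes; a one-liner makes it explicit (the 0.72 s window length is only an example):
+
+```python
+# Number of analysis windows per second of audio, as a function of overlap.
+def windows_per_second(window_s=0.72, overlap=0.0):
+    hop_s = window_s * (1.0 - overlap)
+    return 1.0 / hop_s
+
+print(windows_per_second(overlap=0.95) / windows_per_second(overlap=0.0))  # 20x more inferences
+```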
+
+
+
+## Depthwise-separable Convolution
+
+
+![](img/depthwise-separable-convolution.png){width=90%}
+
+MobileNet, "Hello Edge", AclNet. 3x3 kernel,64 filters: 7.5x speedup
+
+::: notes
+
+* Much fewer operations
+* Less expressive - but regularization effect can be beneficial
+
+:::
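+
+In Keras this is a one-line swap, and the theoretical saving can be computed directly. A sketch, not the exact thesis code:
+
+```python
+# Standard vs depthwise-separable convolution block (Keras).
+from tensorflow.keras import layers
+
+def standard_block(x, filters=64):
+    return layers.Conv2D(filters, (3, 3), padding='same', activation='relu')(x)
+
+def depthwise_separable_block(x, filters=64):
+    # 3x3 depthwise (one filter per input channel) followed by 1x1 pointwise mixing
+    return layers.SeparableConv2D(filters, (3, 3), padding='same', activation='relu')(x)
+
+# MACCs per output position, for C_in input channels, C_out filters, k x k kernel:
+k, c_in, c_out = 3, 24, 64
+standard = k * k * c_in * c_out              # 13824
+separable = k * k * c_in + c_in * c_out      # 1752 -> ~7.9x fewer, in line with the ~7.5x above
+```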
+
+## Spatially-separable Convolution
+
+![](img/spatially-separable-convolution.png){width=90%}
+
+EffNet, LD-CNN. 5x5 kernel: 2.5x speedup
+
+
+## Downsampling using max-pooling
+
+![](img/maxpooling.png){width=100%}
+
+Wasteful? Computing convolutions, then throwing away 3/4 of results!
+
+## Downsampling using strided convolution
+
+![](img/strided-convolution.png){width=100%}
+
+Striding means fewer computations and "learned" downsampling
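+
+The two downsampling variants side by side, as a Keras sketch (filter counts are illustrative):
+
+```python
+# Downsample 2x: max-pooling vs strided convolution.
+from tensorflow.keras import layers
+
+def conv_then_maxpool(x):
+    # Convolution evaluated at every position, then 3/4 of the outputs discarded
+    x = layers.Conv2D(24, (5, 5), padding='same', activation='relu')(x)
+    return layers.MaxPooling2D((2, 2))(x)
+
+def strided_conv(x):
+    # Convolution evaluated only at every 2nd position: ~4x fewer MACCs, downsampling is learned
+    return layers.Conv2D(24, (5, 5), strides=(2, 2), padding='same', activation='relu')(x)
+```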
+
+## Model comparison
+
+![](img/models_accuracy.png){width=100%}
+
+::: notes
+
+- Baseline relative to SB-CNN and LD-CNN is down from 79% to 73%.
+Expected, because of the poorer input representation
+and the much lower overlap.
+
+:::
+
+
+## Performance vs compute
+
+![](img/models_efficiency.png){width=100%}
+
+::: notes
+
+- Performance of Strided-DS-24 similar to Baseline, despite using 12x less CPU
+- Surprising? Stride alone is worse than Strided-DS-24
+- Bottleneck and EffNet performed poorly
+- Practical speedup not linear with MACC
+
+:::
+
+
+
+
+
+## Quantization
+
+Inference can often use 8 bit integers instead of 32 bit floats
+
+- 1/4 the size for weights (FLASH) and activations (RAM)
+- 8bit **SIMD** on ARM Cortex M4F: 1/4 the inference time
+- Supported in X-CUBE-AI 4.x (July 2019)
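+
+For reference, the same idea with TensorFlow Lite post-training quantization (X-CUBE-AI has its own tooling); a minimal sketch assuming a trained Keras `model` and a `representative_spectrograms()` generator:
+
+```python
+# Post-training int8 quantization with the TensorFlow Lite converter.
+import tensorflow as tf
+
+converter = tf.lite.TFLiteConverter.from_keras_model(model)
+converter.optimizations = [tf.lite.Optimize.DEFAULT]
+converter.representative_dataset = representative_spectrograms   # calibrates activation ranges
+converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+converter.inference_input_type = tf.int8
+converter.inference_output_type = tf.int8
+
+tflite_model = converter.convert()   # 8-bit weights and activations: ~1/4 the size
+open('model.tflite', 'wb').write(tflite_model)
+```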
+
+
+
+::: notes
+
+EnvNet-v2 got 78.3% on Urbansound8k with 16 kHz
+:::
+
+
+
+## Conclusions
+
+- Able to perform Environmental Sound Classification at `~ 10mW` power,
+- Using *general purpose microcontroller*, ARM Cortex M4F
+- Best performance: 70.9% mean accuracy, under 20% CPU load
+- Highest reported Urbansound8k on microcontroller (over eGRU 62%)
+- Best architecture: Depthwise-Separable convolutions with striding
+- Quantization enables 4x bigger models (and higher perf)
+- With dedicated Neural Network hardware, even bigger models at lower power come within reach
+
+
+# Further Research
+
+
+## Waveform input to model
+
+- Preprocessing. Mel-spectrogram: **60** milliseconds
+- CNN. Stride-DS-24: **81** milliseconds
+- With quantization, spectrogram conversion is the bottleneck!
+- Convolutions can be used to learn a Time-Frequency transformation.
+
+Can this be faster than the standard FFT? And still perform well?
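+
+A sketch of what "learning the transform" could look like: a strided 1D convolution over the raw waveform standing in for the mel-spectrogram front-end (all parameters illustrative):
+
+```python
+# Learned filterbank front-end: strided Conv1D over raw audio.
+import tensorflow as tf
+from tensorflow.keras import layers
+
+waveform = tf.keras.Input(shape=(16000, 1))          # 1 second of 16 kHz audio
+filterbank = layers.Conv1D(40, kernel_size=512, strides=256,
+                           padding='same', activation='relu')(waveform)
+# Output: ~63 "frames" x 40 learned bands, analogous to a 40-band mel-spectrogram,
+# which the usual 2D CNN layers can then consume.
+```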
+
+
+::: notes
+
+- Especially interesting with CNN hardware acceleration.
+
+:::
+
+
+## On-sensor inference challenges
+
+- Reducing power consumption. Adaptive sampling
+- Efficient training data collection in WSN. Active Learning?
+- Real-life performance evaluations. Out-of-domain samples
+
+::: notes
+
+TODO: Add few more projects here. From research document
+
+:::
+
+
+
+# Wrapping up
+
+## Summary
+
+- Noise pollution is a growing problem
+- Wireless Sensor Networks can be used to quantify it
+- Noise Classification can provide more information
+- Want high density of sensors. Need to be low cost
+- On-sensor classification desirable for power/cost and privacy
+
+
+## More resources
+
+Machine Hearing. ML on Audio
+
+- [github.com/jonnor/machinehearing](https://github.com/jonnor/machinehearing)
+
+Machine Learning for Embedded / IoT
+
+- [github.com/jonnor/embeddedml](https://github.com/jonnor/embeddedml)
+
+Thesis Report & Code
+
+- [github.com/jonnor/ESC-CNN-microcontroller](https://github.com/jonnor/ESC-CNN-microcontroller)
+
+
+## Questions
+
+
+?
+
+Email:
+
+## Come talk to me!
+
+- Noise Monitoring sensors. Pilot projects for 2020?
+- Environmental Sound, Wireless Sensor Networks for Audio. Research partnering?
+- "On-edge" / Embedded Device ML. Happy to advise!
+
+Email:
+
+
+
+
+# Thesis results
+
+
+## Model comparison
+
+![](img/models_accuracy.png){width=100%}
+
+::: notes
+
+- Baseline relative to SB-CNN and LD-CNN is down from 79% to 73%.
+Expected, because of the poorer input representation
+and the much lower overlap.
+
+:::
+
+
+
+## List of results
+
+![](img/results.png){width=100%}
+
+
+## Confusion
+
+![](img/confusion_test.png){width=70%}
+
+## Grouped classification
+
+![](img/grouped_confusion_test_foreground.png){width=60%}
+
+Foreground-only
+
+## Unknown class
+
+![](img/unknown-class.png){width=100%}
+
+::: notes
+
+Idea: If confidence of model is low, consider it as "unknown"
+
+* Left: Histogram of correct/incorrect predictions
+* Right: Precision/recall curves
+* Precision improves at expense of recall
+* 90%+ precision possible at 40% recall
+
+Usefulness:
+
+* Avoids making decisions on poor grounds
+* "Unknown" samples good candidates for labeling->dataset. Active Learning
+* Low recall not a problem? Data is abundant, 15 samples of 4 seconds per minute per sensor
+
+:::
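+
+The mechanism itself is a simple confidence threshold on the softmax output; a sketch (the threshold value is illustrative):
+
+```python
+# Reject low-confidence predictions as "unknown".
+import numpy as np
+
+def classify_with_unknown(probabilities, threshold=0.6):
+    confidence = float(np.max(probabilities))
+    if confidence < threshold:
+        return 'unknown'     # candidate for labeling / Active Learning
+    return int(np.argmax(probabilities))
+```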
+
+
+# Experimental Details
+
+
+## All models
+
+![](img/models-list.png)
+
+::: notes
+
+* Baseline is outside requirements
+* Rest fits the theoretical constraints
+* Sometimes had to reduce number of base filters to 22 to fit in RAM
+
+:::
+
+
+# Methods
+
+Standard procedure for Urbansound8k
+
+- Classification problem
+- 4 second sound clips
+- 10 classes
+- 10-fold cross-validation, predefined
+- Metric: Accuracy
+
+## Training settings
+
+![](img/training-settings.png)
+
+## Training
+
+- NVidia RTX2060 GPU 6 GB
+- 10 models x 10 folds = 100 training jobs
+- 100 epochs
+- 3 jobs in parallel
+- 36 hours total
+
+::: notes
+
+- ! GPU utilization only 15%
+- CPU utilization was near 100%
+- Larger models to utilize GPU better?
+- Parallel processing limited by RAM of biggest models
+- GPU-based augmentation might be faster
+
+:::
+
+## Evaluation
+
+For each fold of each model
+
+1. Select best model based on validation accuracy
+2. Calculate accuracy on test set
+
+For each model
+
+- Measure CPU time on device
+
+
+# Your model will trick you
+
+And the bugs can be hard to spot
+
+## FAIL: Integer truncation
+
+![](img/fail-truncation.png){width=100%}
+
+## FAIL. Dropout location
+
+![](img/fail-dropout.png){width=100%}
+
+
+# Background
+
+
+## Mel-spectrogram
+
+![](img/spectrograms.svg)
+
+## Noise Pollution
+
+Harms health through stress and loss of sleep
+
+In Norway
+
+* 1.9 million affected by road noise (2014, SSB)
+* 10'000 healthy years lost per year (Folkehelseinstituttet)
+
+In Europe
+
+* 13 million suffering from sleep disturbance (EEA)
+* 900'000 DALY lost (WHO)
+
+
+::: notes
+
+1.9 million
+https://www.ssb.no/natur-og-miljo/artikler-og-publikasjoner/flere-nordmenn-utsatt-for-stoy
+
+1999: 1.2 million
+
+10,245 healthy life-years lost in Norway every year
+https://www.miljostatus.no/tema/stoy/stoy-og-helse/
+
+
+https://www.eea.europa.eu/themes/human/noise/noise-2
+
+Burden of Disease WHO
+http://www.euro.who.int/__data/assets/pdf_file/0008/136466/e94888.pdf
+
+:::
+
+
+## Noise Mapping
+
+Simulation only, no direct measurements
+
+![](img/stoykart.png)
+
+::: notes
+
+- Known sources
+- Yearly average value
+- Updated every 5 years
+- Low data quality. Ex: municipal roads
+
+Image: https://www.regjeringen.no/no/tema/plan-bygg-og-eiendom/plan--og-bygningsloven/plan/kunnskapsgrunnlaget-i-planlegging/statistikk-i-plan/id2396747/
+
+:::
+
+
diff --git a/sensecamp2019/reveal.js/CONTRIBUTING.md b/sensecamp2019/reveal.js/CONTRIBUTING.md
new file mode 100644
index 0000000..c2091e8
--- /dev/null
+++ b/sensecamp2019/reveal.js/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+## Contributing
+
+Please keep the [issue tracker](http://github.com/hakimel/reveal.js/issues) limited to **bug reports**, **feature requests** and **pull requests**.
+
+
+### Personal Support
+If you have personal support or setup questions the best place to ask those are [StackOverflow](http://stackoverflow.com/questions/tagged/reveal.js).
+
+
+### Bug Reports
+When reporting a bug make sure to include information about which browser and operating system you are on as well as the necessary steps to reproduce the issue. If possible please include a link to a sample presentation where the bug can be tested.
+
+
+### Pull Requests
+- Should follow the coding style of the file you work in, most importantly:
+ - Tabs to indent
+ - Single-quoted strings
+- Should be made towards the **dev branch**
+- Should be submitted from a feature/topic branch (not your master)
+
+
+### Plugins
+Please do not submit plugins as pull requests. They should be maintained in their own separate repository. More information here: https://github.com/hakimel/reveal.js/wiki/Plugin-Guidelines
diff --git a/sensecamp2019/reveal.js/LICENSE b/sensecamp2019/reveal.js/LICENSE
new file mode 100644
index 0000000..697d156
--- /dev/null
+++ b/sensecamp2019/reveal.js/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2019 Hakim El Hattab, http://hakim.se, and reveal.js contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/sensecamp2019/reveal.js/README.md b/sensecamp2019/reveal.js/README.md
new file mode 100644
index 0000000..33956e9
--- /dev/null
+++ b/sensecamp2019/reveal.js/README.md
@@ -0,0 +1,1438 @@
+# reveal.js [![Build Status](https://travis-ci.org/hakimel/reveal.js.svg?branch=master)](https://travis-ci.org/hakimel/reveal.js)
+
+A framework for easily creating beautiful presentations using HTML. [Check out the live demo](http://revealjs.com/).
+
+reveal.js comes with a broad range of features including [nested slides](https://github.com/hakimel/reveal.js#markup), [Markdown contents](https://github.com/hakimel/reveal.js#markdown), [PDF export](https://github.com/hakimel/reveal.js#pdf-export), [speaker notes](https://github.com/hakimel/reveal.js#speaker-notes) and a [JavaScript API](https://github.com/hakimel/reveal.js#api). There's also a fully featured visual editor and platform for sharing reveal.js presentations at [slides.com](https://slides.com?ref=github).
+
+
+## Table of contents
+
+- [Online Editor](#online-editor)
+- [Installation](#installation)
+ - [Basic setup](#basic-setup)
+ - [Full setup](#full-setup)
+ - [Folder Structure](#folder-structure)
+- [Instructions](#instructions)
+ - [Markup](#markup)
+ - [Markdown](#markdown)
+ - [Element Attributes](#element-attributes)
+ - [Slide Attributes](#slide-attributes)
+- [Configuration](#configuration)
+- [Presentation Size](#presentation-size)
+- [Dependencies](#dependencies)
+- [Ready Event](#ready-event)
+- [Auto-sliding](#auto-sliding)
+- [Keyboard Bindings](#keyboard-bindings)
+- [Vertical Slide Navigation](#vertical-slide-navigation)
+- [Touch Navigation](#touch-navigation)
+- [Lazy Loading](#lazy-loading)
+- [API](#api)
+ - [Slide Changed Event](#slide-changed-event)
+ - [Presentation State](#presentation-state)
+ - [Slide States](#slide-states)
+ - [Slide Backgrounds](#slide-backgrounds)
+ - [Parallax Background](#parallax-background)
+ - [Slide Transitions](#slide-transitions)
+ - [Internal links](#internal-links)
+ - [Fragments](#fragments)
+ - [Fragment events](#fragment-events)
+ - [Code syntax highlighting](#code-syntax-highlighting)
+ - [Slide number](#slide-number)
+ - [Overview mode](#overview-mode)
+ - [Fullscreen mode](#fullscreen-mode)
+ - [Embedded media](#embedded-media)
+ - [Stretching elements](#stretching-elements)
+ - [Resize Event](#resize-event)
+ - [postMessage API](#postmessage-api)
+- [PDF Export](#pdf-export)
+- [Theming](#theming)
+- [Speaker Notes](#speaker-notes)
+ - [Share and Print Speaker Notes](#share-and-print-speaker-notes)
+ - [Server Side Speaker Notes](#server-side-speaker-notes)
+- [Plugins](#plugins)
+- [Multiplexing](#multiplexing)
+ - [Master presentation](#master-presentation)
+ - [Client presentation](#client-presentation)
+ - [Socket.io server](#socketio-server)
+- [MathJax](#mathjax)
+- [License](#license)
+
+#### More reading
+
+- [Changelog](https://github.com/hakimel/reveal.js/releases): Up-to-date version history.
+- [Examples](https://github.com/hakimel/reveal.js/wiki/Example-Presentations): Presentations created with reveal.js, add your own!
+- [Browser Support](https://github.com/hakimel/reveal.js/wiki/Browser-Support): Explanation of browser support and fallbacks.
+- [Plugins](https://github.com/hakimel/reveal.js/wiki/Plugins,-Tools-and-Hardware): A list of plugins that can be used to extend reveal.js.
+
+
+## Online Editor
+
+Presentations are written using HTML or Markdown but there's also an online editor for those of you who prefer a graphical interface. Give it a try at [https://slides.com](https://slides.com?ref=github).
+
+
+## Installation
+
+The **basic setup** is for authoring presentations only. The **full setup** gives you access to all reveal.js features and plugins such as speaker notes as well as the development tasks needed to make changes to the source.
+
+### Basic setup
+
+The core of reveal.js is very easy to install. You'll simply need to download a copy of this repository and open the index.html file directly in your browser.
+
+1. Download the latest version of reveal.js from https://github.com/hakimel/reveal.js/releases
+2. Unzip and replace the example contents in index.html with your own
+3. Open index.html in a browser to view it
+
+### Full setup
+
+Some reveal.js features, like external Markdown and speaker notes, require that presentations run from a local web server. The following instructions will set up such a server as well as all of the development tasks needed to make edits to the reveal.js source code.
+
+1. Install [Node.js](http://nodejs.org/) (4.0.0 or later)
+
+1. Clone the reveal.js repository
+ ```sh
+ $ git clone https://github.com/hakimel/reveal.js.git
+ ```
+
+1. Navigate to the reveal.js folder
+ ```sh
+ $ cd reveal.js
+ ```
+
+1. Install dependencies
+ ```sh
+ $ npm install
+ ```
+
+1. Serve the presentation and monitor source files for changes
+ ```sh
+ $ npm start
+ ```
+
+1. Open http://localhost:8000 to view your presentation
+
+ You can change the port by using `npm start -- --port=8001`.
+
+### Folder Structure
+
+- **css/** Core styles without which the project does not function
+- **js/** Like above but for JavaScript
+- **plugin/** Components that have been developed as extensions to reveal.js
+- **lib/** All other third party assets (JavaScript, CSS, fonts)
+
+
+## Instructions
+
+### Markup
+
+Here's a barebones example of a fully working reveal.js presentation:
+```html
+
+
+
+
+
+
+
+
+ Slide 1
+ Slide 2
+
+
+
+
+
+
+```
+
+The presentation markup hierarchy needs to be `.reveal > .slides > section` where the `section` represents one slide and can be repeated indefinitely. If you place multiple `section` elements inside of another `section` they will be shown as vertical slides. The first of the vertical slides is the "root" of the others (at the top), and will be included in the horizontal sequence. For example:
+
+```html
+
+```
+
+### Markdown
+
+It's possible to write your slides using Markdown. To enable Markdown, add the `data-markdown` attribute to your `` elements and wrap the contents in a `
"+u(e.message+"",!0)+"
";throw e}}f.exec=f,m.options=m.setOptions=function(e){return d(m.defaults,e),m},m.getDefaults=function(){return{baseUrl:null,breaks:!1,gfm:!0,headerIds:!0,headerPrefix:"",highlight:null,langPrefix:"language-",mangle:!0,pedantic:!1,renderer:new r,sanitize:!1,sanitizer:null,silent:!1,smartLists:!1,smartypants:!1,tables:!0,xhtml:!1}},m.defaults=m.getDefaults(),m.Parser=p,m.parser=p.parse,m.Renderer=r,m.TextRenderer=s,m.Lexer=a,m.lexer=a.lex,m.InlineLexer=h,m.inlineLexer=h.output,m.Slugger=t,m.parse=m,"undefined"!=typeof module&&"object"==typeof exports?module.exports=m:"function"==typeof define&&define.amd?define(function(){return m}):e.marked=m}(this||("undefined"!=typeof window?window:global));
\ No newline at end of file
diff --git a/sensecamp2019/reveal.js/plugin/math/math.js b/sensecamp2019/reveal.js/plugin/math/math.js
new file mode 100755
index 0000000..d76c9dd
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/math/math.js
@@ -0,0 +1,92 @@
+/**
+ * A plugin which enables rendering of math equations inside
+ * of reveal.js slides. Essentially a thin wrapper for MathJax.
+ *
+ * @author Hakim El Hattab
+ */
+var RevealMath = window.RevealMath || (function(){
+
+ var options = Reveal.getConfig().math || {};
+ var mathjax = options.mathjax || 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js';
+ var config = options.config || 'TeX-AMS_HTML-full';
+ var url = mathjax + '?config=' + config;
+
+ var defaultOptions = {
+ messageStyle: 'none',
+ tex2jax: {
+ inlineMath: [ [ '$', '$' ], [ '\\(', '\\)' ] ],
+ skipTags: [ 'script', 'noscript', 'style', 'textarea', 'pre' ]
+ },
+ skipStartupTypeset: true
+ };
+
+ function defaults( options, defaultOptions ) {
+
+ for ( var i in defaultOptions ) {
+ if ( !options.hasOwnProperty( i ) ) {
+ options[i] = defaultOptions[i];
+ }
+ }
+
+ }
+
+ function loadScript( url, callback ) {
+
+ var head = document.querySelector( 'head' );
+ var script = document.createElement( 'script' );
+ script.type = 'text/javascript';
+ script.src = url;
+
+ // Wrapper for callback to make sure it only fires once
+ var finish = function() {
+ if( typeof callback === 'function' ) {
+ callback.call();
+ callback = null;
+ }
+ }
+
+ script.onload = finish;
+
+ // IE
+ script.onreadystatechange = function() {
+ if ( this.readyState === 'loaded' ) {
+ finish();
+ }
+ }
+
+ // Normal browsers
+ head.appendChild( script );
+
+ }
+
+ return {
+ init: function() {
+
+ defaults( options, defaultOptions );
+ defaults( options.tex2jax, defaultOptions.tex2jax );
+ options.mathjax = options.config = null;
+
+ loadScript( url, function() {
+
+ MathJax.Hub.Config( options );
+
+ // Typeset followed by an immediate reveal.js layout since
+ // the typesetting process could affect slide height
+ MathJax.Hub.Queue( [ 'Typeset', MathJax.Hub ] );
+ MathJax.Hub.Queue( Reveal.layout );
+
+ // Reprocess equations in slides when they turn visible
+ Reveal.addEventListener( 'slidechanged', function( event ) {
+
+ MathJax.Hub.Queue( [ 'Typeset', MathJax.Hub, event.currentSlide ] );
+
+ } );
+
+ } );
+
+ }
+ }
+
+})();
+
+Reveal.registerPlugin( 'math', RevealMath );
diff --git a/sensecamp2019/reveal.js/plugin/multiplex/client.js b/sensecamp2019/reveal.js/plugin/multiplex/client.js
new file mode 100644
index 0000000..3ffd1e0
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/multiplex/client.js
@@ -0,0 +1,13 @@
+(function() {
+ var multiplex = Reveal.getConfig().multiplex;
+ var socketId = multiplex.id;
+ var socket = io.connect(multiplex.url);
+
+ socket.on(multiplex.id, function(data) {
+ // ignore data from sockets that aren't ours
+ if (data.socketId !== socketId) { return; }
+ if( window.location.host === 'localhost:1947' ) return;
+
+ Reveal.setState(data.state);
+ });
+}());
diff --git a/sensecamp2019/reveal.js/plugin/multiplex/index.js b/sensecamp2019/reveal.js/plugin/multiplex/index.js
new file mode 100644
index 0000000..8195f04
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/multiplex/index.js
@@ -0,0 +1,64 @@
+var http = require('http');
+var express = require('express');
+var fs = require('fs');
+var io = require('socket.io');
+var crypto = require('crypto');
+
+var app = express();
+var staticDir = express.static;
+var server = http.createServer(app);
+
+io = io(server);
+
+var opts = {
+ port: process.env.PORT || 1948,
+ baseDir : __dirname + '/../../'
+};
+
+io.on( 'connection', function( socket ) {
+ socket.on('multiplex-statechanged', function(data) {
+ if (typeof data.secret == 'undefined' || data.secret == null || data.secret === '') return;
+ if (createHash(data.secret) === data.socketId) {
+ data.secret = null;
+ socket.broadcast.emit(data.socketId, data);
+ };
+ });
+});
+
+[ 'css', 'js', 'plugin', 'lib' ].forEach(function(dir) {
+ app.use('/' + dir, staticDir(opts.baseDir + dir));
+});
+
+app.get("/", function(req, res) {
+ res.writeHead(200, {'Content-Type': 'text/html'});
+
+ var stream = fs.createReadStream(opts.baseDir + '/index.html');
+ stream.on('error', function( error ) {
+		res.write('<style>body{font-family: sans-serif;}</style><h2>reveal.js multiplex server.</h2><a href="/token">Generate token</a>');
+ res.end();
+ });
+ stream.on('readable', function() {
+ stream.pipe(res);
+ });
+});
+
+app.get("/token", function(req,res) {
+ var ts = new Date().getTime();
+ var rand = Math.floor(Math.random()*9999999);
+ var secret = ts.toString() + rand.toString();
+ res.send({secret: secret, socketId: createHash(secret)});
+});
+
+var createHash = function(secret) {
+ var cipher = crypto.createCipher('blowfish', secret);
+ return(cipher.final('hex'));
+};
+
+// Actually listen
+server.listen( opts.port || null );
+
+var brown = '\033[33m',
+ green = '\033[32m',
+ reset = '\033[0m';
+
+console.log( brown + "reveal.js:" + reset + " Multiplex running on port " + green + opts.port + reset );
\ No newline at end of file
diff --git a/sensecamp2019/reveal.js/plugin/multiplex/master.js b/sensecamp2019/reveal.js/plugin/multiplex/master.js
new file mode 100644
index 0000000..7f4bf45
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/multiplex/master.js
@@ -0,0 +1,34 @@
+(function() {
+
+ // Don't emit events from inside of notes windows
+ if ( window.location.search.match( /receiver/gi ) ) { return; }
+
+ var multiplex = Reveal.getConfig().multiplex;
+
+ var socket = io.connect( multiplex.url );
+
+ function post() {
+
+ var messageData = {
+ state: Reveal.getState(),
+ secret: multiplex.secret,
+ socketId: multiplex.id
+ };
+
+ socket.emit( 'multiplex-statechanged', messageData );
+
+ };
+
+ // post once the page is loaded, so the client follows also on "open URL".
+ window.addEventListener( 'load', post );
+
+ // Monitor events that trigger a change in state
+ Reveal.addEventListener( 'slidechanged', post );
+ Reveal.addEventListener( 'fragmentshown', post );
+ Reveal.addEventListener( 'fragmenthidden', post );
+ Reveal.addEventListener( 'overviewhidden', post );
+ Reveal.addEventListener( 'overviewshown', post );
+ Reveal.addEventListener( 'paused', post );
+ Reveal.addEventListener( 'resumed', post );
+
+}());
diff --git a/sensecamp2019/reveal.js/plugin/multiplex/package.json b/sensecamp2019/reveal.js/plugin/multiplex/package.json
new file mode 100644
index 0000000..bbed77a
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/multiplex/package.json
@@ -0,0 +1,19 @@
+{
+ "name": "reveal-js-multiplex",
+ "version": "1.0.0",
+ "description": "reveal.js multiplex server",
+ "homepage": "http://revealjs.com",
+ "scripts": {
+ "start": "node index.js"
+ },
+ "engines": {
+ "node": "~4.1.1"
+ },
+ "dependencies": {
+ "express": "~4.13.3",
+ "grunt-cli": "~0.1.13",
+ "mustache": "~2.2.1",
+ "socket.io": "~1.3.7"
+ },
+ "license": "MIT"
+}
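
Taken together, the multiplex pieces work like this: the Node server above hands out a `{secret, socketId}` pair from `/token`, the master presentation (master.js) broadcasts its state using that secret, and client presentations (client.js) follow along by subscribing to the matching `socketId`. A hedged sketch of the corresponding `Reveal.initialize()` configuration; the `secret`, `id` and `url` values are placeholders to be filled from the `/token` endpoint and the server's address:

```js
// Master presentation: broadcasts state changes via master.js.
// secret and id are placeholders; fetch real values from GET /token on the server above.
Reveal.initialize({
    multiplex: {
        secret: '<secret from /token>',   // kept only in the master copy
        id: '<socketId from /token>',     // hash of the secret; shared with clients
        url: 'http://localhost:1948'      // where index.js above is listening
    },
    dependencies: [
        { src: 'socket.io/socket.io.js', async: true },
        { src: 'plugin/multiplex/master.js', async: true }
    ]
});

// Client presentations use the same id and url, but no secret,
// and load plugin/multiplex/client.js instead of master.js.
```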
diff --git a/sensecamp2019/reveal.js/plugin/notes-server/client.js b/sensecamp2019/reveal.js/plugin/notes-server/client.js
new file mode 100644
index 0000000..00b277b
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/notes-server/client.js
@@ -0,0 +1,65 @@
+(function() {
+
+ // don't emit events from inside the previews themselves
+ if( window.location.search.match( /receiver/gi ) ) { return; }
+
+ var socket = io.connect( window.location.origin ),
+ socketId = Math.random().toString().slice( 2 );
+
+ console.log( 'View slide notes at ' + window.location.origin + '/notes/' + socketId );
+
+ window.open( window.location.origin + '/notes/' + socketId, 'notes-' + socketId );
+
+ /**
+ * Posts the current slide data to the notes window
+ */
+ function post() {
+
+ var slideElement = Reveal.getCurrentSlide(),
+ notesElement = slideElement.querySelector( 'aside.notes' );
+
+ var messageData = {
+ notes: '',
+ markdown: false,
+ socketId: socketId,
+ state: Reveal.getState()
+ };
+
+ // Look for notes defined in a slide attribute
+ if( slideElement.hasAttribute( 'data-notes' ) ) {
+ messageData.notes = slideElement.getAttribute( 'data-notes' );
+ }
+
+ // Look for notes defined in an aside element
+ if( notesElement ) {
+ messageData.notes = notesElement.innerHTML;
+ messageData.markdown = typeof notesElement.getAttribute( 'data-markdown' ) === 'string';
+ }
+
+ socket.emit( 'statechanged', messageData );
+
+ }
+
+ // When a new notes window connects, post our current state
+ socket.on( 'new-subscriber', function( data ) {
+ post();
+ } );
+
+ // When the state changes from inside of the speaker view
+ socket.on( 'statechanged-speaker', function( data ) {
+ Reveal.setState( data.state );
+ } );
+
+ // Monitor events that trigger a change in state
+ Reveal.addEventListener( 'slidechanged', post );
+ Reveal.addEventListener( 'fragmentshown', post );
+ Reveal.addEventListener( 'fragmenthidden', post );
+ Reveal.addEventListener( 'overviewhidden', post );
+ Reveal.addEventListener( 'overviewshown', post );
+ Reveal.addEventListener( 'paused', post );
+ Reveal.addEventListener( 'resumed', post );
+
+ // Post the initial state
+ post();
+
+}());
diff --git a/sensecamp2019/reveal.js/plugin/notes-server/index.js b/sensecamp2019/reveal.js/plugin/notes-server/index.js
new file mode 100644
index 0000000..b95f071
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/notes-server/index.js
@@ -0,0 +1,69 @@
+var http = require('http');
+var express = require('express');
+var fs = require('fs');
+var io = require('socket.io');
+var Mustache = require('mustache');
+
+var app = express();
+var staticDir = express.static;
+var server = http.createServer(app);
+
+io = io(server);
+
+var opts = {
+ port : 1947,
+ baseDir : __dirname + '/../../'
+};
+
+io.on( 'connection', function( socket ) {
+
+ socket.on( 'new-subscriber', function( data ) {
+ socket.broadcast.emit( 'new-subscriber', data );
+ });
+
+ socket.on( 'statechanged', function( data ) {
+ delete data.state.overview;
+ socket.broadcast.emit( 'statechanged', data );
+ });
+
+ socket.on( 'statechanged-speaker', function( data ) {
+ delete data.state.overview;
+ socket.broadcast.emit( 'statechanged-speaker', data );
+ });
+
+});
+
+[ 'css', 'js', 'images', 'plugin', 'lib' ].forEach( function( dir ) {
+ app.use( '/' + dir, staticDir( opts.baseDir + dir ) );
+});
+
+app.get('/', function( req, res ) {
+
+ res.writeHead( 200, { 'Content-Type': 'text/html' } );
+ fs.createReadStream( opts.baseDir + '/index.html' ).pipe( res );
+
+});
+
+app.get( '/notes/:socketId', function( req, res ) {
+
+ fs.readFile( opts.baseDir + 'plugin/notes-server/notes.html', function( err, data ) {
+ res.send( Mustache.to_html( data.toString(), {
+ socketId : req.params.socketId
+ }));
+ });
+
+});
+
+// Actually listen
+server.listen( opts.port || null );
+
+var brown = '\033[33m',
+ green = '\033[32m',
+ reset = '\033[0m';
+
+var slidesLocation = 'http://localhost' + ( opts.port ? ( ':' + opts.port ) : '' );
+
+console.log( brown + 'reveal.js - Speaker Notes' + reset );
+console.log( '1. Open the slides at ' + green + slidesLocation + reset );
+console.log( '2. Click on the link in your JS console to go to the notes page' );
+console.log( '3. Advance through your slides and your notes will advance automatically' );
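
For completeness, the browser side pairs with this server through client.js below; a minimal dependency entry, assuming the server is started with `node plugin/notes-server` from the reveal.js root as the log output above suggests:

```js
// Sketch: load the notes-server client so the slides connect to the
// Node process above (which serves socket.io and the /notes/:socketId page).
Reveal.initialize({
    dependencies: [
        { src: 'socket.io/socket.io.js', async: true },
        { src: 'plugin/notes-server/client.js', async: true }
    ]
});
```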
diff --git a/sensecamp2019/reveal.js/plugin/notes-server/notes.html b/sensecamp2019/reveal.js/plugin/notes-server/notes.html
new file mode 100644
index 0000000..ab8c5b1
--- /dev/null
+++ b/sensecamp2019/reveal.js/plugin/notes-server/notes.html
@@ -0,0 +1,585 @@
+ [585 lines of HTML for the speaker notes page; the markup was lost in extraction. Only the page title survives: "reveal.js - Slide Notes".]