pydata intro/outro
This commit is contained in:
parent
23afea1731
commit
20f600d798
6 changed files with 1403 additions and 0 deletions
133
pydata14/__init__.py
Normal file
133
pydata14/__init__.py
Normal file
|
@ -0,0 +1,133 @@
|
|||
#!/usr/bin/python

# NOTE(review): `subprocess` is not referenced anywhere in this file —
# possibly left over or needed by consumers of the wildcard import; confirm
# before removing.
import subprocess
import os.path
# Wildcard import supplies the names used below: fps, easeInCubic,
# easeLinear, render, events, Rendertask.
from renderlib import *

# URL to Schedule-XML
# Points at a schedule.xml shipped in the same directory as this module.
scheduleUrl = 'file://' + os.path.join(os.path.dirname(os.path.abspath(__file__)), 'schedule.xml')

# For (really) too long titles
# NOTE(review): empty here; presumably maps event ids to replacement
# titles — not used in this file, verify against renderlib/callers.
titlemap = {

}
|
||||
|
||||
def introFrames():
    """Yield per-frame style updates for the intro animation.

    Timeline: 0.5 s with both texts hidden, then the speaker name fades
    in, the title joining one second later (both following a shared
    3-second cubic ease), both hold fully visible for 3 s, and finally
    both fade out linearly over 2 s.
    """
    # 0.5 s lead-in: name and title fully transparent.
    for _ in range(int(.5 * fps)):
        yield (
            ('name', 'style', 'fill-opacity', 0),
            ('title', 'style', 'fill-opacity', 0),
        )

    # 1 s: the name begins its 3-second ease-in; title still hidden.
    for frame in range(1 * fps):
        yield (
            ('name', 'style', 'fill-opacity', '%.4f' % easeInCubic(frame, 0, 1, 3 * fps)),
            ('title', 'style', 'fill-opacity', 0),
        )

    # 2 s: name continues its ease (offset by the second already played)
    # while the title starts its own 3-second ease-in.
    for frame in range(2 * fps):
        yield (
            ('name', 'style', 'fill-opacity', '%.4f' % easeInCubic(frame + 1 * fps, 0, 1, 3 * fps)),
            ('title', 'style', 'fill-opacity', '%.4f' % easeInCubic(frame, 0, 1, 3 * fps)),
        )

    # 1 s: name has reached full opacity; title finishes easing in.
    for frame in range(1 * fps):
        yield (
            ('name', 'style', 'fill-opacity', 1),
            ('title', 'style', 'fill-opacity', '%.4f' % easeInCubic(frame + 2 * fps, 0, 1, 3 * fps)),
        )

    # 3 s hold: both fully visible.
    for _ in range(3 * fps):
        yield (
            ('name', 'style', 'fill-opacity', 1),
            ('title', 'style', 'fill-opacity', 1),
        )

    # 2 s fade-out: both opacities drop linearly from 1 to 0.
    fadeout = 2 * fps
    for frame in range(fadeout):
        yield (
            ('name', 'style', 'fill-opacity', easeLinear(frame, 1, -1, fadeout)),
            ('title', 'style', 'fill-opacity', easeLinear(frame, 1, -1, fadeout)),
        )
||||
def outroFrames():
    """Yield per-frame style updates for the outro animation.

    Timeline: the license plate starts a 3-second cubic ease-in
    immediately, the text joins one second later on its own 3-second
    ease, both hold fully visible for 3 s, then both fade out linearly
    over 2 s.
    """
    # 1 s: the plate begins its 3-second ease-in; text still hidden.
    for frame in range(1 * fps):
        yield (
            ('plate', 'style', 'opacity', '%.4f' % easeInCubic(frame, 0, 1, 3 * fps)),
            ('text', 'style', 'opacity', 0),
        )

    # 2 s: plate continues (offset by the second already played) while
    # the text starts its own 3-second ease-in.
    for frame in range(2 * fps):
        yield (
            ('plate', 'style', 'opacity', '%.4f' % easeInCubic(frame + 1 * fps, 0, 1, 3 * fps)),
            ('text', 'style', 'opacity', '%.4f' % easeInCubic(frame, 0, 1, 3 * fps)),
        )

    # 1 s: plate has reached full opacity; text finishes easing in.
    for frame in range(1 * fps):
        yield (
            ('plate', 'style', 'opacity', 1),
            ('text', 'style', 'opacity', '%.4f' % easeInCubic(frame + 2 * fps, 0, 1, 3 * fps)),
        )

    # 3 s hold: both fully visible.
    for _ in range(3 * fps):
        yield (
            ('plate', 'style', 'opacity', 1),
            ('text', 'style', 'opacity', 1),
        )

    # 2 s fade-out: both opacities drop linearly from 1 to 0.
    fadeout = 2 * fps
    for frame in range(fadeout):
        yield (
            ('plate', 'style', 'opacity', easeLinear(frame, 1, -1, fadeout)),
            ('text', 'style', 'opacity', easeLinear(frame, 1, -1, fadeout)),
        )
|
||||
|
||||
def debug():
    """Render one outro and one intro locally for visual inspection."""
    # The outro needs no per-event substitutions.
    render('outro.svg', '../outro.dv', outroFrames)

    # Substitutions for a sample talk, hard-coded for quick testing.
    sample_talk = {
        '$id': 20227,
        '$title': "Driving Moore's Law with Python-Powered Machine Learning: An Insider's Perspective",
        '$subtitle': '',
        '$personnames': 'Felix Marczinowski, Philipp Mack, Sönke Niekamp'
    }
    render('intro.svg', '../intro.dv', introFrames, sample_talk)
|
||||
|
||||
def tasks(queue):
    """Enqueue one intro-render task per unique event in the schedule.

    Parameters:
        queue -- queue-like object; one Rendertask is put() per event.
    """
    # ids already enqueued; a set gives O(1) membership checks (the
    # original list made this loop quadratic in the number of events)
    seen = set()

    # iterate over all events extracted from the schedule xml-export
    for event in events(scheduleUrl):
        # the schedule may list the same talk more than once; skip repeats
        if event['id'] in seen:
            continue

        seen.add(event['id'])

        # generate a task description and put them into the queue
        queue.put(Rendertask(
            infile = 'intro.svg',
            outfile = str(event['id'])+".dv",
            sequence = introFrames,
            parameters = {
                '$id': event['id'],
                '$title': event['title'],
                '$subtitle': event['subtitle'],
                '$personnames': event['personnames']
            }
        ))
|
BIN
pydata14/artwork/bg.png
Normal file
BIN
pydata14/artwork/bg.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 69 KiB |
199
pydata14/artwork/by-sa.svg
Normal file
199
pydata14/artwork/by-sa.svg
Normal file
|
@ -0,0 +1,199 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://web.resource.org/cc/"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="120"
|
||||
height="42"
|
||||
id="svg2759"
|
||||
sodipodi:version="0.32"
|
||||
inkscape:version="0.45+devel"
|
||||
version="1.0"
|
||||
sodipodi:docname="by-sa.svg"
|
||||
inkscape:output_extension="org.inkscape.output.svg.inkscape">
|
||||
<defs
|
||||
id="defs2761" />
|
||||
<sodipodi:namedview
|
||||
id="base"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#8b8b8b"
|
||||
borderopacity="1"
|
||||
gridtolerance="10000"
|
||||
guidetolerance="10"
|
||||
objecttolerance="10"
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:zoom="1"
|
||||
inkscape:cx="179"
|
||||
inkscape:cy="89.569904"
|
||||
inkscape:document-units="px"
|
||||
inkscape:current-layer="layer1"
|
||||
width="120px"
|
||||
height="42px"
|
||||
inkscape:showpageshadow="false"
|
||||
inkscape:window-width="1198"
|
||||
inkscape:window-height="624"
|
||||
inkscape:window-x="488"
|
||||
inkscape:window-y="401" />
|
||||
<metadata
|
||||
id="metadata2764">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<g
|
||||
inkscape:label="Layer 1"
|
||||
inkscape:groupmode="layer"
|
||||
id="layer1">
|
||||
<g
|
||||
transform="matrix(0.9937807,0,0,0.9936694,-177.69409,-74.436409)"
|
||||
id="g287"
|
||||
inkscape:export-filename="/mnt/hgfs/Bov/Documents/Work/2007/cc/identity/srr buttons/big/by-sa.png"
|
||||
inkscape:export-xdpi="300.23013"
|
||||
inkscape:export-ydpi="300.23013">
|
||||
<path
|
||||
id="path3817_2_"
|
||||
nodetypes="ccccccc"
|
||||
d="M 182.23532,75.39014 L 296.29928,75.59326 C 297.89303,75.59326 299.31686,75.35644 299.31686,78.77344 L 299.17721,116.34033 L 179.3569,116.34033 L 179.3569,78.63379 C 179.3569,76.94922 179.51999,75.39014 182.23532,75.39014 z"
|
||||
style="fill:#aab2ab" />
|
||||
|
||||
<g
|
||||
id="g5908_2_"
|
||||
transform="matrix(0.872921,0,0,0.872921,50.12536,143.2144)">
|
||||
|
||||
<path
|
||||
id="path5906_2_"
|
||||
cx="296.35416"
|
||||
ry="22.939548"
|
||||
cy="264.3577"
|
||||
type="arc"
|
||||
rx="22.939548"
|
||||
d="M 187.20944,-55.6792 C 187.21502,-46.99896 180.18158,-39.95825 171.50134,-39.95212 C 162.82113,-39.94708 155.77929,-46.97998 155.77426,-55.66016 C 155.77426,-55.66687 155.77426,-55.67249 155.77426,-55.6792 C 155.76922,-64.36054 162.80209,-71.40125 171.48233,-71.40631 C 180.16367,-71.41193 187.20441,-64.37842 187.20944,-55.69824 C 187.20944,-55.69263 187.20944,-55.68591 187.20944,-55.6792 z"
|
||||
style="fill:#ffffff" />
|
||||
|
||||
<g
|
||||
id="g5706_2_"
|
||||
transform="translate(-289.6157,99.0653)">
|
||||
<path
|
||||
id="path5708_2_"
|
||||
d="M 473.88455,-167.54724 C 477.36996,-164.06128 479.11294,-159.79333 479.11294,-154.74451 C 479.11294,-149.69513 477.40014,-145.47303 473.9746,-142.07715 C 470.33929,-138.50055 466.04281,-136.71283 461.08513,-136.71283 C 456.18736,-136.71283 451.96526,-138.48544 448.42003,-142.03238 C 444.87419,-145.57819 443.10158,-149.81537 443.10158,-154.74451 C 443.10158,-159.6731 444.87419,-163.94049 448.42003,-167.54724 C 451.87523,-171.03375 456.09728,-172.77618 461.08513,-172.77618 C 466.13342,-172.77618 470.39914,-171.03375 473.88455,-167.54724 z M 450.76657,-165.20239 C 447.81982,-162.22601 446.34701,-158.7395 446.34701,-154.74005 C 446.34701,-150.7417 447.80529,-147.28485 450.72125,-144.36938 C 453.63778,-141.45288 457.10974,-139.99462 461.1383,-139.99462 C 465.16683,-139.99462 468.66848,-141.46743 471.64486,-144.41363 C 474.47076,-147.14947 475.88427,-150.59069 475.88427,-154.74005 C 475.88427,-158.85809 474.44781,-162.35297 471.57659,-165.22479 C 468.70595,-168.09546 465.22671,-169.53131 461.1383,-169.53131 C 457.04993,-169.53131 453.59192,-168.08813 450.76657,-165.20239 z M 458.52106,-156.49927 C 458.07074,-157.4809 457.39673,-157.9715 456.49781,-157.9715 C 454.90867,-157.9715 454.11439,-156.90198 454.11439,-154.763 C 454.11439,-152.62341 454.90867,-151.55389 456.49781,-151.55389 C 457.54719,-151.55389 458.29676,-152.07519 458.74647,-153.11901 L 460.94923,-151.94598 C 459.8993,-150.0805 458.32417,-149.14697 456.22374,-149.14697 C 454.60384,-149.14697 453.30611,-149.64367 452.33168,-150.63653 C 451.35561,-151.62994 450.86894,-152.99926 450.86894,-154.7445 C 450.86894,-156.46008 451.37123,-157.82159 452.37642,-158.83013 C 453.38161,-159.83806 454.63347,-160.34264 456.13423,-160.34264 C 458.35435,-160.34264 459.94407,-159.46776 460.90504,-157.71978 L 458.52106,-156.49927 z M 468.8844,-156.49927 C 468.43353,-157.4809 467.77292,-157.9715 466.90201,-157.9715 C 465.28095,-157.9715 464.46988,-156.90198 464.46988,-154.763 C 464.46988,-152.62341 465.28095,-151.55389 
466.90201,-151.55389 C 467.95304,-151.55389 468.68918,-152.07519 469.10925,-153.11901 L 471.36126,-151.94598 C 470.31301,-150.0805 468.74007,-149.14697 466.64358,-149.14697 C 465.02587,-149.14697 463.73095,-149.64367 462.75711,-150.63653 C 461.78494,-151.62994 461.29773,-152.99926 461.29773,-154.7445 C 461.29773,-156.46008 461.79221,-157.82159 462.78061,-158.83013 C 463.76843,-159.83806 465.02588,-160.34264 466.55408,-160.34264 C 468.77027,-160.34264 470.35776,-159.46776 471.3154,-157.71978 L 468.8844,-156.49927 z" />
|
||||
|
||||
</g>
|
||||
|
||||
</g>
|
||||
|
||||
<path
|
||||
d="M 297.29639,74.91064 L 181.06688,74.91064 C 179.8203,74.91064 178.80614,75.92529 178.80614,77.17187 L 178.80614,116.66748 C 178.80614,116.94922 179.03466,117.17822 179.31639,117.17822 L 299.04639,117.17822 C 299.32813,117.17822 299.55713,116.94922 299.55713,116.66748 L 299.55713,77.17188 C 299.55713,75.92529 298.54297,74.91064 297.29639,74.91064 z M 181.06688,75.93213 L 297.29639,75.93213 C 297.97998,75.93213 298.53565,76.48828 298.53565,77.17188 C 298.53565,77.17188 298.53565,93.09131 298.53565,104.59034 L 215.4619,104.59034 C 212.41698,110.09571 206.55077,113.83399 199.81835,113.83399 C 193.083,113.83399 187.21825,110.09913 184.1748,104.59034 L 179.82666,104.59034 C 179.82666,93.09132 179.82666,77.17188 179.82666,77.17188 C 179.82664,76.48828 180.38329,75.93213 181.06688,75.93213 z"
|
||||
id="path294" />
|
||||
|
||||
<g
|
||||
enable-background="new "
|
||||
id="g296">
|
||||
<path
|
||||
d="M 265.60986,112.8833 C 265.68994,113.03906 265.79736,113.16504 265.93115,113.26172 C 266.06494,113.35791 266.22119,113.42969 266.40088,113.47608 C 266.58154,113.52296 266.76807,113.54639 266.96045,113.54639 C 267.09033,113.54639 267.22998,113.53565 267.3794,113.51368 C 267.52784,113.4922 267.66749,113.44972 267.79835,113.3877 C 267.92823,113.32569 268.03761,113.23975 268.12355,113.13086 C 268.21144,113.02197 268.25441,112.88379 268.25441,112.71533 C 268.25441,112.53515 268.19679,112.38916 268.08156,112.27685 C 267.9673,112.16455 267.81594,112.07177 267.62941,111.99658 C 267.44386,111.92236 267.23195,111.85693 266.9966,111.80078 C 266.76027,111.74463 266.52101,111.68262 266.27883,111.61377 C 266.02981,111.55176 265.78762,111.47559 265.55129,111.38525 C 265.31594,111.29541 265.10402,111.17822 264.9175,111.03515 C 264.73098,110.89208 264.58059,110.71337 264.46535,110.49853 C 264.35109,110.28369 264.29347,110.02392 264.29347,109.71923 C 264.29347,109.37646 264.36671,109.07958 264.51222,108.82763 C 264.6587,108.57568 264.85011,108.36572 265.08644,108.19726 C 265.32179,108.02929 265.58937,107.90478 265.8882,107.82372 C 266.18605,107.74315 266.48488,107.70263 266.78273,107.70263 C 267.13136,107.70263 267.46535,107.74169 267.78566,107.81982 C 268.105,107.89746 268.39015,108.02392 268.6382,108.19824 C 268.88722,108.37256 269.08449,108.59521 269.23097,108.86621 C 269.37648,109.13721 269.44972,109.46582 269.44972,109.85156 L 268.02784,109.85156 C 268.01514,109.65234 267.97315,109.4873 267.90284,109.35693 C 267.83155,109.22607 267.73682,109.12353 267.61964,109.04834 C 267.50148,108.97412 267.36671,108.9209 267.21534,108.89014 C 267.063,108.85889 266.89796,108.84326 266.71827,108.84326 C 266.60108,108.84326 266.48292,108.85596 266.36573,108.88037 C 266.24757,108.90576 266.14112,108.94922 266.04542,109.01123 C 265.94874,109.07373 265.86964,109.15137 265.80812,109.24463 C 265.7466,109.33838 265.71535,109.45654 265.71535,109.59961 C 265.71535,109.73047 265.73976,109.83643 
265.78957,109.91699 C 265.83937,109.99804 265.93801,110.07275 266.08352,110.14111 C 266.22903,110.20947 266.43118,110.27832 266.68899,110.34668 C 266.9468,110.41504 267.28372,110.50244 267.70071,110.60791 C 267.82473,110.63281 267.99661,110.67822 268.21731,110.74365 C 268.43801,110.80908 268.65676,110.91308 268.87454,111.05615 C 269.09231,111.1997 269.27981,111.39111 269.43899,111.63037 C 269.59719,111.87012 269.67629,112.17676 269.67629,112.55029 C 269.67629,112.85547 269.61672,113.13867 269.49856,113.3999 C 269.3804,113.66162 269.20461,113.8872 268.97122,114.07666 C 268.73782,114.26709 268.44876,114.41455 268.10403,114.52051 C 267.75833,114.62647 267.35794,114.6792 266.90481,114.6792 C 266.53762,114.6792 266.18118,114.63379 265.83547,114.54346 C 265.49074,114.45313 265.18508,114.31104 264.92043,114.11768 C 264.65676,113.92432 264.4468,113.67774 264.29055,113.37891 C 264.13528,113.07959 264.06106,112.7251 264.06692,112.31397 L 265.4888,112.31397 C 265.48877,112.53809 265.52881,112.72803 265.60986,112.8833 z"
|
||||
id="path298"
|
||||
style="fill:#ffffff" />
|
||||
|
||||
<path
|
||||
d="M 273.8667,107.8667 L 276.35986,114.53076 L 274.8374,114.53076 L 274.33349,113.04638 L 271.84033,113.04638 L 271.31787,114.53076 L 269.84326,114.53076 L 272.36377,107.8667 L 273.8667,107.8667 z M 273.95068,111.95264 L 273.11084,109.50928 L 273.09229,109.50928 L 272.22315,111.95264 L 273.95068,111.95264 z"
|
||||
id="path300"
|
||||
style="fill:#ffffff" />
|
||||
|
||||
</g>
|
||||
|
||||
<g
|
||||
enable-background="new "
|
||||
id="g302">
|
||||
<path
|
||||
d="M 239.17821,107.8667 C 239.49559,107.8667 239.78563,107.89502 240.04735,107.95068 C 240.30907,108.00683 240.53368,108.09863 240.72118,108.22607 C 240.9077,108.35351 241.05321,108.52295 241.15575,108.73437 C 241.25829,108.94579 241.31005,109.20703 241.31005,109.51806 C 241.31005,109.854 241.23388,110.13329 241.08056,110.35742 C 240.92822,110.58154 240.70165,110.76465 240.40283,110.90771 C 240.81494,111.02587 241.12256,111.23291 241.32568,111.5288 C 241.5288,111.82469 241.63037,112.18114 241.63037,112.59814 C 241.63037,112.93408 241.56494,113.22509 241.43408,113.47119 C 241.30322,113.7168 241.12646,113.91748 240.90576,114.07324 C 240.68408,114.229 240.43115,114.34424 240.14795,114.41845 C 239.86377,114.49365 239.57275,114.53075 239.27295,114.53075 L 236.03662,114.53075 L 236.03662,107.86669 L 239.17821,107.86669 L 239.17821,107.8667 z M 238.99071,110.56201 C 239.25243,110.56201 239.46727,110.5 239.63622,110.37597 C 239.80419,110.25146 239.88817,110.05029 239.88817,109.77099 C 239.88817,109.61572 239.85985,109.48828 239.80419,109.38915 C 239.74755,109.28954 239.67333,109.21239 239.57958,109.15624 C 239.48583,109.10058 239.37841,109.06151 239.25731,109.04003 C 239.13524,109.01806 239.00926,109.00732 238.8784,109.00732 L 237.50535,109.00732 L 237.50535,110.56201 L 238.99071,110.56201 z M 239.07664,113.39014 C 239.22019,113.39014 239.35691,113.37647 239.48777,113.34815 C 239.61863,113.32032 239.73484,113.27344 239.83445,113.2085 C 239.93406,113.14307 240.01316,113.0542 240.07273,112.94239 C 240.1323,112.83058 240.1616,112.68751 240.1616,112.51319 C 240.1616,112.17139 240.06492,111.92725 239.87156,111.78126 C 239.6782,111.63527 239.42234,111.56202 239.10496,111.56202 L 237.50535,111.56202 L 237.50535,113.39014 L 239.07664,113.39014 z"
|
||||
id="path304"
|
||||
style="fill:#ffffff" />
|
||||
|
||||
<path
|
||||
d="M 241.88914,107.8667 L 243.53269,107.8667 L 245.09324,110.49854 L 246.64402,107.8667 L 248.27781,107.8667 L 245.80418,111.97315 L 245.80418,114.53077 L 244.33543,114.53077 L 244.33543,111.93604 L 241.88914,107.8667 z"
|
||||
id="path306"
|
||||
style="fill:#ffffff" />
|
||||
|
||||
</g>
|
||||
|
||||
<g
|
||||
id="g6316_1_"
|
||||
transform="matrix(0.624995,0,0,0.624995,391.2294,176.9332)">
|
||||
|
||||
<path
|
||||
id="path6318_1_"
|
||||
cx="475.97119"
|
||||
ry="29.209877"
|
||||
cy="252.08646"
|
||||
type="arc"
|
||||
rx="29.209877"
|
||||
d="M -175.0083,-139.1153 C -175.00204,-129.7035 -182.62555,-122.06751 -192.03812,-122.06049 C -201.44913,-122.05341 -209.08512,-129.67774 -209.09293,-139.09028 C -209.09293,-139.09809 -209.09293,-139.10749 -209.09293,-139.1153 C -209.09919,-148.52784 -201.47413,-156.1623 -192.06311,-156.17011 C -182.65054,-156.17713 -175.01456,-148.55207 -175.0083,-139.14026 C -175.0083,-139.13092 -175.0083,-139.1239 -175.0083,-139.1153 z"
|
||||
style="fill:#ffffff" />
|
||||
|
||||
<g
|
||||
id="g6320_1_"
|
||||
transform="translate(-23.9521,-89.72962)">
|
||||
<path
|
||||
id="path6322_1_"
|
||||
d="M -168.2204,-68.05536 C -173.39234,-68.05536 -177.76892,-66.25067 -181.35175,-62.64203 C -185.02836,-58.90759 -186.86588,-54.48883 -186.86588,-49.38568 C -186.86588,-44.28253 -185.02836,-39.89416 -181.35175,-36.22308 C -177.67673,-32.55114 -173.29859,-30.71521 -168.2204,-30.71521 C -163.07974,-30.71521 -158.62503,-32.56677 -154.85312,-36.26996 C -151.30307,-39.78558 -149.52652,-44.15827 -149.52652,-49.38568 C -149.52652,-54.6123 -151.33432,-59.03265 -154.94843,-62.64203 C -158.5625,-66.25067 -162.98599,-68.05536 -168.2204,-68.05536 z M -168.17352,-64.69519 C -163.936,-64.69519 -160.33752,-63.20221 -157.37655,-60.21466 C -154.38748,-57.25836 -152.89214,-53.64899 -152.89214,-49.38568 C -152.89214,-45.09186 -154.35466,-41.52856 -157.28438,-38.69653 C -160.36876,-35.64727 -163.99849,-34.12304 -168.17351,-34.12304 C -172.34856,-34.12304 -175.94701,-35.63244 -178.96892,-38.64965 C -181.9908,-41.66918 -183.50176,-45.24657 -183.50176,-49.38567 C -183.50176,-53.52398 -181.97518,-57.13414 -178.92205,-60.21465 C -175.9939,-63.20221 -172.41107,-64.69519 -168.17352,-64.69519 z" />
|
||||
|
||||
<path
|
||||
id="path6324_1_"
|
||||
d="M -176.49548,-52.02087 C -175.75171,-56.71856 -172.44387,-59.22949 -168.30008,-59.22949 C -162.33911,-59.22949 -158.70783,-54.90448 -158.70783,-49.1372 C -158.70783,-43.50982 -162.57194,-39.13793 -168.39383,-39.13793 C -172.39856,-39.13793 -175.98297,-41.60277 -176.63611,-46.43877 L -171.93292,-46.43877 C -171.7923,-43.92778 -170.1626,-43.04418 -167.83447,-43.04418 C -165.1813,-43.04418 -163.4563,-45.50908 -163.4563,-49.27709 C -163.4563,-53.22942 -164.94693,-55.32244 -167.74228,-55.32244 C -169.79074,-55.32244 -171.55948,-54.57787 -171.93292,-52.02087 L -170.56418,-52.02789 L -174.26734,-48.32629 L -177.96894,-52.02789 L -176.49548,-52.02087 z" />
|
||||
|
||||
</g>
|
||||
|
||||
</g>
|
||||
|
||||
<g
|
||||
id="g313">
|
||||
<circle
|
||||
cx="242.56226"
|
||||
cy="90.224609"
|
||||
r="10.8064"
|
||||
id="circle315"
|
||||
sodipodi:cx="242.56226"
|
||||
sodipodi:cy="90.224609"
|
||||
sodipodi:rx="10.8064"
|
||||
sodipodi:ry="10.8064"
|
||||
style="fill:#ffffff" />
|
||||
|
||||
<g
|
||||
id="g317">
|
||||
<path
|
||||
d="M 245.68994,87.09766 C 245.68994,86.68116 245.35205,86.34424 244.93603,86.34424 L 240.16357,86.34424 C 239.74755,86.34424 239.40966,86.68115 239.40966,87.09766 L 239.40966,91.87061 L 240.74071,91.87061 L 240.74071,97.52295 L 244.3579,97.52295 L 244.3579,91.87061 L 245.68993,91.87061 L 245.68993,87.09766 L 245.68994,87.09766 z"
|
||||
id="path319" />
|
||||
|
||||
<circle
|
||||
cx="242.5498"
|
||||
cy="84.083008"
|
||||
r="1.63232"
|
||||
id="circle321"
|
||||
sodipodi:cx="242.5498"
|
||||
sodipodi:cy="84.083008"
|
||||
sodipodi:rx="1.63232"
|
||||
sodipodi:ry="1.63232" />
|
||||
|
||||
</g>
|
||||
|
||||
<path
|
||||
clip-rule="evenodd"
|
||||
d="M 242.53467,78.31836 C 239.30322,78.31836 236.56641,79.4458 234.32715,81.70215 C 232.0293,84.03516 230.88086,86.79736 230.88086,89.98633 C 230.88086,93.1753 232.0293,95.91846 234.32715,98.21338 C 236.625,100.50781 239.36133,101.65527 242.53467,101.65527 C 245.74756,101.65527 248.53272,100.49853 250.88819,98.18359 C 253.10889,95.98681 254.21827,93.2539 254.21827,89.98632 C 254.21827,86.71874 253.08936,83.95751 250.83057,81.70214 C 248.57178,79.4458 245.80615,78.31836 242.53467,78.31836 z M 242.56396,80.41797 C 245.2124,80.41797 247.46142,81.35156 249.31103,83.21875 C 251.18115,85.06592 252.11572,87.32227 252.11572,89.98633 C 252.11572,92.66992 251.20068,94.89746 249.36963,96.66699 C 247.4419,98.57275 245.17334,99.52539 242.56397,99.52539 C 239.9546,99.52539 237.70557,98.58252 235.81739,96.6958 C 233.92774,94.80957 232.98389,92.57324 232.98389,89.98633 C 232.98389,87.3999 233.93799,85.14404 235.84619,83.21875 C 237.67676,81.35156 239.9165,80.41797 242.56396,80.41797 z"
|
||||
id="path323"
|
||||
style="fill-rule:evenodd" />
|
||||
|
||||
</g>
|
||||
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 16 KiB |
91
pydata14/artwork/intro.svg
Normal file
91
pydata14/artwork/intro.svg
Normal file
|
@ -0,0 +1,91 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="1024"
|
||||
height="576"
|
||||
id="svg2"
|
||||
version="1.1"
|
||||
inkscape:version="0.48.4 r9939"
|
||||
sodipodi:docname="intro.svg">
|
||||
<defs
|
||||
id="defs4" />
|
||||
<sodipodi:namedview
|
||||
id="base"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1.0"
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:zoom="0.98994949"
|
||||
inkscape:cx="191.22587"
|
||||
inkscape:cy="474.75122"
|
||||
inkscape:document-units="px"
|
||||
inkscape:current-layer="layer1"
|
||||
showgrid="false"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="993"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="27"
|
||||
inkscape:window-maximized="1"
|
||||
showguides="true"
|
||||
inkscape:guide-bbox="true">
|
||||
<sodipodi:guide
|
||||
orientation="1,0"
|
||||
position="127.67857,526.07143"
|
||||
id="guide3015" />
|
||||
<sodipodi:guide
|
||||
orientation="1,0"
|
||||
position="955.71429,370"
|
||||
id="guide3011" />
|
||||
</sodipodi:namedview>
|
||||
<metadata
|
||||
id="metadata7">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title />
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<g
|
||||
inkscape:label="Ebene 1"
|
||||
inkscape:groupmode="layer"
|
||||
id="layer1"
|
||||
transform="translate(0,-476.36218)">
|
||||
<image
|
||||
sodipodi:absref="/home/peter/AAA-VOC/intro-outro-generator/pydata14/artwork/bg.png"
|
||||
xlink:href="bg.png"
|
||||
x="0"
|
||||
id="bg"
|
||||
height="576"
|
||||
width="1024"
|
||||
y="476.36218" />
|
||||
<flowRoot
|
||||
xml:space="preserve"
|
||||
id="text"
|
||||
style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#00b1e0;fill-opacity:1;stroke:none;font-family:Sans"
|
||||
transform="translate(-113.00893,265.0333)"><flowRegion
|
||||
id="flowRegion3006"><rect
|
||||
style="line-height:120.00000477%;fill:#00b1e0;fill-opacity:1"
|
||||
id="rect3008"
|
||||
width="828.04333"
|
||||
height="278.94785"
|
||||
x="240.67989"
|
||||
y="257.14285" /></flowRegion><flowPara
|
||||
id="name"
|
||||
style="font-size:41.50000000000000000px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:120.00000476999998966%;fill:#ec9238;fill-opacity:1;font-family:Mark Pro;-inkscape-font-specification:Mark Pro">$personnames</flowPara><flowPara
|
||||
id="title"
|
||||
style="font-size:41.5px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:120.00000477%;fill:#449cb7;fill-opacity:1;font-family:Mark Pro;-inkscape-font-specification:Mark Pro">$title</flowPara></flowRoot> </g>
|
||||
</svg>
|
After Width: | Height: | Size: 3.2 KiB |
224
pydata14/artwork/outro.svg
Normal file
224
pydata14/artwork/outro.svg
Normal file
|
@ -0,0 +1,224 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="1024"
|
||||
height="576"
|
||||
id="svg2"
|
||||
version="1.1"
|
||||
inkscape:version="0.48.4 r9939"
|
||||
sodipodi:docname="outro.svg">
|
||||
<defs
|
||||
id="defs4" />
|
||||
<sodipodi:namedview
|
||||
id="base"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1.0"
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:zoom="0.49497475"
|
||||
inkscape:cx="-162.05614"
|
||||
inkscape:cy="330.54903"
|
||||
inkscape:document-units="px"
|
||||
inkscape:current-layer="layer1"
|
||||
showgrid="false"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="993"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="27"
|
||||
inkscape:window-maximized="1"
|
||||
showguides="true"
|
||||
inkscape:guide-bbox="true">
|
||||
<sodipodi:guide
|
||||
orientation="1,0"
|
||||
position="127.67857,526.07143"
|
||||
id="guide3015" />
|
||||
<sodipodi:guide
|
||||
orientation="1,0"
|
||||
position="955.71429,370"
|
||||
id="guide3011" />
|
||||
</sodipodi:namedview>
|
||||
<metadata
|
||||
id="metadata7">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title />
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<g
|
||||
inkscape:label="Ebene 1"
|
||||
inkscape:groupmode="layer"
|
||||
id="layer1"
|
||||
transform="translate(0,-476.36218)">
|
||||
<image
|
||||
sodipodi:absref="/home/peter/AAA-VOC/intro-outro-generator/pydata14/artwork/bg.png"
|
||||
xlink:href="bg.png"
|
||||
y="476.36218"
|
||||
width="1024"
|
||||
height="576"
|
||||
id="bg"
|
||||
x="0" />
|
||||
<g
|
||||
id="plate"
|
||||
inkscape:label="#plate"
|
||||
transform="matrix(2.750931,0,0,2.750931,346.94413,529.71939)">
|
||||
<g
|
||||
inkscape:export-ydpi="300.23013"
|
||||
inkscape:export-xdpi="300.23013"
|
||||
inkscape:export-filename="/mnt/hgfs/Bov/Documents/Work/2007/cc/identity/srr buttons/big/by-sa.png"
|
||||
id="g287"
|
||||
transform="matrix(0.9937807,0,0,0.9936694,-177.69409,-74.436409)">
|
||||
<path
|
||||
style="fill:#aab2ab"
|
||||
d="m 182.23532,75.39014 114.06396,0.20312 c 1.59375,0 3.01758,-0.23682 3.01758,3.18018 l -0.13965,37.56689 -119.82031,0 0,-37.70654 c 0,-1.68457 0.16309,-3.24365 2.87842,-3.24365 z"
|
||||
nodetypes="ccccccc"
|
||||
id="path3817_2_"
|
||||
inkscape:connector-curvature="0" />
|
||||
<g
|
||||
transform="matrix(0.872921,0,0,0.872921,50.12536,143.2144)"
|
||||
id="g5908_2_">
|
||||
<path
|
||||
style="fill:#ffffff"
|
||||
d="m 187.20944,-55.6792 c 0.006,8.68024 -7.02786,15.72095 -15.7081,15.72708 -8.68021,0.005 -15.72205,-7.02786 -15.72708,-15.70804 0,-0.0067 0,-0.01233 0,-0.01904 -0.005,-8.68134 7.02783,-15.72205 15.70807,-15.72711 8.68134,-0.0056 15.72208,7.02789 15.72711,15.70807 0,0.0056 0,0.01233 0,0.01904 z"
|
||||
rx="22.939548"
|
||||
type="arc"
|
||||
cy="264.3577"
|
||||
ry="22.939548"
|
||||
cx="296.35416"
|
||||
id="path5906_2_"
|
||||
inkscape:connector-curvature="0" />
|
||||
<g
|
||||
transform="translate(-289.6157,99.0653)"
|
||||
id="g5706_2_">
|
||||
<path
|
||||
d="m 473.88455,-167.54724 c 3.48541,3.48596 5.22839,7.75391 5.22839,12.80273 0,5.04938 -1.7128,9.27148 -5.13834,12.66736 -3.63531,3.5766 -7.93179,5.36432 -12.88947,5.36432 -4.89777,0 -9.11987,-1.77261 -12.6651,-5.31955 -3.54584,-3.54581 -5.31845,-7.78299 -5.31845,-12.71213 0,-4.92859 1.77261,-9.19598 5.31845,-12.80273 3.4552,-3.48651 7.67725,-5.22894 12.6651,-5.22894 5.04829,0 9.31401,1.74243 12.79942,5.22894 z m -23.11798,2.34485 c -2.94675,2.97638 -4.41956,6.46289 -4.41956,10.46234 0,3.99835 1.45828,7.4552 4.37424,10.37067 2.91653,2.9165 6.38849,4.37476 10.41705,4.37476 4.02853,0 7.53018,-1.47281 10.50656,-4.41901 2.8259,-2.73584 4.23941,-6.17706 4.23941,-10.32642 0,-4.11804 -1.43646,-7.61292 -4.30768,-10.48474 -2.87064,-2.87067 -6.34988,-4.30652 -10.43829,-4.30652 -4.08837,0 -7.54638,1.44318 -10.37173,4.32892 z m 7.75449,8.70312 c -0.45032,-0.98163 -1.12433,-1.47223 -2.02325,-1.47223 -1.58914,0 -2.38342,1.06952 -2.38342,3.2085 0,2.13959 0.79428,3.20911 2.38342,3.20911 1.04938,0 1.79895,-0.5213 2.24866,-1.56512 l 2.20276,1.17303 c -1.04993,1.86548 -2.62506,2.79901 -4.72549,2.79901 -1.6199,0 -2.91763,-0.4967 -3.89206,-1.48956 -0.97607,-0.99341 -1.46274,-2.36273 -1.46274,-4.10797 0,-1.71558 0.50229,-3.07709 1.50748,-4.08563 1.00519,-1.00793 2.25705,-1.51251 3.75781,-1.51251 2.22012,0 3.80984,0.87488 4.77081,2.62286 l -2.38398,1.22051 z m 10.36334,0 c -0.45087,-0.98163 -1.11148,-1.47223 -1.98239,-1.47223 -1.62106,0 -2.43213,1.06952 -2.43213,3.2085 0,2.13959 0.81107,3.20911 2.43213,3.20911 1.05103,0 1.78717,-0.5213 2.20724,-1.56512 l 2.25201,1.17303 c -1.04825,1.86548 -2.62119,2.79901 -4.71768,2.79901 -1.61771,0 -2.91263,-0.4967 -3.88647,-1.48956 -0.97217,-0.99341 -1.45938,-2.36273 -1.45938,-4.10797 0,-1.71558 0.49448,-3.07709 1.48288,-4.08563 0.98782,-1.00793 2.24527,-1.51251 3.77347,-1.51251 2.21619,0 3.80368,0.87488 4.76132,2.62286 l -2.431,1.22051 z"
|
||||
id="path5708_2_"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
</g>
|
||||
<path
|
||||
id="path294"
|
||||
d="m 297.29639,74.91064 -116.22951,0 c -1.24658,0 -2.26074,1.01465 -2.26074,2.26123 l 0,39.49561 c 0,0.28174 0.22852,0.51074 0.51025,0.51074 l 119.73,0 c 0.28174,0 0.51074,-0.229 0.51074,-0.51074 l 0,-39.4956 c 0,-1.24659 -1.01416,-2.26124 -2.26074,-2.26124 z m -116.22951,1.02149 116.22951,0 c 0.68359,0 1.23926,0.55615 1.23926,1.23975 0,0 0,15.91943 0,27.41846 l -83.07375,0 c -3.04492,5.50537 -8.91113,9.24365 -15.64355,9.24365 -6.73535,0 -12.6001,-3.73486 -15.64355,-9.24365 l -4.34814,0 c 0,-11.49902 0,-27.41846 0,-27.41846 -2e-5,-0.6836 0.55663,-1.23975 1.24022,-1.23975 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
<g
|
||||
id="g296"
|
||||
enable-background="new ">
|
||||
<path
|
||||
style="fill:#ffffff"
|
||||
id="path298"
|
||||
d="m 265.60986,112.8833 c 0.0801,0.15576 0.1875,0.28174 0.32129,0.37842 0.13379,0.0962 0.29004,0.16797 0.46973,0.21436 0.18066,0.0469 0.36719,0.0703 0.55957,0.0703 0.12988,0 0.26953,-0.0107 0.41895,-0.0327 0.14844,-0.0215 0.28809,-0.064 0.41895,-0.12598 0.12988,-0.062 0.23926,-0.14795 0.3252,-0.25684 0.0879,-0.10889 0.13086,-0.24707 0.13086,-0.41553 0,-0.18018 -0.0576,-0.32617 -0.17285,-0.43848 -0.11426,-0.1123 -0.26562,-0.20508 -0.45215,-0.28027 -0.18555,-0.0742 -0.39746,-0.13965 -0.63281,-0.1958 -0.23633,-0.0562 -0.47559,-0.11816 -0.71777,-0.18701 -0.24902,-0.062 -0.49121,-0.13818 -0.72754,-0.22852 -0.23535,-0.0898 -0.44727,-0.20703 -0.63379,-0.3501 -0.18652,-0.14307 -0.33691,-0.32178 -0.45215,-0.53662 -0.11426,-0.21484 -0.17188,-0.47461 -0.17188,-0.7793 0,-0.34277 0.0732,-0.63965 0.21875,-0.8916 0.14648,-0.25195 0.33789,-0.46191 0.57422,-0.63037 0.23535,-0.16797 0.50293,-0.29248 0.80176,-0.37354 0.29785,-0.0806 0.59668,-0.12109 0.89453,-0.12109 0.34863,0 0.68262,0.0391 1.00293,0.11719 0.31934,0.0776 0.60449,0.2041 0.85254,0.37842 0.24902,0.17432 0.44629,0.39697 0.59277,0.66797 0.14551,0.271 0.21875,0.59961 0.21875,0.98535 l -1.42188,0 c -0.0127,-0.19922 -0.0547,-0.36426 -0.125,-0.49463 -0.0713,-0.13086 -0.16602,-0.2334 -0.2832,-0.30859 -0.11816,-0.0742 -0.25293,-0.12744 -0.4043,-0.1582 -0.15234,-0.0312 -0.31738,-0.0469 -0.49707,-0.0469 -0.11719,0 -0.23535,0.0127 -0.35254,0.0371 -0.11816,0.0254 -0.22461,0.0688 -0.32031,0.13086 -0.0967,0.0625 -0.17578,0.14014 -0.2373,0.2334 -0.0615,0.0937 -0.0928,0.21191 -0.0928,0.35498 0,0.13086 0.0244,0.23682 0.0742,0.31738 0.0498,0.0811 0.14844,0.15576 0.29395,0.22412 0.14551,0.0684 0.34766,0.13721 0.60547,0.20557 0.25781,0.0684 0.59473,0.15576 1.01172,0.26123 0.12402,0.0249 0.2959,0.0703 0.5166,0.13574 0.2207,0.0654 0.43945,0.16943 0.65723,0.3125 0.21777,0.14355 0.40527,0.33496 0.56445,0.57422 0.1582,0.23975 0.2373,0.54639 0.2373,0.91992 0,0.30518 -0.0596,0.58838 -0.17773,0.84961 -0.11816,0.26172 -0.29395,0.4873 
-0.52734,0.67676 -0.2334,0.19043 -0.52246,0.33789 -0.86719,0.44385 -0.3457,0.10596 -0.74609,0.15869 -1.19922,0.15869 -0.36719,0 -0.72363,-0.0454 -1.06934,-0.13574 -0.34473,-0.0903 -0.65039,-0.23242 -0.91504,-0.42578 -0.26367,-0.19336 -0.47363,-0.43994 -0.62988,-0.73877 -0.15527,-0.29932 -0.22949,-0.65381 -0.22363,-1.06494 l 1.42188,0 c -3e-5,0.22412 0.04,0.41406 0.12106,0.56933 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:#ffffff"
|
||||
id="path300"
|
||||
d="m 273.8667,107.8667 2.49316,6.66406 -1.52246,0 -0.50391,-1.48438 -2.49316,0 -0.52246,1.48438 -1.47461,0 2.52051,-6.66406 1.50293,0 z m 0.084,4.08594 -0.83984,-2.44336 -0.0186,0 -0.86914,2.44336 1.72753,0 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
<g
|
||||
id="g302"
|
||||
enable-background="new ">
|
||||
<path
|
||||
style="fill:#ffffff"
|
||||
id="path304"
|
||||
d="m 239.17821,107.8667 c 0.31738,0 0.60742,0.0283 0.86914,0.084 0.26172,0.0561 0.48633,0.14795 0.67383,0.27539 0.18652,0.12744 0.33203,0.29688 0.43457,0.5083 0.10254,0.21142 0.1543,0.47266 0.1543,0.78369 0,0.33594 -0.0762,0.61523 -0.22949,0.83936 -0.15234,0.22412 -0.37891,0.40723 -0.67773,0.55029 0.41211,0.11816 0.71973,0.3252 0.92285,0.62109 0.20312,0.29589 0.30469,0.65234 0.30469,1.06934 0,0.33594 -0.0654,0.62695 -0.19629,0.87305 -0.13086,0.24561 -0.30762,0.44629 -0.52832,0.60205 -0.22168,0.15576 -0.47461,0.271 -0.75781,0.34521 -0.28418,0.0752 -0.5752,0.1123 -0.875,0.1123 l -3.23633,0 0,-6.66406 3.14159,0 0,1e-5 z m -0.1875,2.69531 c 0.26172,0 0.47656,-0.062 0.64551,-0.18604 0.16797,-0.12451 0.25195,-0.32568 0.25195,-0.60498 0,-0.15527 -0.0283,-0.28271 -0.084,-0.38184 -0.0566,-0.0996 -0.13086,-0.17676 -0.22461,-0.23291 -0.0937,-0.0557 -0.20117,-0.0947 -0.32227,-0.11621 -0.12207,-0.022 -0.24805,-0.0327 -0.37891,-0.0327 l -1.37305,0 0,1.55469 1.48536,0 z m 0.0859,2.82813 c 0.14355,0 0.28027,-0.0137 0.41113,-0.042 0.13086,-0.0278 0.24707,-0.0747 0.34668,-0.13965 0.0996,-0.0654 0.17871,-0.1543 0.23828,-0.26611 0.0596,-0.11181 0.0889,-0.25488 0.0889,-0.4292 0,-0.3418 -0.0967,-0.58594 -0.29004,-0.73193 -0.19336,-0.14599 -0.44922,-0.21924 -0.7666,-0.21924 l -1.59961,0 0,1.82812 1.57129,0 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
style="fill:#ffffff"
|
||||
id="path306"
|
||||
d="m 241.88914,107.8667 1.64355,0 1.56055,2.63184 1.55078,-2.63184 1.63379,0 -2.47363,4.10645 0,2.55762 -1.46875,0 0,-2.59473 -2.44629,-4.06934 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
<g
|
||||
transform="matrix(0.624995,0,0,0.624995,391.2294,176.9332)"
|
||||
id="g6316_1_">
|
||||
<path
|
||||
style="fill:#ffffff"
|
||||
d="m -175.0083,-139.1153 c 0.006,9.4118 -7.61725,17.04779 -17.02982,17.05481 -9.41101,0.007 -17.047,-7.61725 -17.05481,-17.02979 0,-0.008 0,-0.0172 0,-0.025 -0.006,-9.41254 7.6188,-17.047 17.02982,-17.05481 9.41257,-0.007 17.04855,7.61804 17.05481,17.02985 0,0.009 0,0.0164 0,0.025 z"
|
||||
rx="29.209877"
|
||||
type="arc"
|
||||
cy="252.08646"
|
||||
ry="29.209877"
|
||||
cx="475.97119"
|
||||
id="path6318_1_"
|
||||
inkscape:connector-curvature="0" />
|
||||
<g
|
||||
transform="translate(-23.9521,-89.72962)"
|
||||
id="g6320_1_">
|
||||
<path
|
||||
d="m -168.2204,-68.05536 c -5.17194,0 -9.54852,1.80469 -13.13135,5.41333 -3.67661,3.73444 -5.51413,8.1532 -5.51413,13.25635 0,5.10315 1.83752,9.49152 5.51413,13.1626 3.67502,3.67194 8.05316,5.50787 13.13135,5.50787 5.14066,0 9.59537,-1.85156 13.36728,-5.55475 3.55005,-3.51562 5.3266,-7.88831 5.3266,-13.11572 0,-5.22662 -1.8078,-9.64697 -5.42191,-13.25635 -3.61407,-3.60864 -8.03756,-5.41333 -13.27197,-5.41333 z m 0.0469,3.36017 c 4.23752,0 7.836,1.49298 10.79697,4.48053 2.98907,2.9563 4.48441,6.56567 4.48441,10.82898 0,4.29382 -1.46252,7.85712 -4.39224,10.68915 -3.08438,3.04926 -6.71411,4.57349 -10.88913,4.57349 -4.17505,0 -7.7735,-1.5094 -10.79541,-4.52661 -3.02188,-3.01953 -4.53284,-6.59692 -4.53284,-10.73602 0,-4.13831 1.52658,-7.74847 4.57971,-10.82898 2.92815,-2.98756 6.51098,-4.48054 10.74853,-4.48054 z"
|
||||
id="path6322_1_"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
d="m -176.49548,-52.02087 c 0.74377,-4.69769 4.05161,-7.20862 8.1954,-7.20862 5.96097,0 9.59225,4.32501 9.59225,10.09229 0,5.62738 -3.86411,9.99927 -9.686,9.99927 -4.00473,0 -7.58914,-2.46484 -8.24228,-7.30084 l 4.70319,0 c 0.14062,2.51099 1.77032,3.39459 4.09845,3.39459 2.65317,0 4.37817,-2.4649 4.37817,-6.23291 0,-3.95233 -1.49063,-6.04535 -4.28598,-6.04535 -2.04846,0 -3.8172,0.74457 -4.19064,3.30157 l 1.36874,-0.007 -3.70316,3.7016 -3.7016,-3.7016 1.47346,0.007 z"
|
||||
id="path6324_1_"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
</g>
|
||||
<g
|
||||
id="g313">
|
||||
<circle
|
||||
style="fill:#ffffff"
|
||||
sodipodi:ry="10.8064"
|
||||
sodipodi:rx="10.8064"
|
||||
sodipodi:cy="90.224609"
|
||||
sodipodi:cx="242.56226"
|
||||
id="circle315"
|
||||
r="10.8064"
|
||||
cy="90.224609"
|
||||
cx="242.56226"
|
||||
d="m 253.36866,90.224609 c 0,5.96821 -4.83819,10.806401 -10.8064,10.806401 -5.96821,0 -10.8064,-4.838191 -10.8064,-10.806401 0,-5.96821 4.83819,-10.8064 10.8064,-10.8064 5.96821,0 10.8064,4.83819 10.8064,10.8064 z" />
|
||||
<g
|
||||
id="g317">
|
||||
<path
|
||||
id="path319"
|
||||
d="m 245.68994,87.09766 c 0,-0.4165 -0.33789,-0.75342 -0.75391,-0.75342 l -4.77246,0 c -0.41602,0 -0.75391,0.33691 -0.75391,0.75342 l 0,4.77295 1.33105,0 0,5.65234 3.61719,0 0,-5.65234 1.33203,0 0,-4.77295 1e-5,0 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
<circle
|
||||
sodipodi:ry="1.63232"
|
||||
sodipodi:rx="1.63232"
|
||||
sodipodi:cy="84.083008"
|
||||
sodipodi:cx="242.5498"
|
||||
id="circle321"
|
||||
r="1.63232"
|
||||
cy="84.083008"
|
||||
cx="242.5498"
|
||||
d="m 244.18212,84.083008 c 0,0.901505 -0.73081,1.63232 -1.63232,1.63232 -0.9015,0 -1.63232,-0.730815 -1.63232,-1.63232 0,-0.901506 0.73082,-1.63232 1.63232,-1.63232 0.90151,0 1.63232,0.730814 1.63232,1.63232 z" />
|
||||
</g>
|
||||
<path
|
||||
style="fill-rule:evenodd"
|
||||
id="path323"
|
||||
d="m 242.53467,78.31836 c -3.23145,0 -5.96826,1.12744 -8.20752,3.38379 -2.29785,2.33301 -3.44629,5.09521 -3.44629,8.28418 0,3.18897 1.14844,5.93213 3.44629,8.22705 2.29785,2.29443 5.03418,3.44189 8.20752,3.44189 3.21289,0 5.99805,-1.15674 8.35352,-3.47168 2.2207,-2.19678 3.33008,-4.92969 3.33008,-8.19727 0,-3.26758 -1.12891,-6.02881 -3.3877,-8.28418 -2.25879,-2.25634 -5.02442,-3.38378 -8.2959,-3.38378 z m 0.0293,2.09961 c 2.64844,0 4.89746,0.93359 6.74707,2.80078 1.87012,1.84717 2.80469,4.10352 2.80469,6.76758 0,2.68359 -0.91504,4.91113 -2.74609,6.68066 -1.92773,1.90576 -4.19629,2.8584 -6.80566,2.8584 -2.60937,0 -4.8584,-0.94287 -6.74658,-2.82959 -1.88965,-1.88623 -2.8335,-4.12256 -2.8335,-6.70947 0,-2.58643 0.9541,-4.84229 2.8623,-6.76758 1.83057,-1.86719 4.07031,-2.80078 6.71777,-2.80078 z"
|
||||
clip-rule="evenodd"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#aab2ab;fill-opacity:1;stroke:none;font-family:Sans"
|
||||
x="283.94141"
|
||||
y="712.74786"
|
||||
id="text"
|
||||
sodipodi:linespacing="125%"
|
||||
inkscape:label="#text"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan3046"
|
||||
x="283.94141"
|
||||
y="712.74786"
|
||||
style="font-size:18px;fill:#aab2ab;fill-opacity:1">Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0)</tspan></text>
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 16 KiB |
756
pydata14/schedule.xml
Normal file
756
pydata14/schedule.xml
Normal file
|
@ -0,0 +1,756 @@
|
|||
<schedule>
|
||||
<conference>
|
||||
<title>PyData Berlin 2014</title>
|
||||
<acronym>Berlin2014</acronym>
|
||||
<start>2014-07-25</start>
|
||||
<end>2014-07-27</end>
|
||||
<days>3</days>
|
||||
<timeslot_duration>00:15</timeslot_duration>
|
||||
</conference>
|
||||
<day date="2014-07-25" index="1">
|
||||
<room name="B09">
|
||||
<event id="20254">
|
||||
<title>Interactive Plots Using Bokeh</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-25T12:45:00+0200</date>
|
||||
<start>12:45</start>
|
||||
<duration>02:45</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>Bokeh is a Python interactive visualization library for large datasets that natively uses the latest web technologies. Its goal is to provide elegant, concise construction of novel graphics in the style of Protovis/D3, while delivering high-performance interactivity over large data to thin clients. This tutorial will walk users through the steps to create different kinds of interactive plots using Bokeh. We will cover using Bokeh for static HTML output, the IPython notebook, and plot hosting and embedding using the Bokeh server.
|
||||
</abstract>
|
||||
<description>
|
||||
Bokeh is a Python interactive visualization library for large datasets that natively uses the latest web technologies. Its goal is to provide elegant, concise construction of novel graphics in the style of Protovis/D3, while delivering high-performance interactivity over large data to thin clients. This tutorial will walk users through the steps to create different kinds of interactive plots using Bokeh. We will cover using Bokeh for static HTML output, the IPython notebook, and plot hosting and embedding using the Bokeh server.</description>
|
||||
<type>tutorial</type>
|
||||
<persons>
|
||||
<person id="20038">Bryan Van De Ven</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20270">
|
||||
<title>Exploratory Time Series Analysis of NYC Subway Data</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-25T15:55:00+0200</date>
|
||||
<start>15:55</start>
|
||||
<duration>01:20</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>What questions arise during a quick model assessment? In this hands-on-tutorial we want to cover the whole chain from preparing data to choosing and fitting a model to properly assessing the quality of a predictive model. Our dataset in this tutorial are the numbers of people entering and exiting New York subway stations. Among other ways of building a predictive model, we introduce the python package pydse ( http://pydse.readthedocs.org/ ) and apply it to the dataset in order to derive the parameters of an ARMA-model (autoregressive moving average). At the end of the tutorial we evaluate the models and examine the strengths and weaknesses of various ways to measure the accuracy and quality of a predictive model.
|
||||
</abstract>
|
||||
<description>
|
||||
What questions arise during a quick model assessment? In this hands-on-tutorial we want to cover the whole chain from preparing data to choosing and fitting a model to properly assessing the quality of a predictive model. Our dataset in this tutorial are the numbers of people entering and exiting New York subway stations. Among other ways of building a predictive model, we introduce the python package pydse ( http://pydse.readthedocs.org/ ) and apply it to the dataset in order to derive the parameters of an ARMA-model (autoregressive moving average). At the end of the tutorial we evaluate the models and examine the strengths and weaknesses of various ways to measure the accuracy and quality of a predictive model.</description>
|
||||
<type>tutorial</type>
|
||||
<persons>
|
||||
<person id="20335">Felix Marczinowski</person>
|
||||
<person id="20334">Philipp Mack</person>
|
||||
<person id="20336">Sönke Niekamp</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20276">
|
||||
<title>Packaging and Deployment</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-25T17:25:00+0200</date>
|
||||
<start>17:25</start>
|
||||
<duration>01:05</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>None.
|
||||
</abstract>
|
||||
<description>
|
||||
None.</description>
|
||||
<type>tutorial</type>
|
||||
<persons>
|
||||
<person id="20036">Travis Oliphant</person>
|
||||
</persons>
|
||||
</event>
|
||||
</room>
|
||||
<room name="B05/B06">
|
||||
<event id="20221">
|
||||
<title>scikit-learn</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-25T14:10:00+0200</date>
|
||||
<start>12:45</start>
|
||||
<duration>02:45</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>None</abstract>
|
||||
<description>None</description>
|
||||
<type>tutorial</type>
|
||||
<persons>
|
||||
<person id="20203">Andreas Mueller</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20269">
|
||||
<title>Visualising Data through Pandas</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-25T15:55:00+0200</date>
|
||||
<start>15:55</start>
|
||||
<duration>01:20</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>Vincent D. Warmerdam is a data scientist and developer at GoDataDriven and a former university lecturer of mathematics and statistics. He is fluent in python, R and javascript and is currently checking out with scala and julia. Currently he does a lot of research on machine learning algorithms and applications that can solve problems in real time. The intersection of the algorithm and the use-case is of the most interest to him. During a half year sabbatical he travelled as a true digital nomad from Buenos Aires to San Francisco while still programming for clients. He has two nationalities (US/Netherlands) and lives in Amsterdam.</abstract>
|
||||
<description>Vincent D. Warmerdam is a data scientist and developer at GoDataDriven and a former university lecturer of mathematics and statistics. He is fluent in python, R and javascript and is currently checking out with scala and julia. Currently he does a lot of research on machine learning algorithms and applications that can solve problems in real time. The intersection of the algorithm and the use-case is of the most interest to him. During a half year sabbatical he travelled as a true digital nomad from Buenos Aires to San Francisco while still programming for clients. He has two nationalities (US/Netherlands) and lives in Amsterdam.</description>
|
||||
<type>tutorial</type>
|
||||
<persons>
|
||||
<person id="20332">Vincent Warmerdam</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20253">
|
||||
<title>Extract Transform Load using mETL</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-25T17:25:00+0200</date>
|
||||
<start>17:25</start>
|
||||
<duration>01:05</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>mETL is an ETL package written in Python which was developed to load elective data for Central European University. Program can be used in a more general way, it can be used to load practically any kind of data to any target. Code is open source and available for anyone who wants to use it. The main advantage is that it is configurable via Yaml files and You have the possibility to write any transformation in Python and You can use it natively from any framework as well. We are using this tool in production for many of our clients and It is really stable and reliable. The project has a few contributors all around the world right now and I hope many developers will join soon. I really want to show you how you can use it in your daily work. In this tutorial We will see the most common situations: - Installation - Write simple Yaml configuration files to load CSV, JSON, XML into MySQL or PostgreSQL Database, or convert CSV to JSON, etc. - Add transformations on your fields - Filter records based on condition - Walk through a directory to feed the tool - How the mapping works - Generate Yaml configurations automatically from data source - Migrate a database to another database</abstract>
|
||||
<description>mETL is an ETL package written in Python which was developed to load elective data for Central European University. Program can be used in a more general way, it can be used to load practically any kind of data to any target. Code is open source and available for anyone who wants to use it. The main advantage is that it is configurable via Yaml files and You have the possibility to write any transformation in Python and You can use it natively from any framework as well. We are using this tool in production for many of our clients and It is really stable and reliable. The project has a few contributors all around the world right now and I hope many developers will join soon. I really want to show you how you can use it in your daily work. In this tutorial We will see the most common situations: - Installation - Write simple Yaml configuration files to load CSV, JSON, XML into MySQL or PostgreSQL Database, or convert CSV to JSON, etc. - Add transformations on your fields - Filter records based on condition - Walk through a directory to feed the tool - How the mapping works - Generate Yaml configurations automatically from data source - Migrate a database to another database</description>
|
||||
<type>tutorial</type>
|
||||
<persons>
|
||||
<person id="20162">Bence Faludi</person>
|
||||
</persons>
|
||||
</event>
|
||||
</room>
|
||||
</day>
|
||||
<day date="2014-07-26" index="2">
|
||||
<room name="B09">
|
||||
<event id="20258">
|
||||
<title>Generators Will Free Your Mind</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T10:10:00+0200</date>
|
||||
<start>10:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>James Powell is a professional Python programmer based in New York City. He is the chair of the NYC Python meetup nycpython.com and has spoken on Python/CPython topics at PyData SV, PyData NYC, PyTexas, PyArkansas, PyGotham, and at the NYC Python meetup. He also authors a blog on programming topics at seriously.dontusethiscode.com</abstract>
|
||||
<description>James Powell is a professional Python programmer based in New York City. He is the chair of the NYC Python meetup nycpython.com and has spoken on Python/CPython topics at PyData SV, PyData NYC, PyTexas, PyArkansas, PyGotham, and at the NYC Python meetup. He also authors a blog on programming topics at seriously.dontusethiscode.com</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20103">James Powell</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20271">
|
||||
<title>Driving Moore's Law with Python-Powered Machine Learning: An Insider's Perspective</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T11:00:00+0200</date>
|
||||
<start>11:00</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>People talk about a Moore's Law for gene sequencing, a Moore's Law for software, etc. This talk is about *the* Moore's Law, the bull that the other "Laws" ride; and how Python-powered ML helps drive it. How do we keep making ever-smaller devices? How do we harness atomic-scale physics? Large-scale machine learning is key. The computation drives new chip designs, and those new chip designs are used for new computations, ad infinitum. High-dimensional regression, classification, active learning, optimization, ranking, clustering, density estimation, scientific visualization, massively parallel processing -- it all comes into play, and Python is powering it all.</abstract>
|
||||
<description>People talk about a Moore's Law for gene sequencing, a Moore's Law for software, etc. This talk is about *the* Moore's Law, the bull that the other "Laws" ride; and how Python-powered ML helps drive it. How do we keep making ever-smaller devices? How do we harness atomic-scale physics? Large-scale machine learning is key. The computation drives new chip designs, and those new chip designs are used for new computations, ad infinitum. High-dimensional regression, classification, active learning, optimization, ranking, clustering, density estimation, scientific visualization, massively parallel processing -- it all comes into play, and Python is powering it all.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20337">Trent McConaghy</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20259">
|
||||
<title>Interactive Analysis of (Large) Financial Data Sets</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T12:30:00+0200</date>
|
||||
<start>12:30</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>None.</abstract>
|
||||
<description>None.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20212">Yves Hilpisch</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20260">
|
||||
<title>Data Oriented Programming</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T13:20:00+0200</date>
|
||||
<start>13:20</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>Computers have traditionally been thought as tools for performing computations with numbers. Of course, its name in English has a lot to do with this conception, but in other languages, like the french 'ordinateur' (which express concepts more like sorting or classifying), one can clearly see the other side of the coin: computers can also be used to extract (usually new) information from data. Storage, reduction, classification, selection, sorting, grouping, among others, are typical operations in this 'alternate' goal of computers, and although carrying out all these tasks does imply doing a lot of computations, it also requires thinking about the computer as a different entity than the view offered by the traditional von Neumann architecture (basically a CPU with memory). In fact, when it is about programming the data handling efficiently, the most interesting part of a computer is the so-called hierarchical storage, where the different levels of caches in CPUs, the RAM memory, the SSD layers (there are several in the market already), the mechanical disks and finally, the network, are pretty much more important than the ALUs (arithmetic and logical units) in CPUs. In data handling, techniques like data deduplication and compression become critical when speaking about dealing with extremely large datasets. Moreover, distributed environments are useful mainly because of its increased storage capacities and I/O bandwidth, rather than for their aggregated computing throughput. During my talk I will describe several programming paradigms that should be taken in account when programming data oriented applications and that are usually different than those required for achieving pure computational throughput. But specially, and in a surprising turnaround, how the amazing amount of computational power in modern CPUs can also be useful for data handling as well.</abstract>
|
||||
<description>Computers have traditionally been thought as tools for performing computations with numbers. Of course, its name in English has a lot to do with this conception, but in other languages, like the french 'ordinateur' (which express concepts more like sorting or classifying), one can clearly see the other side of the coin: computers can also be used to extract (usually new) information from data. Storage, reduction, classification, selection, sorting, grouping, among others, are typical operations in this 'alternate' goal of computers, and although carrying out all these tasks does imply doing a lot of computations, it also requires thinking about the computer as a different entity than the view offered by the traditional von Neumann architecture (basically a CPU with memory). In fact, when it is about programming the data handling efficiently, the most interesting part of a computer is the so-called hierarchical storage, where the different levels of caches in CPUs, the RAM memory, the SSD layers (there are several in the market already), the mechanical disks and finally, the network, are pretty much more important than the ALUs (arithmetic and logical units) in CPUs. In data handling, techniques like data deduplication and compression become critical when speaking about dealing with extremely large datasets. Moreover, distributed environments are useful mainly because of its increased storage capacities and I/O bandwidth, rather than for their aggregated computing throughput. During my talk I will describe several programming paradigms that should be taken in account when programming data oriented applications and that are usually different than those required for achieving pure computational throughput. But specially, and in a surprising turnaround, how the amazing amount of computational power in modern CPUs can also be useful for data handling as well.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20173">Francesc Alted</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20266">
|
||||
<title>Low-rank matrix approximations in Python</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T14:10:00+0200</date>
|
||||
<start>14:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>Low-rank approximations of data matrices have become an important tool in machine learning and data mining. They allow for embedding high dimensional data in lower dimensional spaces and can therefore mitigate effects due to noise, uncover latent relations, or facilitate further processing. These properties have been proven successful in many application areas such as bio-informatics, computer vision, text processing, recommender systems, social network analysis, among others. Present day technologies are characterized by exponentially growing amounts of data. Recent advances in sensor technology, internet applications, and communication networks call for methods that scale to very large and/or growing data matrices. In this talk, we will describe how to efficiently analyze data by means of matrix factorization using the Python Matrix Factorization Toolbox (PyMF) and HDF5. We will briefly cover common methods such as k-means clustering, PCA, or Archetypal Analysis which can be easily cast as a matrix decomposition, and explain their usefulness for everyday data analysis tasks.</abstract>
|
||||
<description>Low-rank approximations of data matrices have become an important tool in machine learning and data mining. They allow for embedding high dimensional data in lower dimensional spaces and can therefore mitigate effects due to noise, uncover latent relations, or facilitate further processing. These properties have been proven successful in many application areas such as bio-informatics, computer vision, text processing, recommender systems, social network analysis, among others. Present day technologies are characterized by exponentially growing amounts of data. Recent advances in sensor technology, internet applications, and communication networks call for methods that scale to very large and/or growing data matrices. In this talk, we will describe how to efficiently analyze data by means of matrix factorization using the Python Matrix Factorization Toolbox (PyMF) and HDF5. We will briefly cover common methods such as k-means clustering, PCA, or Archetypal Analysis which can be easily cast as a matrix decomposition, and explain their usefulness for everyday data analysis tasks.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20329">Christian Thurau</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20250">
|
||||
<title>Algorithmic Trading with Zipline</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T15:05:00+0200</date>
|
||||
<start>15:05</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>Python is quickly becoming the glue language which holds together data science and related fields like quantitative finance. Zipline is a BSD-licensed quantitative trading system which allows easy backtesting of investment algorithms on historical data. The system is fundamentally event-driven and a close approximation of how live-trading systems operate. Moreover, Zipline comes "batteries included" as many common statistics like moving average and linear regression can be readily accessed from within a user-written algorithm. Input of historical data and output of performance statistics is based on Pandas DataFrames to integrate nicely into the existing Python eco-system. Furthermore, statistic and machine learning libraries like matplotlib, scipy, statsmodels, and sklearn integrate nicely to support development, analysis and visualization of state-of-the-art trading systems. Zipline is currently used in production as the backtesting engine powering Quantopian.com -- a free, community-centered platform that allows development and real-time backtesting of trading algorithms in the web browser.</abstract>
|
||||
<description>Python is quickly becoming the glue language which holds together data science and related fields like quantitative finance. Zipline is a BSD-licensed quantitative trading system which allows easy backtesting of investment algorithms on historical data. The system is fundamentally event-driven and a close approximation of how live-trading systems operate. Moreover, Zipline comes "batteries included" as many common statistics like moving average and linear regression can be readily accessed from within a user-written algorithm. Input of historical data and output of performance statistics is based on Pandas DataFrames to integrate nicely into the existing Python eco-system. Furthermore, statistic and machine learning libraries like matplotlib, scipy, statsmodels, and sklearn integrate nicely to support development, analysis and visualization of state-of-the-art trading systems. Zipline is currently used in production as the backtesting engine powering Quantopian.com -- a free, community-centered platform that allows development and real-time backtesting of trading algorithms in the web browser.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20090">Thomas Wiecki</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20256">
|
||||
<title>Speed Without Drag</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T15:55:00+0200</date>
|
||||
<start>15:55</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>Speed without drag: making code faster when there's no time to waste A practical walkthrough over the state-of-the-art of low-friction numerical Python enhancing solutions, covering: exhausting CPython, NumPy, Numba, Parakeet, Cython, Theano, Pyston, PyPy/NumPyPy and Blaze.</abstract>
|
||||
<description>Speed without drag: making code faster when there's no time to waste A practical walkthrough over the state-of-the-art of low-friction numerical Python enhancing solutions, covering: exhausting CPython, NumPy, Numba, Parakeet, Cython, Theano, Pyston, PyPy/NumPyPy and Blaze.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20109">Saul Diez-Guerra</person>
|
||||
</persons>
|
||||
</event>
|
||||
</room>
|
||||
<room name="B05/B06">
|
||||
<event id="20231">
|
||||
<title>Quantified Self: Analyzing the Big Data of our Daily Life</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T10:10:00+0200</date>
|
||||
<start>10:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>Applications for self tracking that collect, analyze, or publish personal and medical data are getting more popular. This includes either a broad variety of medical and healthcare apps in the fields of telemedicine, remote care, treatment, or interaction with patients, and a huge increasing number of self tracking apps that aims to acquire data form from people’s daily life. The Quantified Self movement goes far beyond collecting or generating medical data. It aims in gathering data of all kinds of activities, habits, or relations that could help to understand and improve one’s behavior, health, or well-being. Both, health apps as well as Quantified Self apps use either just the smartphone as data source (e.g., questionnaires, manual data input, smartphone sensors) or external devices and sensors such as ‘classical’ medical devices (e.g,. blood pressure meters) or wearable devices (e.g., wristbands or eye glasses). The data can be used to get insights into the medical condition or one’s personal life and behavior. This talk will provide an overview of the various data sources and data formats that are relevant for self tracking as well as strategies and examples for analyzing that data with Python. The talk will cover:
|
||||
Accessing local and distributed sources for the heterogeneous Quantified Self data. That includes local data files generated by smartphone apps and web applications as well as data stored on cloud resources via APIs (e.g., data that is stored by vendors of self tracking hardware or data of social media channels, weather data, traffic data etc.)
|
||||
Homogenizing the data. Especially, covering typical problems of heterogeneous Quantified Self data, such as missing data or different and non-standard data formatting.
|
||||
Analyzing and visualizing the data. Depending on the questions one has, the data can be analyzed with statistical methods or correlations. For example, to get insight into one's personal physical activities, steps data from activity trackers can be correlated to location data and weather information. The talk covers how to conduct this and other data analysis tasks with tools such as pandas and how to visualize the results.
|
||||
The examples in this talk will be shown as interactive IPython sessions.</abstract>
|
||||
<description>Applications for self tracking that collect, analyze, or publish personal and medical data are getting more popular. This includes either a broad variety of medical and healthcare apps in the fields of telemedicine, remote care, treatment, or interaction with patients, and a huge increasing number of self tracking apps that aims to acquire data form from people’s daily life. The Quantified Self movement goes far beyond collecting or generating medical data. It aims in gathering data of all kinds of activities, habits, or relations that could help to understand and improve one’s behavior, health, or well-being. Both, health apps as well as Quantified Self apps use either just the smartphone as data source (e.g., questionnaires, manual data input, smartphone sensors) or external devices and sensors such as ‘classical’ medical devices (e.g,. blood pressure meters) or wearable devices (e.g., wristbands or eye glasses). The data can be used to get insights into the medical condition or one’s personal life and behavior. This talk will provide an overview of the various data sources and data formats that are relevant for self tracking as well as strategies and examples for analyzing that data with Python. The talk will cover:
|
||||
Accessing local and distributed sources for the heterogeneous Quantified Self data. That includes local data files generated by smartphone apps and web applications as well as data stored on cloud resources via APIs (e.g., data that is stored by vendors of self tracking hardware or data of social media channels, weather data, traffic data etc.)
|
||||
Homogenizing the data. Especially, covering typical problems of heterogeneous Quantified Self data, such as missing data or different and non-standard data formatting.
|
||||
Analyzing and visualizing the data. Depending on the questions one has, the data can be analyzed with statistical methods or correlations. For example, to get insight into one's personal physical activities, steps data from activity trackers can be correlated to location data and weather information. The talk covers how to conduct this and other data analysis tasks with tools such as pandas and how to visualize the results.
|
||||
The examples in this talk will be shown as interactive IPython sessions.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20285">Andreas Schreiber</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20244">
|
||||
<title>Semantic Python: Mastering Linked Data with Python</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T11:00:00+0200</date>
|
||||
<start>11:00</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>Tim Berners-Lee defined the Semantic Web as a web of data that can be processed directly and indirectly by machines.
|
||||
More precisely, the Semantic Web can be defined as a set of standards and best practices for sharing data and the semantics of that data over the Web to be used by applications [DuCharme, 2013].
|
||||
In particular, the Semantic Web is built on top of three main pillars: the RDF (i.e., Resource Description Framework) data model, the SPARQL query language, and the OWL standard for storing vocabularies and ontologies. These standards allows the huge amount of data on the Web to be available in a unique and unified standard format, contributing to the definition of the Web of Data (WoD) [1].
|
||||
The WoD makes the web data to be reachable and easily manageable by Semantic Web tools, providing also the relationships among these data (thus practically setting up the “Web”). This collection of interrelated datasets on the Web can also be referred to as Linked Data [1].
|
||||
Two typical examples of large Linked Dataset are FreeBase, and DBPedia, which essentially provides the so called Common sense Knowledge in RDF format.
|
||||
Python offers a very powerful and easy to use library to work with Linked Data: rdflib.
|
||||
RDFLib is a lightweight and functionally complete RDF library, allowing applications to access, create and manage RDF graphs in a very Pythonic fashion.
|
||||
In this talk, a general overview of the main features provided by the rdflib package will be presented. To this end, several code examples will be discussed, along with a case study concerning the analysis of a (semantic) social graph. This case study will be focused on the integration between the networkx module and the rdflib library in order to crawl, access (via SPARQL), and analyze a Social Linked Data Graph represented using the FOAF (Friend of a Friend) schema.
|
||||
This talk is intended for a Novice level audience, assuming a good knowledge of the Python language.</abstract>
|
||||
<description>Tim Berners-Lee defined the Semantic Web as a web of data that can be processed directly and indirectly by machines.
|
||||
More precisely, the Semantic Web can be defined as a set of standards and best practices for sharing data and the semantics of that data over the Web to be used by applications [DuCharme, 2013].
|
||||
In particular, the Semantic Web is built on top of three main pillars: the RDF (i.e., Resource Description Framework) data model, the SPARQL query language, and the OWL standard for storing vocabularies and ontologies. These standards allows the huge amount of data on the Web to be available in a unique and unified standard format, contributing to the definition of the Web of Data (WoD) [1].
|
||||
The WoD makes the web data to be reachable and easily manageable by Semantic Web tools, providing also the relationships among these data (thus practically setting up the “Web”). This collection of interrelated datasets on the Web can also be referred to as Linked Data [1].
|
||||
Two typical examples of large Linked Dataset are FreeBase, and DBPedia, which essentially provides the so called Common sense Knowledge in RDF format.
|
||||
Python offers a very powerful and easy to use library to work with Linked Data: rdflib.
|
||||
RDFLib is a lightweight and functionally complete RDF library, allowing applications to access, create and manage RDF graphs in a very Pythonic fashion.
|
||||
In this talk, a general overview of the main features provided by the rdflib package will be presented. To this end, several code examples will be discussed, along with a case study concerning the analysis of a (semantic) social graph. This case study will be focused on the integration between the networkx module and the rdflib library in order to crawl, access (via SPARQL), and analyze a Social Linked Data Graph represented using the FOAF (Friend of a Friend) schema.
|
||||
This talk is intended for a Novice level audience, assuming a good knowledge of the Python language.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20299">Valerio Maggio</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20241">
|
||||
<title>Mall Analytics Using Telco Data & Pandas</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T12:30:00+0200</date>
|
||||
<start>12:30</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>This talk will be about my latest project in mall analytics, where we estimated visitor trends in malls around the globe using telco data as a basis, and employed map reduce technologies and data science to extrapolate from this basis to reality and correct for biases. We succeeded in extracting valuable information such as count of visitors per hour, demographics breakdown, competitor analysis and popularity of the mall among different parts of the surrounding areas, all the while preserving user privacy and working only with aggregated data. I will show an overview of our system's modules, how we got a first raw estimation of the visitors and their behaviours, and how we refined and evaluated this estimation using pandas, matplotlib, scikit-learn and other python libraries.</abstract>
|
||||
<description>This talk will be about my latest project in mall analytics, where we estimated visitor trends in malls around the globe using telco data as a basis, and employed map reduce technologies and data science to extrapolate from this basis to reality and correct for biases. We succeeded in extracting valuable information such as count of visitors per hour, demographics breakdown, competitor analysis and popularity of the mall among different parts of the surrounding areas, all the while preserving user privacy and working only with aggregated data. I will show an overview of our system's modules, how we got a first raw estimation of the visitors and their behaviours, and how we refined and evaluated this estimation using pandas, matplotlib, scikit-learn and other python libraries.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20302">Karolina Alexiou</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20234">
|
||||
<title>Parallel processing using python and gearman</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T13:20:00+0200</date>
|
||||
<start>13:20</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>When talking of parallel processing, some task requires a substantial set-up time. This is the case of Natural Language Processing (NLP) tasks such as classification, where models need to be loaded into memory. In these situations, we can not start a new process for every data set to be handled, but the system needs to be ready to process new incoming data. This talk will look at job queue systems, with particular focus on gearman. We will see how we are using it at Synthesio for NLP tasks; how to set up workers and clients, make it redundant and robust, monitor its activity and adapt to demand.</abstract>
|
||||
<description>When talking of parallel processing, some task requires a substantial set-up time. This is the case of Natural Language Processing (NLP) tasks such as classification, where models need to be loaded into memory. In these situations, we can not start a new process for every data set to be handled, but the system needs to be ready to process new incoming data. This talk will look at job queue systems, with particular focus on gearman. We will see how we are using it at Synthesio for NLP tasks; how to set up workers and clients, make it redundant and robust, monitor its activity and adapt to demand.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20304">Pedro Miguel Dias Cardoso</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20237">
|
||||
<title>Street Fighting Trend Research</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T14:10:00+0200</date>
|
||||
<start>14:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>This talk presents a very hands-on approach for identifying research and technology trends in various industries with a little bit of Pandas here, NLTK there and all cooked up in an IPython Notebook. Three examples featured in this talk are:
|
||||
How to find out the most interesting research topics cutting edge companies are after right now?
|
||||
How to pick sessions from a large conference program (think PyCon, PyData or Strata) that are presenting something really novel?
|
||||
How to automagically identify trends in industries such as computer vision or telecommunications?
|
||||
The talk will show how to tackle common tasks in applied trend research and technology foresight from identifying a data-source, getting the data and data cleaning to presenting the insights in meaningful visualizations.</abstract>
|
||||
<description>This talk presents a very hands-on approach for identifying research and technology trends in various industries with a little bit of Pandas here, NLTK there and all cooked up in an IPython Notebook. Three examples featured in this talk are:
|
||||
How to find out the most interesting research topics cutting edge companies are after right now?
|
||||
How to pick sessions from a large conference program (think PyCon, PyData or Strata) that are presenting something really novel?
|
||||
How to automagically identify trends in industries such as computer vision or telecommunications?
|
||||
The talk will show how to tackle common tasks in applied trend research and technology foresight from identifying a data-source, getting the data and data cleaning to presenting the insights in meaningful visualizations.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20324">Benedikt Koehler</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20274">
|
||||
<title>How to Spy with Python</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T15:05:00+0200</date>
|
||||
<start>15:05</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>This talk will walk through what the US government has done in terms of spying on US citizens and foreigners with their PRISM program, then walk through how to do exactly that with Python.</abstract>
|
||||
<description>This talk will walk through what the US government has done in terms of spying on US citizens and foreigners with their PRISM program, then walk through how to do exactly that with Python.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20282">Lynn Root</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20246">
|
||||
<title>Python and pandas as back end to real-time data driven applications</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T15:55:00+0200</date>
|
||||
<start>15:55</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>For data, and data science, to be the fuel of the 21th century, data driven applications should not be confined to dashboards and static analyses. Instead they should be the driver of the organizations that own or generates the data. Most of these applications are web-based and require real-time access to the data. However, many Big Data analyses and tools are inherently batch-driven and not well suited for real-time and performance-critical connections with applications. Trade-offs become often inevitable, especially when mixing multiple tools and data sources. In this talk we will describe our journey to build a data driven application at a large Dutch financial institution. We will dive into the issues we faced, why we chose Python and pandas and what that meant for real-time data analysis (and agile development). Important points in the talk will be, among others, the handling of geographical data, the access to hundreds of millions of records as well as the real time analysis of millions of data points.</abstract>
|
||||
<description>For data, and data science, to be the fuel of the 21th century, data driven applications should not be confined to dashboards and static analyses. Instead they should be the driver of the organizations that own or generates the data. Most of these applications are web-based and require real-time access to the data. However, many Big Data analyses and tools are inherently batch-driven and not well suited for real-time and performance-critical connections with applications. Trade-offs become often inevitable, especially when mixing multiple tools and data sources. In this talk we will describe our journey to build a data driven application at a large Dutch financial institution. We will dive into the issues we faced, why we chose Python and pandas and what that meant for real-time data analysis (and agile development). Important points in the talk will be, among others, the handling of geographical data, the access to hundreds of millions of records as well as the real time analysis of millions of data points.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20303">Giovanni Lanzani</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20263">
|
||||
<title>Dealing With Complexity</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-26T09:00:00+0200</date>
|
||||
<start>09:00</start>
|
||||
<duration>01:00</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>None.</abstract>
|
||||
<description>None.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20326">Jean-Paul Schmetz</person>
|
||||
</persons>
|
||||
</event>
|
||||
</room>
|
||||
</day>
|
||||
<day date="2014-07-27" index="3">
|
||||
<room name="B09">
|
||||
<event id="20268">
|
||||
<title>Introduction to the Signal Processing and Classification Environment pySPACE</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T10:10:00+0200</date>
|
||||
<start>10:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>This talk will give a basic introduction to the pySPACE framework and its current applications.
|
||||
|
||||
pySPACE (Signal Processing And Classification Environment) is a modular software for the processing of large data streams that has been specifically designed to enable distributed execution and empirical evaluation of signal processing chains. Various signal processing algorithms (so called nodes) are available within the software, from finite impulse response filters over data-dependent spatial filters (e.g., PCA, CSP, xDAWN) to established classifiers (e.g., SVM, LDA). pySPACE incorporates the concept of node and node chains of the Modular Toolkit for Data Processing (MDP) framework. Due to its modular architecture, the software can easily be extended with new processing nodes and more general operations. Large scale empirical investigations can be configured using simple text-configuration files in the YAML format, executed on different (distributed) computing modalities, and evaluated using an interactive graphical user interface.
|
||||
|
||||
pySPACE allows the user to connect nodes modularly and automatically benchmark the respective chains for different parameter settings and compare these with other node chains, e.g., by automatic evaluation of classification performances provided within the software. In addition, the pySPACElive mode of execution can be used for online processing of streamed data. The software specifically supports but is not limited to EEG data. Any kind of time series or feature vector data can be processed and analyzed.
|
||||
|
||||
pySPACE additionally provides interfaces to specialized signal processing libraries such as SciPy, scikit-learn, LIBSVM, the WEKA Machine Learning Framework, and the Maja Machine Learning Framework (MMLF).
|
||||
|
||||
Web page: http://pyspace.github.io/pyspace/</abstract>
|
||||
<description>This talk will give a basic introduction to the pySPACE framework and its current applications.
|
||||
|
||||
pySPACE (Signal Processing And Classification Environment) is a modular software for the processing of large data streams that has been specifically designed to enable distributed execution and empirical evaluation of signal processing chains. Various signal processing algorithms (so called nodes) are available within the software, from finite impulse response filters over data-dependent spatial filters (e.g., PCA, CSP, xDAWN) to established classifiers (e.g., SVM, LDA). pySPACE incorporates the concept of node and node chains of the Modular Toolkit for Data Processing (MDP) framework. Due to its modular architecture, the software can easily be extended with new processing nodes and more general operations. Large scale empirical investigations can be configured using simple text-configuration files in the YAML format, executed on different (distributed) computing modalities, and evaluated using an interactive graphical user interface.
|
||||
|
||||
pySPACE allows the user to connect nodes modularly and automatically benchmark the respective chains for different parameter settings and compare these with other node chains, e.g., by automatic evaluation of classification performances provided within the software. In addition, the pySPACElive mode of execution can be used for online processing of streamed data. The software specifically supports but is not limited to EEG data. Any kind of time series or feature vector data can be processed and analyzed.
|
||||
|
||||
pySPACE additionally provides interfaces to specialized signal processing libraries such as SciPy, scikit-learn, LIBSVM, the WEKA Machine Learning Framework, and the Maja Machine Learning Framework (MMLF).
|
||||
|
||||
Web page: http://pyspace.github.io/pyspace/</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20331">Mario Michael Krell</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20226">
|
||||
<title>Fast Serialization of Numpy Arrays with Bloscpack</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T11:00:00+0200</date>
|
||||
<start>11:00</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>Bloscpack [1] is a reference implementation and file-format for fast serialization of numerical data. It features lightweight, chunked and compressed storage, based on the extremely fast Blosc [2] metacodec and supports serialization of Numpy arrays out-of-the-box. Recently, Blosc -- being the metacodec that it is -- has received support for using the popular and widely used Snappy [3], LZ4 [4], and ZLib [5] codecs, and so, now Bloscpack supports serializing Numpy arrays easily with those codecs! In this talk I will present recent benchmarks of Bloscpack performance on a variety of artificial and real-world datasets with a special focus on the newly available codecs. In these benchmarks I will compare Bloscpack, both performance and usability wise, to alternatives such as Numpy's native offerings (NPZ and NPY), HDF5/PyTables [6], and if time permits, to novel bleeding edge solutions. Lastly I will argue that compressed and chunked storage format such as Bloscpack can be and somewhat already is a useful substrate on which to build more powerful applications such as online analytical processing engines and distributed computing frameworks. [1]: https://github.com/Blosc/bloscpack [2]: https://github.com/Blosc/c-blosc/ [3]: http://code.google.com/p/snappy/ [4]: http://code.google.com/p/lz4/ [5]: http://www.zlib.net/ [6]: http://www.pytables.org/moin</abstract>
|
||||
<description>Bloscpack [1] is a reference implementation and file-format for fast serialization of numerical data. It features lightweight, chunked and compressed storage, based on the extremely fast Blosc [2] metacodec and supports serialization of Numpy arrays out-of-the-box. Recently, Blosc -- being the metacodec that it is -- has received support for using the popular and widely used Snappy [3], LZ4 [4], and ZLib [5] codecs, and so, now Bloscpack supports serializing Numpy arrays easily with those codecs! In this talk I will present recent benchmarks of Bloscpack performance on a variety of artificial and real-world datasets with a special focus on the newly available codecs. In these benchmarks I will compare Bloscpack, both performance and usability wise, to alternatives such as Numpy's native offerings (NPZ and NPY), HDF5/PyTables [6], and if time permits, to novel bleeding edge solutions. Lastly I will argue that compressed and chunked storage format such as Bloscpack can be and somewhat already is a useful substrate on which to build more powerful applications such as online analytical processing engines and distributed computing frameworks. [1]: https://github.com/Blosc/bloscpack [2]: https://github.com/Blosc/c-blosc/ [3]: http://code.google.com/p/snappy/ [4]: http://code.google.com/p/lz4/ [5]: http://www.zlib.net/ [6]: http://www.pytables.org/moin</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20308">Valentin Haenel</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20251">
|
||||
<title>Exploring Patent Data with Python</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T13:20:00+0200</date>
|
||||
<start>13:20</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>Experiences from building a recommendation engine for patent search using pythonic NLP and topic modeling tools such as Gensim.</abstract>
|
||||
<description>Experiences from building a recommendation engine for patent search using pythonic NLP and topic modeling tools such as Gensim.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20315">Franta Polach</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20247">
|
||||
<title>Networks meet Finance in Python</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T14:10:00+0200</date>
|
||||
<start>14:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>In the course of the 2008 Lehman and the subsequent European debt crisis, it became clear that both industry and regulators had underestimated the degree of interconnectedness and interdependency across financial assets and institutions. This type of information is especially well represented by network models, which had first gained popularity in other areas, such as computer science, biology and social sciences.
|
||||
Although in its early stages, the study of network models in finance is gaining momentum and could be key to building the next generation of risk management tools and averting future financial crises. After a short overview of some of the most relevant work in the field, I will walk through (real data) examples using the pydata toolset.</abstract>
|
||||
<description>In the course of the 2008 Lehman and the subsequent European debt crisis, it became clear that both industry and regulators had underestimated the degree of interconnectedness and interdependency across financial assets and institutions. This type of information is especially well represented by network models, which had first gained popularity in other areas, such as computer science, biology and social sciences.
|
||||
Although in its early stages, the study of network models in finance is gaining momentum and could be key to building the next generation of risk management tools and averting future financial crises. After a short overview of some of the most relevant work in the field, I will walk through (real data) examples using the pydata toolset.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20298">Miguel Vaz</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20238">
|
||||
<title>IPython and Sympy to Develop a Kalman Filter for Multisensor Data Fusion</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T15:05:00+0200</date>
|
||||
<start>15:05</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>The best filter algorithm to fuse multiple sensor information is the Kalman filter. To implement it for non-linear dynamic models (e.g. a car), analytic calculations for the matrices are necessary. In this talk, one can see, how the IPython Notebook and Sympy help to develop an optimal filter to fuse sensor information from different sources (e.g. acceleration, speed and GPS position) to get an optimal estimate. more: http://balzer82.github.io/Kalman/</abstract>
|
||||
<description>The best filter algorithm to fuse multiple sensor information is the Kalman filter. To implement it for non-linear dynamic models (e.g. a car), analytic calculations for the matrices are necessary. In this talk, one can see, how the IPython Notebook and Sympy help to develop an optimal filter to fuse sensor information from different sources (e.g. acceleration, speed and GPS position) to get an optimal estimate. more: http://balzer82.github.io/Kalman/</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20306">Paul Balzer</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20232">
|
||||
<title>Massively Parallel Processing with Procedural Python</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T15:55:00+0200</date>
|
||||
<start>15:55</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B09</room>
|
||||
<language>en</language>
|
||||
<abstract>The Python data ecosystem has grown beyond the confines of single machines to embrace scalability. Here we describe one of our approaches to scaling, which is already being used in production systems. The goal of in-database analytics is to bring the calculations to the data, reducing transport costs and I/O bottlenecks. Using PL/Python we can run parallel queries across terabytes of data using not only pure SQL but also familiar PyData packages such as scikit-learn and nltk. This approach can also be used with PL/R to make use of a wide variety of R packages. We look at examples on Postgres compatible systems such as the Greenplum Database and on Hadoop through Pivotal HAWQ. We will also introduce MADlib, Pivotal’s open source library for scalable in-database machine learning, which uses Python to glue SQL queries to low level C++ functions and is also usable through the PyMADlib package.</abstract>
|
||||
<description>The Python data ecosystem has grown beyond the confines of single machines to embrace scalability. Here we describe one of our approaches to scaling, which is already being used in production systems. The goal of in-database analytics is to bring the calculations to the data, reducing transport costs and I/O bottlenecks. Using PL/Python we can run parallel queries across terabytes of data using not only pure SQL but also familiar PyData packages such as scikit-learn and nltk. This approach can also be used with PL/R to make use of a wide variety of R packages. We look at examples on Postgres compatible systems such as the Greenplum Database and on Hadoop through Pivotal HAWQ. We will also introduce MADlib, Pivotal’s open source library for scalable in-database machine learning, which uses Python to glue SQL queries to low level C++ functions and is also usable through the PyMADlib package.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20310">Ronert Obst</person>
|
||||
</persons>
|
||||
</event>
|
||||
</room>
|
||||
<room name="B05/B06">
|
||||
<event id="20249">
|
||||
<title>ABBY - A Django app to document your A/B tests</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T10:10:00+0200</date>
|
||||
<start>10:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>ABBY is a Django app that helps you manage your A/B tests. The main objective is to document all tests happening in your company, in order to better understand which measures work and which don't. Thereby leading to a better understanding of your product and your customer. ABBY offers a front-end that makes it easy to edit, delete or create tests and to add evaluation results. Further, it provides a RESTful API to integrate directly with our platform to easily handle A/B tests without touching the front-end. Another notable feature is the possibility to upload a CSV file and have the A/B test auto-evaluated, although this feature is considered highly experimental. At Jimdo, a do-it-yourself website builder, we have a team of about 180 people from different countries and with professional backgrounds just as diverse. Therefore it is crucial to have tools that allow having a common perspective on the tests. This facilitates having data informed discussions and to deduce effective solutions. In our opinion tools like ABBY are cornerstones to achieve the ultimate goal of being a data-driven company. It enables all our co-workers to review past and plan future tests to further improve our product and to raise the happiness of our customers. The proposed talk will give a detailed overview of ABBY, which eventually will be open-sourced, and its capabilities. I will further discuss the motivation behind the app and the influence it has on the way our company is becoming increasingly data driven.</abstract>
|
||||
<description>ABBY is a Django app that helps you manage your A/B tests. The main objective is to document all tests happening in your company, in order to better understand which measures work and which don't. Thereby leading to a better understanding of your product and your customer. ABBY offers a front-end that makes it easy to edit, delete or create tests and to add evaluation results. Further, it provides a RESTful API to integrate directly with our platform to easily handle A/B tests without touching the front-end. Another notable feature is the possibility to upload a CSV file and have the A/B test auto-evaluated, although this feature is considered highly experimental. At Jimdo, a do-it-yourself website builder, we have a team of about 180 people from different countries and with professional backgrounds just as diverse. Therefore it is crucial to have tools that allow having a common perspective on the tests. This facilitates having data informed discussions and to deduce effective solutions. In our opinion tools like ABBY are cornerstones to achieve the ultimate goal of being a data-driven company. It enables all our co-workers to review past and plan future tests to further improve our product and to raise the happiness of our customers. The proposed talk will give a detailed overview of ABBY, which eventually will be open-sourced, and its capabilities. I will further discuss the motivation behind the app and the influence it has on the way our company is becoming increasingly data driven.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20301">Andy Goldschmidt</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20228">
|
||||
<title>Faster than Google? Optimization lessons in Python.</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T11:00:00+0200</date>
|
||||
<start>11:00</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>Lessons from translating Google's deep learning algorithm into Python. Can a Python port compete with Google's tightly optimized C code? Spoiler: making use of Python and its vibrant ecosystem (generators, NumPy, Cython...), the optimized Python port is cleaner, more readable and clocks in—somewhat astonishingly—4x faster than Google's C. This is 12,000x faster than a naive, pure Python implementation and 100x faster than an optimized NumPy implementation. The talk will go over what went well (data streaming to process humongous datasets, parallelization and avoiding GIL with Cython, plugging into BLAS) as well as trouble along the way (BLAS idiosyncrasies, Cython issues, dead ends). The quest is also documented on my blog.</abstract>
|
||||
<description>Lessons from translating Google's deep learning algorithm into Python. Can a Python port compete with Google's tightly optimized C code? Spoiler: making use of Python and its vibrant ecosystem (generators, NumPy, Cython...), the optimized Python port is cleaner, more readable and clocks in—somewhat astonishingly—4x faster than Google's C. This is 12,000x faster than a naive, pure Python implementation and 100x faster than an optimized NumPy implementation. The talk will go over what went well (data streaming to process humongous datasets, parallelization and avoiding GIL with Cython, plugging into BLAS) as well as trouble along the way (BLAS idiosyncrasies, Cython issues, dead ends). The quest is also documented on my blog.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20286">Radim Řehůřek</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20275">
|
||||
<title>Conda: a cross-platform package manager for any binary distribution</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T13:20:00+0200</date>
|
||||
<start>13:20</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>Conda is an open source package manager, which can be used to manage binary packages and virtual environments on any platform. It is the package manager of the Anaconda Python distribution, although it can be used independently of Anaconda. We will look at how conda solves many of the problems that have plagued Python packaging in the past, followed by a demonstration of its features.
|
||||
We will look at the issues that have plagued packaging in the Python ecosystem in the past, and discuss how Conda solves these problems. We will show how to use conda to manage multiple environments. Finally, we will look at how to build your own conda packages.</abstract>
|
||||
<description>Conda is an open source package manager, which can be used to manage binary packages and virtual environments on any platform. It is the package manager of the Anaconda Python distribution, although it can be used independently of Anaconda. We will look at how conda solves many of the problems that have plagued Python packaging in the past, followed by a demonstration of its features.
|
||||
We will look at the issues that have plagued packaging in the Python ecosystem in the past, and discuss how Conda solves these problems. We will show how to use conda to manage multiple environments. Finally, we will look at how to build your own conda packages.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20339">Ilan Schnell</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20235">
|
||||
<title>Make sense of your (big) data using Elasticsearch</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T14:10:00+0200</date>
|
||||
<start>14:10</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>In this talk I would like to show you a few real-life use-cases where Elasticsearch can help you make sense of your data. We will start with the most basic use case of searching your unstructured data and move on to more advanced topics such as faceting, aggregations and structured search. I would like to demonstrate that the very same tool and dataset can be used for real-time analytics as well as the basis for your more advanced data processing jobs. All in a distributed environment capable of handling terabyte-sized datasets. All examples will be shown with real data and python code demoing the new libraries we have been working on to make this process easier.</abstract>
|
||||
<description>In this talk I would like to show you a few real-life use-cases where Elasticsearch can help you make sense of your data. We will start with the most basic use case of searching your unstructured data and move on to more advanced topics such as faceting, aggregations and structured search. I would like to demonstrate that the very same tool and dataset can be used for real-time analytics as well as the basis for your more advanced data processing jobs. All in a distributed environment capable of handling terabyte-sized datasets. All examples will be shown with real data and python code demoing the new libraries we have been working on to make this process easier.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20313">Honza Král</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20257">
|
||||
<title>Intro to ConvNets</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T15:05:00+0200</date>
|
||||
<start>15:05</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>We will give an introduction to the recent development of Deep Neural Networks and focus in particular on Convolution Networks which are well suited to image classification problems. We will also provide you with the practical knowledge of how to get started with using ConvNets via the cuda-convnet python library.</abstract>
|
||||
<description>We will give an introduction to the recent development of Deep Neural Networks and focus in particular on Convolution Networks which are well suited to image classification problems. We will also provide you with the practical knowledge of how to get started with using ConvNets via the cuda-convnet python library.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20316">Kashif Rasul</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20240">
|
||||
<title>Pandas' Thumb: unexpected evolutionary use of a Python library.</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T15:55:00+0200</date>
|
||||
<start>15:55</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>Lawyers are not famed for their mathematical ability. On the contrary - the law almost self-selects as a career choice for the numerically-challenged. So when the one UK tax that property lawyers generally felt comfortable dealing with (lease duty) was replaced with a new tax (stamp duty land tax) that was both arithmetically demanding and conceptually complex, it was inevitable that significant frustrations would arise. Suddenly, lawyers had to deal with concepts such as net present valuations, aggregation of several streams of fluctuating figures, and constant integration of a complex suite of credits and disregards. This talk is a description of how - against a backdrop of data-drunk tax authorities, legal pressures on businesses to have appropriate compliance systems in place, and the constant pressure on their law firms to commoditise compliance services, Pandas may be about to make a foray from its venerable financial origins into a brave new fiscal world - and can revolutionise an industry by doing so. A case study covering the author's development of a Pandas-based stamp duty land tax engine ("ORVILLE") is discussed, and the inherent usefulness of Pandas in the world of tax analysis is explored.</abstract>
|
||||
<description>Lawyers are not famed for their mathematical ability. On the contrary - the law almost self-selects as a career choice for the numerically-challenged. So when the one UK tax that property lawyers generally felt comfortable dealing with (lease duty) was replaced with a new tax (stamp duty land tax) that was both arithmetically demanding and conceptually complex, it was inevitable that significant frustrations would arise. Suddenly, lawyers had to deal with concepts such as net present valuations, aggregation of several streams of fluctuating figures, and constant integration of a complex suite of credits and disregards. This talk is a description of how - against a backdrop of data-drunk tax authorities, legal pressures on businesses to have appropriate compliance systems in place, and the constant pressure on their law firms to commoditise compliance services, Pandas may be about to make a foray from its venerable financial origins into a brave new fiscal world - and can revolutionise an industry by doing so. A case study covering the author's development of a Pandas-based stamp duty land tax engine ("ORVILLE") is discussed, and the inherent usefulness of Pandas in the world of tax analysis is explored.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20317">Chris Nyland</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20262">
|
||||
<title>Commodity Machine Learning</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T09:00:00+0200</date>
|
||||
<start>09:00</start>
|
||||
<duration>01:00</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>None.</abstract>
|
||||
<description>None.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20203">Andreas Mueller</person>
|
||||
</persons>
|
||||
</event>
|
||||
<event id="20261">
|
||||
<title>Building the PyData Community</title>
|
||||
<track>Other</track>
|
||||
<date>2014-07-27T12:30:00+0200</date>
|
||||
<start>12:30</start>
|
||||
<duration>00:40</duration>
|
||||
<recording>
|
||||
<license/>
|
||||
<optout>false</optout>
|
||||
</recording>
|
||||
<room>B05/B06</room>
|
||||
<language>en</language>
|
||||
<abstract>None.</abstract>
|
||||
<description>None.</description>
|
||||
<type>talk</type>
|
||||
<persons>
|
||||
<person id="20036">Travis Oliphant</person>
|
||||
</persons>
|
||||
</event>
|
||||
</room>
|
||||
</day>
|
||||
</schedule>
|
||||
|
||||
|
||||
|
Loading…
Add table
Reference in a new issue