
PhD Student
Goethe-Universität Frankfurt am Main
Robert-Mayer-Straße 10
Room 401d
D-60325 Frankfurt am Main
Postfach / P.O. Box 154
D-60054 Frankfurt am Main (use for package delivery)
Phone:
Mail:
Office hours: Tuesday, 8–10 AM
Teaching
- Practical: Multilingual systems with AI
Publications
2024
Alexander Mehler, Mevlüt Bagci, Patrick Schrottenbacher, Alexander Henlein, Maxim Konca, Giuseppe Abrami, Kevin Bönisch, Manuel Stoeckel, Christian Spiekermann and Juliane Engel. 2024. Towards New Data Spaces for the Study of Multiple Documents with Va.Si.Li-Lab: A Conceptual Analysis. In: Students', Graduates' and Young Professionals' Critical Use of Online Information: Digital Performance Assessment and Training within and across Domains, 259–303. Springer Nature Switzerland.
BibTeX
@inbook{Mehler:et:al:2024:a,
author = {Mehler, Alexander and Bagci, Mevl{\"u}t and Schrottenbacher, Patrick
and Henlein, Alexander and Konca, Maxim and Abrami, Giuseppe and B{\"o}nisch, Kevin
and Stoeckel, Manuel and Spiekermann, Christian and Engel, Juliane},
editor = {Zlatkin-Troitschanskaia, Olga and Nagel, Marie-Theres and Klose, Verena
and Mehler, Alexander},
title = {Towards New Data Spaces for the Study of Multiple Documents with
Va.Si.Li-Lab: A Conceptual Analysis},
booktitle = {Students', Graduates' and Young Professionals' Critical Use of
Online Information: Digital Performance Assessment and Training
within and across Domains},
year = {2024},
publisher = {Springer Nature Switzerland},
address = {Cham},
pages = {259--303},
abstract = {The constitution of multiple documents has so far been studied
essentially as a process in which a single learner consults a
number (of segments) of different documents in the context of
the task at hand in order to construct a mental model for the
purpose of completing the task. As a result of this research focus,
the constitution of multiple documents appears predominantly as
a monomodal, non-interactive process in which mainly textual units
are studied, supplemented by images, text-image relations and
comparable artifacts. This approach is reflected in the contextual
fixity of the research design, in which the learners under study
search for information using suitably equipped computers. If,
on the other hand, we consider the openness of multi-agent learning
situations, this scenario lacks the aspects of interactivity,
contextual openness and, above all, the multimodality of information
objects, information processing and information exchange. This
is where the chapter comes in. It describes Va.Si.Li-Lab as an
instrument for multimodal measurement for studying and modeling
multiple documents in the context of interactive learning in a
multi-agent environment. To this end, the chapter places Va.Si.Li-Lab
in the spectrum of evolutionary approaches that vary the combination
of human and machine innovation and selection. It also combines
the requirements of multimodal representational learning with
various aspects of contextual plasticity to prepare Va.Si.Li-Lab
as a system that can be used for experimental research. The chapter
is conceptual in nature, designing a system of requirements using
the example of Va.Si.Li-Lab to outline an experimental environment
in which the study of Critical Online Reasoning (COR) as a group
process becomes possible. Although the chapter illustrates some
of these requirements with realistic data from the field of simulation-based
learning, the focus is still conceptual rather than experimental,
hypothesis-driven. That is, the chapter is concerned with the
design of a technology for future research into COR processes.},
isbn = {978-3-031-69510-0},
doi = {10.1007/978-3-031-69510-0_12},
url = {https://doi.org/10.1007/978-3-031-69510-0_12}
}
Giuseppe Abrami, Mevlüt Bagci and Alexander Mehler. 2024. German Parliamentary Corpus (GerParCor) Reloaded. Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), 7707–7716.
BibTeX
@inproceedings{Abrami:et:al:2024:a,
abstract = {In 2022, the largest German-speaking corpus of parliamentary protocols
from three different centuries, on a national and federal level
from the countries of Germany, Austria, Switzerland and Liechtenstein,
was collected and published - GerParCor. Through GerParCor, it
became possible to provide for the first time various parliamentary
protocols which were not available digitally and, moreover, could
not be retrieved and processed in a uniform manner. Furthermore,
GerParCor was additionally preprocessed using NLP methods and
made available in XMI format. In this paper, GerParCor is significantly
updated by including all new parliamentary protocols in the corpus,
as well as adding and preprocessing further parliamentary protocols
previously not covered, so that a period up to 1797 is now covered.
Besides the integration of a new, state-of-the-art and appropriate
NLP preprocessing for the handling of large text corpora, this
update also provides an overview of the further reuse of GerParCor
by presenting various provisioning capabilities such as API's,
among others.},
address = {Torino, Italy},
author = {Abrami, Giuseppe and Bagci, Mevl{\"u}t and Mehler, Alexander},
booktitle = {Proceedings of the 2024 Joint International Conference on Computational
Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
editor = {Calzolari, Nicoletta and Kan, Min-Yen and Hoste, Veronique and Lenci, Alessandro
and Sakti, Sakriani and Xue, Nianwen},
pages = {7707--7716},
publisher = {ELRA and ICCL},
title = {{G}erman Parliamentary Corpus ({G}er{P}ar{C}or) Reloaded},
url = {https://aclanthology.org/2024.lrec-main.681},
pdf = {https://aclanthology.org/2024.lrec-main.681.pdf},
poster = {https://www.texttechnologylab.org/wp-content/uploads/2024/05/GerParCor_Reloaded_Poster.pdf},
video = {https://www.youtube.com/watch?v=5X-w_oXOAYo},
keywords = {gerparcor,corpus},
year = {2024}
}
2023
Alexander Henlein, Andy Lücking, Mevlüt Bagci and Alexander Mehler. 2023. Towards grounding multimodal semantics in interaction data with Va.Si.Li-Lab. Proceedings of the 8th Conference on Gesture and Speech in Interaction (GESPIN).
BibTeX
@inproceedings{Henlein:et:al:2023c,
title = {Towards grounding multimodal semantics in interaction data with Va.Si.Li-Lab},
author = {Henlein, Alexander and L{\"u}cking, Andy and Bagci, Mevl{\"u}t and Mehler, Alexander},
booktitle = {Proceedings of the 8th Conference on Gesture and Speech in Interaction (GESPIN)},
location = {Nijmegen, Netherlands},
year = {2023},
keywords = {vasililab},
pdf = {https://www.gespin2023.nl/documents/talks_and_posters/GeSpIn_2023_papers/GeSpIn_2023_paper_1692.pdf}
}
Giuseppe Abrami, Alexander Mehler, Mevlüt Bagci, Patrick Schrottenbacher, Alexander Henlein, Christian Spiekermann, Juliane Engel and Jakob Schreiber. 2023. Va.Si.Li-Lab as a Collaborative Multi-User Annotation Tool in Virtual Reality and Its Potential Fields of Application. Proceedings of the 34th ACM Conference on Hypertext and Social Media.
BibTeX
@inproceedings{Abrami:et:al:2023,
author = {Abrami, Giuseppe and Mehler, Alexander and Bagci, Mevl\"{u}t and Schrottenbacher, Patrick
and Henlein, Alexander and Spiekermann, Christian and Engel, Juliane
and Schreiber, Jakob},
title = {Va.Si.Li-Lab as a Collaborative Multi-User Annotation Tool in
Virtual Reality and Its Potential Fields of Application},
year = {2023},
isbn = {9798400702327},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3603163.3609076},
doi = {10.1145/3603163.3609076},
abstract = {During the last thirty years a variety of hypertext approaches
and virtual environments -- some virtual hypertext environments
-- have been developed and discussed. Although the development
of virtual and augmented reality technologies is rapid and improving,
and many technologies can be used at affordable conditions, their
usability for hypertext systems has not yet been explored. At
the same time, even for virtual three-dimensional virtual and
augmented environments, there is no generally accepted concept
that is similar or nearly as elegant as hypertext. This gap will
have to be filled in the next years and a good concept should
be developed; in this article we aim to contribute in this direction
and also introduce a prototype for a possible implementation of
criteria for virtual hypertext simulations.},
booktitle = {Proceedings of the 34th ACM Conference on Hypertext and Social Media},
articleno = {22},
numpages = {9},
keywords = {VaSiLiLab, virtual hypertext, virtual reality, virtual reality simulation, authoring system},
location = {Rome, Italy},
series = {HT '23},
pdf = {https://dl.acm.org/doi/pdf/10.1145/3603163.3609076}
}
Alexander Mehler, Mevlüt Bagci, Alexander Henlein, Giuseppe Abrami, Christian Spiekermann, Patrick Schrottenbacher, Maxim Konca, Andy Lücking, Juliane Engel, Marc Quintino, Jakob Schreiber, Kevin Saukel and Olga Zlatkin-Troitschanskaia. 2023. A Multimodal Data Model for Simulation-Based Learning with Va.Si.Li-Lab. Digital Human Modeling and Applications in Health, Safety, Ergonomics and Risk Management, 539–565.
BibTeX
@inproceedings{Mehler:et:al:2023:a,
abstract = {Simulation-based learning is a method in which learners learn
to master real-life scenarios and tasks from simulated application
contexts. It is particularly suitable for the use of VR technologies,
as these allow immersive experiences of the targeted scenarios.
VR methods are also relevant for studies on online learning, especially
in groups, as they provide access to a variety of multimodal learning
and interaction data. However, VR leads to a trade-off between
technological conditions of the observability of such data and
the openness of learner behavior. We present Va.Si.Li-Lab, a VR-Lab
for Simulation-based Learning developed to address this trade-off.
Va.Si.Li-Lab uses a graph-theoretical model based on hypergraphs
to represent the data diversity of multimodal learning and interaction.
We develop this data model in relation to mono- and multimodal,
intra- and interpersonal data and interleave it with ISO-Space
to describe distributed multiple documents from the perspective
of their interactive generation. The paper adds three use cases
to motivate the broad applicability of Va.Si.Li-Lab and its data
model.},
address = {Cham},
author = {Mehler, Alexander and Bagci, Mevl{\"u}t and Henlein, Alexander
and Abrami, Giuseppe and Spiekermann, Christian and Schrottenbacher, Patrick
and Konca, Maxim and L{\"u}cking, Andy and Engel, Juliane and Quintino, Marc
and Schreiber, Jakob and Saukel, Kevin and Zlatkin-Troitschanskaia, Olga},
booktitle = {Digital Human Modeling and Applications in Health, Safety, Ergonomics
and Risk Management},
editor = {Duffy, Vincent G.},
isbn = {978-3-031-35741-1},
pages = {539--565},
publisher = {Springer Nature Switzerland},
title = {A Multimodal Data Model for Simulation-Based Learning with Va.Si.Li-Lab},
year = {2023},
doi = {10.1007/978-3-031-35741-1_39}
}
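The data model sketched in the abstract above is based on hypergraphs, that is, on edges that may connect any number of nodes at once. As a rough illustration of that general idea only (the identifiers and event names below are hypothetical and do not reproduce the actual Va.Si.Li-Lab schema), a minimal Python sketch could look like this:

```python
# Illustrative sketch only: a minimal hypergraph over multimodal interaction
# events. Node and edge names are hypothetical, not the Va.Si.Li-Lab schema.
from dataclasses import dataclass, field


@dataclass
class Hypergraph:
    nodes: set = field(default_factory=set)
    # Each hyperedge connects an arbitrary number of nodes, e.g. all events
    # that belong to one interaction unit of one or several learners.
    edges: dict = field(default_factory=dict)

    def add_edge(self, label, members):
        members = frozenset(members)
        self.nodes |= members
        self.edges[label] = members

    def incident_edges(self, node):
        """Return the labels of all hyperedges containing the given node."""
        return [label for label, members in self.edges.items() if node in members]


hg = Hypergraph()
# One hyperedge bundles speech, gaze and gesture events of a single learner
# (intrapersonal data); another bundles events of several learners
# (interpersonal data), as distinguished in the abstract.
hg.add_edge("learner1:utterance", {"speech:hello", "gaze:doc3", "gesture:point"})
hg.add_edge("group:joint_attention", {"gaze:doc3", "learner2:gaze:doc3"})
print(hg.incident_edges("gaze:doc3"))  # node shared by both hyperedges
```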
2022
Giuseppe Abrami, Mevlüt Bagci, Leon Hammerla and Alexander Mehler. 2022. German Parliamentary Corpus (GerParCor). Proceedings of the Language Resources and Evaluation Conference, 1900–1906.
BibTeX
@inproceedings{Abrami:Bagci:Hammerla:Mehler:2022,
author = {Abrami, Giuseppe and Bagci, Mevl{\"u}t and Hammerla, Leon and Mehler, Alexander},
editor = {Calzolari, Nicoletta and B\'echet, Fr\'ed\'eric and Blache, Philippe
and Choukri, Khalid and Cieri, Christopher and Declerck, Thierry and Goggi, Sara
and Isahara, Hitoshi and Maegaard, Bente and Mariani, Joseph and Mazo, H\'el\`ene
and Odijk, Jan and Piperidis, Stelios},
title = {German Parliamentary Corpus (GerParCor)},
booktitle = {Proceedings of the Language Resources and Evaluation Conference},
year = {2022},
address = {Marseille, France},
publisher = {European Language Resources Association},
pages = {1900--1906},
abstract = {Parliamentary debates represent a large and partly unexploited
treasure trove of publicly accessible texts. In the German-speaking
area, there is a certain deficit of uniformly accessible and annotated
corpora covering all German-speaking parliaments at the national
and federal level. To address this gap, we introduce the German
Parliamentary Corpus (GerParCor). GerParCor is a genre-specific
corpus of (predominantly historical) German-language parliamentary
protocols from three centuries and four countries, including state
and federal level data. In addition, GerParCor contains conversions
of scanned protocols and, in particular, of protocols in Fraktur
converted via an OCR process based on Tesseract. All protocols
were preprocessed by means of the NLP pipeline of spaCy3 and automatically
annotated with metadata regarding their session date. GerParCor
is made available in the XMI format of the UIMA project. In this
way, GerParCor can be used as a large corpus of historical texts
in the field of political communication for various tasks in NLP.},
url = {https://aclanthology.org/2022.lrec-1.202},
poster = {https://www.texttechnologylab.org/wp-content/uploads/2022/06/GerParCor_LREC_2022.pdf},
keywords = {gerparcor},
pdf = {http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.202.pdf}
}
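The abstract above notes that GerParCor protocols are preprocessed with the spaCy 3 pipeline and distributed in the UIMA XMI format. As a hedged illustration of how such a file could be consumed in Python (the file names, type system and model choice below are assumptions for this sketch, not part of the GerParCor release), one might write:

```python
# Minimal sketch (not the official GerParCor tooling): read one protocol from
# its UIMA XMI serialization with dkpro-cassis and re-run a spaCy pipeline on
# the raw text. Paths and the German model are assumptions.
import spacy
from cassis import load_typesystem, load_cas_from_xmi

# Hypothetical paths to a type system and a single protocol file.
with open("TypeSystem.xml", "rb") as f:
    typesystem = load_typesystem(f)
with open("protocol.xmi", "rb") as f:
    cas = load_cas_from_xmi(f, typesystem=typesystem)

text = cas.sofa_string  # the plain protocol text stored in the CAS

nlp = spacy.load("de_core_news_sm")  # any German spaCy 3 model
doc = nlp(text[:100_000])            # cap the input for this toy example
print(sum(1 for _ in doc.sents), "sentences")
print([(token.text, token.pos_) for token in list(doc)[:10]])
```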