nunnari.bib

@inproceedings{nunnari24LREC-DGSFabeln-1,
  author = {Fabrizio Nunnari and Eleftherios Avramidis and Cristina España-Bonet and Marco González and Anna Hennes and Patrick Gebhard},
  title = {DGS-Fabeln-1: A Multi-Angle Parallel Corpus of Fairy Tales between German Sign Language and German Text},
  booktitle = {Proceedings of the Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
  year = {2024},
  location = {Torino, Italy}
}
@inproceedings{nunnari23ACII-UnderstandingPAD,
  author = {Nunnari, Fabrizio and Nicora, Matteo Lavit and Prajod, Pooja and Beyrodt, Sebastian and Chehayeb, Lara and Andre, Elisabeth and Gebhard, Patrick and Malosio, Matteo and Tsovaltzi, Dimitra},
  booktitle = {2023 11th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)},
  location = {Cambridge, MA, USA},
  title = {Understanding and mapping pleasure, arousal and dominance social signals to robot-avatar behavior},
  year = {2023},
  pages = {1--8},
  publisher = {IEEE},
  doi = {10.1109/ACIIW59127.2023.10388078},
  abstract = {We present an analysis of the pleasure, arousal, and dominance social signals inferred from people's faces, and how, despite their noisy nature, these can be used to drive a model of theory-based interventions for a robot-avatar agent in a working space. The analysis clearly shows the need for data pre-filtering and per-user calibration. The proposed post-processing method helps quantify the parameters needed to control the agent's intervention frequency, while still leaving the experimenter with a run-time adjustable global control of its sensitivity.}
}
@inproceedings{beyrodt23IVA-BASSF,
  author = {Beyrodt, Sebastian and Nicora, Matteo Lavit and Nunnari, Fabrizio and Chehayeb, Lara and Prajod, Pooja and Schneeberger, Tanja and Andr\'{e}, Elisabeth and Malosio, Matteo and Gebhard, Patrick and Tsovaltzi, Dimitra},
  title = {Socially Interactive Agents as Cobot Avatars: Developing a Model to Support Flow Experiences and Well-Being in the Workplace},
  year = {2023},
  isbn = {9781450399944},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3570945.3607349},
  doi = {10.1145/3570945.3607349},
  abstract = {This study evaluates a socially interactive agent to create an embodied cobot. It tests a real-time continuous emotional modeling method and an aligned transparent behavioral model, BASSF (boredom, anxiety, self-efficacy, self-compassion, flow). The BASSF model anticipates and counteracts counterproductive emotional experiences of operators working under stress with cobots on tedious tasks. The flow experience is represented in the three-dimensional pleasure, arousal, and dominance (PAD) space. The embodied covatar (cobot and avatar) is introduced to support flow experiences through emotion regulation guidance. The study tests the model's main theoretical assumptions about flow, dominance, self-efficacy, and boredom. Twenty participants worked on a task for an hour, assembling pieces in collaboration with the covatar. After the task, participants completed questionnaires on flow, their affective experience, and self-efficacy, and they were interviewed to understand their emotions and regulation during the task. The results suggest that the dominance dimension plays a vital role in task-related settings as it predicts the participants' self-efficacy and flow. However, the relationship between flow, pleasure, and arousal requires further investigation. Qualitative interview analysis revealed that participants regulated negative emotions, like boredom, also without support, but some strategies could negatively impact well-being and productivity, which aligns with theory.},
  booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
  articleno = {21},
  numpages = {8},
  keywords = {Boredom, Human-Robot Interaction, Emotion Regulation, Affect Modeling, PAD Model, Flow, Socially Interactive Agents},
  location = {W\"{u}rzburg, Germany},
  series = {IVA '23}
}
@inproceedings{dasilva23IVA-selfawareness,
  author = {Alves da Silva, Claudio and Hilpert, Bernhard and Bhuvaneshwara, Chirag and Gebhard, Patrick and Nunnari, Fabrizio and Tsovaltzi, Dimitra},
  title = {Visual Similarity for Socially Interactive Agents That Support Self-Awareness},
  year = {2023},
  isbn = {9781450399944},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3570945.3607329},
  doi = {10.1145/3570945.3607329},
  abstract = {Self-awareness is a critical factor in social interaction. Teachers being aware of their own emotions and thoughts during class may enable reflection and behavioral change. While inducing self-awareness through mirrors or video is common in face-to-face training, it has been scarcely examined in digital training with virtual avatars. This paper examines the relationship between avatar visual similarity and inducing self-awareness in digital training environments. We developed a theory-based methodology to reliably manipulate perceptually relevant facial features of digital avatars based on human-human identification and emotional predisposition. Manipulating these features allows the creation of personalized versions of digital avatars with varying degrees of visual similarity.}
  booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
  articleno = {52},
  numpages = {3},
  keywords = {explicit identification, emotional affinity, self-awareness, empathic agents, Avatar similarity socially interactive agents},
  location = {W\"{u}rzburg, Germany},
  series = {IVA '23}
}
@inproceedings{withanage23ICMI-renelib,
  author = {Withanage Don, Daksitha Senel and M\"{u}ller, Philipp and Nunnari, Fabrizio and Andr\'{e}, Elisabeth and Gebhard, Patrick},
  title = {ReNeLiB: Real-Time Neural Listening Behavior Generation for Socially Interactive Agents},
  year = {2023},
  isbn = {9798400700552},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3577190.3614133},
  doi = {10.1145/3577190.3614133},
  abstract = {Flexible and natural nonverbal reactions to human behavior remain a challenge for socially interactive agents (SIAs) that are predominantly animated using hand-crafted rules. While recently proposed machine learning based approaches to conversational behavior generation are a promising way to address this challenge, they have not yet been employed in SIAs. The primary reason for this is the lack of a software toolkit integrating such approaches with SIA frameworks that conforms to the challenging real-time requirements of human-agent interaction scenarios. In our work, we for the first time present such a toolkit consisting of three main components: (1) real-time feature extraction capturing multi-modal social cues from the user; (2) behavior generation based on a recent state-of-the-art neural network approach; (3) visualization of the generated behavior supporting both FLAME-based and Apple ARKit-based interactive agents. We comprehensively evaluate the real-time performance of the whole framework and its components. In addition, we introduce pre-trained behavioral generation models derived from psychotherapy sessions for domain-specific listening behaviors. Our software toolkit, pivotal for deploying and assessing SIAs’ listening behavior in real-time, is publicly available. Resources, including code, behavioural multi-modal features extracted from therapeutic interactions, are hosted at https://daksitha.github.io/ReNeLib},
  booktitle = {Proceedings of the 25th International Conference on Multimodal Interaction},
  pages = {507--516},
  numpages = {10},
  location = {Paris, France},
  series = {ICMI '23}
}
@inproceedings{nunnari23ESANN-MultimodalVADfusion,
  address = {Bruges (Belgium) and online},
  title = {Multimodal {Recognition} of {Valence}, {Arousal} and {Dominance} via {Late}-{Fusion} of {Text}, {Audio} and {Facial} {Expressions}},
  isbn = {978-2-87587-088-9},
  url = {https://www.esann.org/sites/default/files/proceedings/2023/ES2023-128.pdf},
  doi = {10.14428/esann/2023.ES2023-128},
  language = {en},
  urldate = {2023-10-05},
  booktitle = {{ESANN} 2023 proceedings},
  publisher = {Ciaco - i6doc.com},
  author = {Nunnari, Fabrizio and Rios, Annette and Reichel, Uwe and Bhuvaneshwara, Chirag and Filntisis, Panagiotis and Maragos, Petros and Burkhardt, Felix and Eyben, Florian and Schuller, Björn and Ebling, Sarah},
  year = {2023},
  pages = {571--576},
  abstract = {We present an approach for the prediction of valence, arousal, and dominance of people communicating via text/audio/video streams for a translation from and to sign languages. The approach consists of the fusion of the output of three CNN-based models dedicated to the analysis of text, audio, and facial expressions. Our experiments show that any combination of two or three modalities increases prediction performance for valence and arousal.}
}
@inproceedings{nunnari23SLTAT-VideoAlignment,
  author = {Nunnari, Fabrizio and Ameli, Mina and Mishra, Shailesh},
  booktitle = {2023 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICASSPW)},
  title = {Automatic Alignment Between Sign Language Videos And Motion Capture Data: A Motion Energy-Based Approach},
  year = {2023},
  pages = {1--5},
  doi = {10.1109/ICASSPW59220.2023.10193528},
  abstract = {In this paper, we propose a method for the automatic alignment of sign language videos and their corresponding motion capture data, useful for the preparation of multi-modal sign language corpora. First, we extract an estimate of the motion energy from both the video and the motion capture data. Second, we align the two curves to minimize their distance. Our tests show that it is possible to achieve a mean absolute error as low as 1.11 frames using optical flow for video energy extraction and a set of 22 bones for skeletal energy extraction.}
}
@inproceedings{nunnari23SLTAT-ARSLinterpreter,
  author = {Nunnari, Fabrizio and Avramidis, Eleftherios and Yadav, Vemburaj and Pagani, Alain and Hamidullah, Yasser and Mollanorozy, Sepideh and España-Bonet, Cristina and Woop, Emil and Gebhard, Patrick},
  booktitle = {2023 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICASSPW)},
  title = {Towards Incorporating 3D Space-Awareness Into an Augmented Reality Sign Language Interpreter},
  year = {2023},
  pages = {1--5},
  doi = {10.1109/ICASSPW59220.2023.10193194},
  abstract = {This paper describes the concept and the software architecture of a fully integrated system supporting a dialog between a deaf person and a hearing person through a virtual sign language interpreter (aka avatar) projected in the real space by an Augmented Reality device. In addition, a Visual Simultaneous Localization and Mapping system provides information about the 3D location of the objects recognized in the surrounding environment, allowing the avatar to orient, look and point towards the real location of discourse entities during the translation. The goal is to provide a modular architecture for testing individual software components in a fully integrated framework and to move virtual sign language interpreters beyond the standard "front-facing" interaction paradigm.}
}
@inproceedings{nunnari23SLTAT-InflectionParameters,
  author = {Nunnari, Fabrizio and Mishra, Shailesh and Gebhard, Patrick},
  booktitle = {2023 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICASSPW)},
  title = {Augmenting Glosses with Geometrical Inflection Parameters for the Animation of Sign Language Avatars},
  year = {2023},
  pages = {1--5},
  doi = {10.1109/ICASSPW59220.2023.10193227},
  abstract = {We present a new machine-readable symbolic representation of sign language based on the pairing of glosses with parameters that can be used for the inflection of motion captured sign animation clips. With respect to existing representations, this approach detaches from a purely linguistic point of view and provides a solution to the problem from a lower level of abstraction, aiming at generic body-motion manipulation. Early experiments show its effectiveness in manipulating hand trajectories and its potential in modulating the expressivity and communicative emotion of pre-recorded signs.}
}
@misc{nicora2023arXiv-SocialEmbodiedCobots,
  doi = {10.48550/ARXIV.2301.06471},
  url = {https://arxiv.org/abs/2301.06471},
  author = {Nicora, Matteo Lavit and Beyrodt, Sebastian and Tsovaltzi, Dimitra and Nunnari, Fabrizio and Gebhard, Patrick and Malosio, Matteo},
  keywords = {Robotics (cs.RO), FOS: Computer and information sciences},
  title = {Towards social embodied cobots: The integration of an industrial cobot with a social virtual agent},
  publisher = {arXiv},
  year = {2023},
  copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International}
}
@inbook{gebhard23Handbook-SeriousGames,
  author = {Gebhard, Patrick and Tsovaltzi, Dimitra and Schneeberger, Tanja and Nunnari, Fabrizio},
  title = {Serious Games with SIAs},
  year = {2022},
  isbn = {9781450398961},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  edition = {1},
  url = {https://doi.org/10.1145/3563659.3563676},
  booktitle = {The Handbook on Socially Interactive Agents: 20 Years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
  pages = {527--546},
  numpages = {20}
}
@inproceedings{bernhard22PETRA-AVASAG,
  author = {Bernhard, Lucas and Nunnari, Fabrizio and Unger, Amelie and Bauerdiek, Judith and Dold, Christian and Hauck, Marcel and Stricker, Alexander and Baur, Tobias and Heimerl, Alexander and Andr\'{e}, Elisabeth and Reinecker, Melissa and Espa\~{n}a-Bonet, Cristina and Hamidullah, Yasser and Busemann, Stephan and Gebhard, Patrick and J\"{a}ger, Corinna and Wecker, Sonja and Kossel, Yvonne and M\"{u}ller, Henrik and Waldow, Kristoffer and Fuhrmann, Arnulph and Misiak, Martin and Wallach, Dieter},
  title = {Towards Automated Sign Language Production: A Pipeline for Creating Inclusive Virtual Humans},
  year = {2022},
  isbn = {9781450396318},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3529190.3529202},
  doi = {10.1145/3529190.3529202},
  abstract = {In everyday life, Deaf People face barriers because information is often only available in spoken or written language. Producing sign language videos showing a human interpreter is often not feasible due to the amount of data required or because the information changes frequently. The ongoing AVASAG project addresses this issue by developing a 3D sign language avatar for the automatic translation of texts into sign language for public services. The avatar is trained using recordings of human interpreters translating text into sign language. For this purpose, we create a corpus with video and motion capture data and an annotation scheme that allows for real-time translation and subsequent correction without requiring manual correction of the animation frames. This paper presents the general translation pipeline focusing on innovative points, such as adjusting an existing annotation system to the specific requirements of sign language and making it usable by annotators from the Deaf communities.},
  booktitle = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments},
  pages = {260--268},
  numpages = {9},
  keywords = {automatic translation, motion capture, annotation, corpus, sign language production},
  location = {Corfu, Greece},
  series = {PETRA '22}
}
@inproceedings{nunnari2022SLTAT,
  entrysubtype = {workshop},
  author = {Fabrizio Nunnari},
  title = {A software toolkit for pre-processing sign language video streams},
  year = {2022},
  booktitle = {Seventh International Workshop on Sign Language Translation and Avatar Technology (SLTAT)},
  location = {Marseille, France},
  keywords = {sign language, video pre-processing, open source toolkit, software engineering},
  abstract = {We present the requirements, design guidelines, and the software architecture of an open-source toolkit dedicated to the pre-processing of sign language video material. The toolkit is a collection of functions and command-line tools designed to be integrated with build automation systems. Every pre-processing tool is dedicated to standard pre-processing operations (e.g., trimming, cropping, resizing) or feature extraction (e.g., identification of areas of interest, landmark detection) and can be used also as a standalone Python module. The UML diagrams of its architecture are presented together with a few working examples of its usage. The software is freely available with an open-source license on a public repository.}
}
@inproceedings{deshpande2022SLTAT,
  entrysubtype = {workshop},
  author = {Neha Deshpande and Fabrizio Nunnari and Eleftherios Avramidis},
  title = {Fine-tuning of convolutional neural networks for the recognition of facial expressions in sign language video samples},
  year = {2022},
  booktitle = {Seventh International Workshop on Sign Language Translation and Avatar Technology (SLTAT)},
  location = {Marseille, France},
  keywords = {sign language, facial expression recognition, convolutional neural networks, fine-tuning, data augmentation},
  abstract = {In this paper, we investigate the capability of convolutional neural networks to recognize in sign language video frames the six basic Ekman facial expressions for 'fear', 'disgust', 'surprise', 'sadness', 'happiness' and 'anger' along with the 'neutral' class. Given the limited amount of annotated facial expression data for the sign language domain, we started from a model pre-trained on general-purpose facial expression datasets and we applied various machine learning techniques such as fine-tuning, data augmentation, class balancing, as well as image preprocessing to reach a better accuracy. The models were evaluated using K-fold cross-validation to get more accurate conclusions. Through our experiments we demonstrate that fine-tuning a pre-trained model, along with data augmentation by horizontal image flipping and image normalization, provides the best accuracy on the sign language dataset. The best setting achieves satisfactory classification accuracy, comparable to state-of-the-art systems in generic facial expression recognition. Experiments were performed using different combinations of the above-mentioned techniques based on two different architectures, namely MobileNet and EfficientNet; we deem that both architectures are equally suitable for the purpose of fine-tuning, whereas class balancing is discouraged.}
}
@article{nunnari2020AffComp,
  author = {Nunnari, Fabrizio and Heloir, Alexis},
  journal = {IEEE Transactions on Affective Computing},
  title = {Rating Vs. Paired Comparison for the Judgment of Dominance on First Impressions},
  year = {2022},
  volume = {13},
  number = {1},
  pages = {367--378},
  doi = {10.1109/TAFFC.2020.3022982},
  abstract = {This article presents a contest between the rating and the paired comparison voting in judging the perceived dominance of virtual characters, the aim being to select the voting mode that is the most convenient for voters while staying reliable. The comparison consists of an experiment where human subjects vote on a set of virtual characters generated by randomly altering a set of physical attributes. The minimum number of participants has been determined via numerical simulation. The outcome is a sequence of stereotypes ordered along their conveyed amount of submissiveness or dominance. Results show that the two voting modes result in equivalently expressive models of dominance. Further analysis of the voting procedure shows that, despite an initial slower learning phase, after about 30 votes the two modes exhibit the same judging speed. Finally, a subjective questionnaire reports a higher (63.8 percent) preference for the paired comparison mode.}
}
@inproceedings{schneeberger2021GENEA-influence,
  author = {Schneeberger, Tanja and Aly, Fatima Ayman and Don, Daksitha Withanage and Gies, Katharina and Zeimer, Zita and Nunnari, Fabrizio and Gebhard, Patrick},
  title = {Influence of Movement Energy and Affect Priming on the Perception of Virtual Characters' Extroversion and Mood},
  year = {2021},
  isbn = {9781450384711},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3461615.3485409},
  doi = {10.1145/3461615.3485409},
  abstract = { Movement Energy – physical activeness in performing actions and Affect Priming – prior exposure to information about someone’s mood and personality might be two crucial factors that influence how we perceive someone. It is unclear if these factors influence the perception of virtual characters in a way that is similar to what is observed during in-person interactions. This paper presents different configurations of Movement Energy for virtual characters and two studies about how these influence the perception of the characters’ personality, extroversion in particular, and mood. Moreover, the studies investigate how Affect Priming (Personality and Mood), as one form of contextual priming, influences this perception. The results indicate that characters with high Movement Energy are perceived as more extrovert and in a better mood, which corroborates existing research. Moreover, the results indicate that Personality and Mood Priming influence perception in different ways. Characters that were primed as being in a positive mood are perceived as more extrovert, whereas characters that were primed as being introverted are perceived in a more positive mood.},
  booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
  pages = {211--219},
  numpages = {9},
  keywords = {virtual characters, perceptual study, character animation, contextual priming},
  location = {Montreal, QC, Canada},
  series = {ICMI '21 Companion}
}
@inproceedings{nunnari2021KI-CropIt,
  author = {Nunnari, Fabrizio
and Ezema, Abraham
and Sonntag, Daniel},
  editor = {Edelkamp, Stefan
and M{\"o}ller, Ralf
and Rueckert, Elmar},
  title = {Crop It, but Not Too Much: The Effects of Masking on the Classification of Melanoma Images},
  booktitle = {KI 2021: Advances in Artificial Intelligence},
  year = {2021},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {179--193},
  abstract = {To improve the accuracy of convolutional neural networks in discriminating between nevi and melanomas, we test nine different combinations of masking and cropping on three datasets of skin lesion images (ISIC2016, ISIC2018, and MedNode). Our experiments, confirmed by 10-fold cross-validation, show that cropping increases classification performances, but specificity decreases when cropping is applied together with masking out healthy skin regions. An analysis of Grad-CAM saliency maps shows that in fact our CNN models have the tendency to focus on healthy skin at the border when a nevus is classified.},
  isbn = {978-3-030-87626-5}
}
@inproceedings{nicora2021ROMAN-MindBotOverview,
  author = {Nicora, Matteo Lavit and André, Elisabeth and Berkmans, Daniel and Carissoli, Claudia and D'Orazio, Tiziana and Delle Fave, Antonella and Gebhard, Patrick and Marani, Roberto and Mira, Robert Mihai and Negri, Luca and Nunnari, Fabrizio and Peña Fernandez, Alberto and Scano, Alessandro and Reni, Gianluigi and Malosio, Matteo},
  booktitle = {30th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
  title = {A human-driven control architecture for promoting good mental health in collaborative robot scenarios},
  year = {2021},
  publisher = {IEEE},
  pages = {285--291},
  doi = {10.1109/RO-MAN50785.2021.9515315},
  abstract = {This paper introduces the control architecture of a platform aimed at promoting good mental health for workers interacting with collaborative robots (cobots). The platform's aim is to render industrial production cells capable of automatically adapting their behavior in order to improve the operator's quality of experience and level of engagement and to minimize his/her psychological strain. In order to achieve such a goal, an extremely rich and complex framework is required. Starting from the identification of the parameters that could influence the collaboration experience, the envisioned human-driven control structure is presented together with a detailed description of the components required to implement such an automated system. Future works will include proper tuning of control parameters with dedicated experimental sessions, together with the definition of organizational and technical guidelines for the design of a mental-health-friendly cobot-based manufacturing workplace.}
}
@inproceedings{nunnari2021AT4SSL-AVASAG,
  entrysubtype = {workshop},
  title = {AVASAG: A German Sign Language Translation System for Public Services},
  author = {Fabrizio Nunnari and 
Judith Bauerdiek and 
Lucas Bernhard and 
Cristina Espa{\~n}a-Bonet and 
Corinna Jäger and 
Amelie Unger and 
Kristoffer Waldow and 
Sonja Wecker and 
Elisabeth André and 
Stephan Busemann and 
Christian Dold and 
Arnulph Fuhrmann and 
Patrick Gebhard and 
Yasser Hamidullah and 
Marcel Hauck and 
Yvonne Kossel and 
Martin Misiak and 
Dieter Wallach and 
Alexander Stricker},
  booktitle = {1st International Workshop on Automatic Translation for Signed and Spoken Languages (AT4SSL)},
  year = {2021},
  month = {8},
  publisher = {Association for Machine Translation in the Americas},
  url = {https://aclanthology.org/2021.mtsummit-at4ssl.0/},
  abstract = {This paper presents an overview of AVASAG, an ongoing applied-research project developing a text-to-sign-language translation system for public services. We describe the scientific innovation points (geometry-based SL description, 3D animation and video corpus, simplified annotation scheme, motion capture strategy) and the overall translation pipeline.}
}
@inproceedings{nunnari2021CDMAKE-AnomalyDetection,
  author = {Nunnari, Fabrizio
and Alam, Hasan Md Tusfiqur
and Sonntag, Daniel},
  editor = {Holzinger, Andreas
and Kieseberg, Peter
and Tjoa, A. Min
and Weippl, Edgar},
  title = {Anomaly Detection for Skin Lesion Images Using Replicator Neural Networks},
  booktitle = {Machine Learning and Knowledge Extraction},
  year = {2021},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {225--240},
  abstract = {This paper presents an investigation on the task of anomaly detection for images of skin lesions. The goal is to provide a decision support system with an extra filtering layer to inform users if a classifier should not be used for a given sample. We tested anomaly detectors based on autoencoders and three discrimination methods: feature vector distance, replicator neural networks, and support vector data description fine-tuning. Results show that neural-based detectors can perfectly discriminate between skin lesions and open world images, but class discrimination cannot easily be accomplished and requires further investigation.},
  isbn = {978-3-030-84060-0},
  url = {https://link.springer.com/chapter/10.1007/978-3-030-84060-0_15}
}
@inproceedings{nunnari2021CDMAKE-GradCAMOverlay,
  author = {Nunnari, Fabrizio
and Kadir, Md Abdul
and Sonntag, Daniel},
  editor = {Holzinger, Andreas
and Kieseberg, Peter
and Tjoa, A. Min
and Weippl, Edgar},
  title = {On the Overlap Between Grad-CAM Saliency Maps and Explainable Visual Features in Skin Cancer Images},
  booktitle = {Machine Learning and Knowledge Extraction},
  year = {2021},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {241--253},
  abstract = {Dermatologists recognize melanomas by inspecting images in which they identify human-comprehensible visual features. In this paper, we investigate to what extent such features correspond to the saliency areas identified on CNNs trained for classification. Our experiments, conducted on two neural architectures characterized by different depth and different resolution of the last convolutional layer, quantify to what extent thresholded Grad-CAM saliency maps can be used to identify visual features of skin cancer. We found that the best threshold value, i.e., the threshold at which we can measure the highest Jaccard index, varies significantly among features; ranging from 0.3 to 0.7. In addition, we measured Jaccard indices as high as 0.143, which is almost 50{\%} of the performance of state-of-the-art architectures specialized in feature mask prediction at pixel-level, such as U-Net. Finally, a breakdown test between malignancy and classification correctness shows that higher resolution saliency maps could help doctors in spotting wrong classifications.},
  isbn = {978-3-030-84060-0},
  url = {https://link.springer.com/chapter/10.1007/978-3-030-84060-0_16}
}
@inproceedings{nunnari2021LDK-SLinTheWild,
  author = {Nunnari, Fabrizio and Espa\~{n}a-Bonet, Cristina and Avramidis, Eleftherios},
  title = {{A Data Augmentation Approach for Sign-Language-To-Text Translation In-The-Wild}},
  booktitle = {3rd Conference on Language, Data and Knowledge (LDK 2021)},
  pages = {36:1--36:8},
  series = {Open Access Series in Informatics (OASIcs)},
  isbn = {978-3-95977-199-3},
  issn = {2190-6807},
  year = {2021},
  volume = {93},
  editor = {Gromann, Dagmar and S\'{e}rasset, Gilles and Declerck, Thierry and McCrae, John P. and Gracia, Jorge and Bosque-Gil, Julia and Bobillo, Fernando and Heinisch, Barbara},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address = {Dagstuhl, Germany},
  url = {https://drops.dagstuhl.de/opus/volltexte/2021/14572},
  urn = {urn:nbn:de:0030-drops-145728},
  doi = {10.4230/OASIcs.LDK.2021.36},
  annote = {Keywords: sign language, video recognition, end-to-end translation, data augmentation},
  abstract = {In this paper, we describe the current main approaches to sign language translation which use deep neural networks with videos as input and text as output. We highlight that, from our point of view, their main weakness is the lack of generalization in daily life contexts. Our goal is to build a state-of-the-art system for the automatic interpretation of sign language in unpredictable video framing conditions. Our main contribution is the shift from image features to landmark positions in order to diminish the size of the input data and facilitate the combination of data augmentation techniques for landmarks. We describe the set of hypotheses to build such a system and the list of experiments that will lead us to their verification.}
}
@inproceedings{nunnari2021EICS-TIML,
  author = {Nunnari, Fabrizio and Sonntag, Daniel},
  title = {A Software Toolbox for Deploying Deep Learning Decision Support Systems with XAI Capabilities},
  year = {2021},
  isbn = {9781450384490},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3459926.3464753},
  doi = {10.1145/3459926.3464753},
  abstract = { We describe the software architecture of a toolbox of reusable components for the configuration of convolutional neural networks (CNNs) for classification and labeling problems. The toolbox architecture has been designed to maximize the reuse of established algorithms and to include domain experts in the development and evaluation process across different projects and challenges. In addition, we implemented easy-to-edit input formats and modules for XAI (eXplainable AI) through visual inspection capabilities. The toolbox is available for the research community to implement applied artificial intelligence projects.},
  booktitle = {Companion of the 2021 ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
  pages = {44--49},
  numpages = {6},
  keywords = {design patterns, object-oriented architecture, deep learning, software toolbox, convolutional neural networks, explainable AI},
  location = {Virtual Event, Netherlands},
  series = {EICS '21}
}
@misc{somfai2021minimizing,
  title = {Minimizing false negative rate in melanoma detection and providing insight into the causes of classification},
  author = {Ellák Somfai and Benjámin Baffy and Kristian Fenech and Changlu Guo and Rita Hosszú and Dorina Korózs and Fabrizio Nunnari and Marcell Pólik and Daniel Sonntag and Attila Ulbert and András Lőrincz},
  year = {2021},
  eprint = {2102.09199},
  archiveprefix = {arXiv},
  primaryclass = {cs.CV}
}
@inproceedings{nguyen2021AAAI,
  entrysubtype = {workshop},
  abstract = {Besides principal polymerase chain reaction (PCR) tests, automatically identifying positive samples based on computed tomography (CT) scans can present a promising option in the early diagnosis of COVID-19. Recently, there have been increasing efforts to utilize deep networks for COVID-19 diagnosis based on CT scans. While these approaches mostly focus on introducing novel architectures, transfer learning techniques or construction of large scale data, we propose a novel strategy to improve several performance baselines by leveraging multiple useful information sources relevant to doctors' judgments.  Specifically, infected regions and heat-map features extracted from learned networks are integrated with the global image via an attention mechanism during the learning process. This procedure makes our system more robust to noise and guides the network focusing on local lesion areas.  Extensive experiments illustrate the superior performance of our approach compared to recent baselines. Furthermore, our learned network guidance presents an explainable feature to doctors to understand the connection between input and output in a grey-box model.},
  year = {2021},
  title = {An Attention Mechanism using Multiple Knowledge Sources for COVID-19 Detection from CT Images},
  booktitle = {The Thirty-Fifth AAAI Conference on Artificial Intelligence (AAAI-21), Workshop on Trustworthy AI for Healthcare, February 8--9, Canada},
  publisher = {AAAI},
  author = {Ho Minh Duy Nguyen and Duy M. Nguyen and Huong Vu and Binh T. Nguyen and Fabrizio Nunnari and Daniel Sonntag},
  keywords = {Explainable AI, Covid-19, Medical Imaging},
  url = {https://www.dfki.de/web/forschung/publikationen/renameFileForDownload?filename=AAAI_Workshop_TrustworthyHealthcare_v3.pdf&file_id=uploads_4954}
}
@inproceedings{nunnari2021MedAI,
  author = {Nunnari, Fabrizio
and Ezema, Abraham
and Sonntag, Daniel},
  editor = {Ye, Juan
and O'Grady, Michael J.
and Civitarese, Gabriele
and Yordanova, Kristina},
  title = {The Effects of Masking in Melanoma Image Classification with CNNs Towards International Standards for Image Preprocessing},
  booktitle = {Wireless Mobile Communication and Healthcare},
  year = {2021},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {257--273},
  abstract = {The classification of skin lesion images is known to be biased by artifacts of the surrounding skin, but it is still not clear to what extent masking out healthy skin pixels influences classification performances, and why. To better understand this phenomenon, we apply different strategies of image masking (rectangular masks, circular masks, full masking, and image cropping) to three datasets of skin lesion images (ISIC2016, ISIC2018, and MedNode). We train CNN-based classifiers, provide performance metrics through a 10-fold cross-validation, and analyse the behaviour of Grad-CAM saliency maps through an automated visual inspection. Our experiments show that cropping is the best strategy to maintain classification performance and to significantly reduce training times as well. Our analysis through visual inspection shows that CNNs have the tendency to focus on pixels of healthy skin when no malignant features can be identified. This suggests that CNNs have the tendency of ``eagerly'' looking for pixel areas to justify a classification choice, potentially leading to biased discriminators. To mitigate this effect, and to standardize image preprocessing, we suggest to crop images during dataset construction or before the learning step.},
  isbn = {978-3-030-70569-5},
  doi = {10.1007/978-3-030-70569-5_16}
}
@inproceedings{nguyen2020KI,
  series = {Lecture Notes in Computer Science, LNCS},
  abstract = {In this work, we propose a new approach to automatically predict the locations of visual dermoscopic attributes for Task 2 of the ISIC 2018 Challenge. Our method is based on the Attention U-Net with multi-scale images as input. We apply a new strategy based on transfer learning, i.e., training the deep network for feature extraction by adapting the weights of the network trained for segmentation. Our tests show that, first, the proposed algorithm is on par or outperforms the best ISIC 2018 architectures (LeHealth and NMN) in the extraction of two visual features. Secondly, it uses only 1/30 of the training parameters; we observed less computation and memory requirements, which are particularly useful for future implementations on mobile devices. Finally, our approach generates visually explainable behaviour with uncertainty estimations to help doctors in diagnosis and treatment decisions.},
  days = {21--25},
  month = {9},
  year = {2020},
  location = {Bamberg, Germany},
  title = {A Visually Explainable Learning System for Skin Lesion Detection Using Multiscale Input with Attention U-Net},
  booktitle = {43rd German Conference on Artificial Intelligence (KI-2020)},
  volume = {12325},
  pages = {313--319},
  publisher = {Springer},
  author = {Ho Minh Duy Nguyen and Abraham Ezema and Fabrizio Nunnari and Daniel Sonntag},
  keywords = {Skin lesion, Diagnose features, Attention U-Net},
  url = {https://link.springer.com/chapter/10.1007/978-3-030-58285-2_28}
}
@inproceedings{kalimuthu2020ImageCLEF,
  entrysubtype = {workshop},
  title = {A Competitive Deep Neural Network Approach for the ImageCLEFmed Caption 2020 Task},
  author = {Marimuthu Kalimuthu and Fabrizio Nunnari and Daniel Sonntag},
  booktitle = {Working Notes of CLEF 2020 - Conference and Labs of the Evaluation Forum},
  year = {2020},
  location = {Thessaloniki, Greece},
  month = {September},
  days = {22--25},
  url = {https://arxiv.org/abs/2007.14226}
}
@inproceedings{nunnari2020CDMAKE,
  author = {Nunnari, Fabrizio
and Bhuvaneshwara, Chirag
and Ezema, Abraham Obinwanne
and Sonntag, Daniel},
  editor = {Holzinger, Andreas
and Kieseberg, Peter
and Tjoa, A. Min
and Weippl, Edgar},
  title = {A Study on the Fusion of Pixels and Patient Metadata in CNN-Based Classification of Skin Lesion Images},
  booktitle = {Machine Learning and Knowledge Extraction},
  year = {2020},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {191--208},
  abstract = {We present a study on the fusion of pixel data and patient metadata (age, gender, and body location) for improving the classification of skin lesion images. The experiments have been conducted with the ISIC 2019 skin lesion classification challenge data set. Taking two plain convolutional neural networks (CNNs) as a baseline, metadata are merged using either non-neural machine learning methods (tree-based and support vector machines) or shallow neural networks. Results show that shallow neural networks outperform other approaches in all overall evaluation measures. However, despite the increase in the classification accuracy (up to +19.1{\%}), interestingly, the average per-class sensitivity decreases in three out of four cases for CNNs, thus suggesting that using metadata penalizes the prediction accuracy for lower represented classes. A study on the patient metadata shows that age is the most useful metadatum as a decision criterion, followed by body location and gender.},
  isbn = {978-3-030-57321-8},
  doi = {10.1007/978-3-030-57321-8_11}
}
@misc{sonntag2020skincare,
  title = {The Skincare project, an interactive deep learning system for differential diagnosis of malignant skin lesions. Technical Report},
  author = {Daniel Sonntag and Fabrizio Nunnari and Hans-Jürgen Profitlich},
  year = {2020},
  eprint = {2005.09448},
  archiveprefix = {arXiv},
  primaryclass = {eess.IV},
  url = {https://arxiv.org/abs/2005.09448}
}
@inproceedings{nunnari2019EuroVR,
  author = {Nunnari, Fabrizio
and Magliaro, Serena
and D'Errico, Giovanni
and De Luca, Valerio
and Barba, Maria Cristina
and De Paolis, Lucio Tommaso},
  editor = {Bourdot, Patrick
and Interrante, Victoria
and Nedel, Luciana
and Magnenat-Thalmann, Nadia
and Zachmann, Gabriel},
  title = {Designing and Assessing Interactive Virtual Characters for Children Affected by ADHD},
  booktitle = {Virtual Reality and Augmented Reality},
  year = {2019},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {285--290},
  abstract = {Within the BRAVO project, we are designing four virtual characters that will interact with children affected by ADHD. In order to assess the quality of the designed characters, we propose a metric to subjectively evaluate the level of intelligibility of the character's facial expression. The results of a preliminary user study conducted with 23 individuals show that our quality measure can be used to quickly identify flawed expressions and iteratively improve the design of the characters.},
  isbn = {978-3-030-31908-3},
  url = {https://link.springer.com/chapter/10.1007/978-3-030-31908-3_17},
  doi = {10.1007/978-3-030-31908-3_17},
  location = {Tallinn, Estonia},
  month = {October},
  days = {23--25}
}
@article{malik2019CompAndGraph,
  title = {Simple and effective deep hand shape and pose regression from a single depth image},
  issn = {00978493},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0097849319301591},
  doi = {10.1016/j.cag.2019.10.002},
  language = {en},
  urldate = {2019-10-19},
  journal = {Computers \& Graphics},
  author = {Malik, Jameel and Elhayek, Ahmed and Nunnari, Fabrizio and Stricker, Didier},
  month = oct,
  year = {2019},
  volume = {85},
  pages = {85--91}
}
@techreport{nunnari2019arXiv-CNNToolbox,
  title = {A CNN toolbox for skin cancer classification},
  author = {Fabrizio Nunnari and Daniel Sonntag},
  year = {2019},
  eprint = {1908.08187},
  archiveprefix = {arXiv},
  primaryclass = {eess.IV},
  url = {https://arxiv.org/abs/1908.08187},
}
@inproceedings{barba2019SalentoAVR,
  author = {Barba, Maria Cristina
and Covino, Attilio
and De Luca, Valerio
and De Paolis, Lucio Tommaso
and D'Errico, Giovanni
and Di Bitonto, Pierpaolo
and Di Gestore, Simona
and Magliaro, Serena
and Nunnari, Fabrizio
and Paladini, Giovanna Ilenia
and Potenza, Ada
and Schena, Annamaria},
  editor = {De Paolis, Lucio Tommaso
and Bourdot, Patrick},
  title = {BRAVO: A Gaming Environment for the Treatment of ADHD},
  booktitle = {Augmented Reality, Virtual Reality, and Computer Graphics},
  year = {2019},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {394--407},
  abstract = {Attention-deficit hyperactivity disorder (ADHD) is a neurodevelopmental disorder that is expressed through different symptoms belonging to three different dimensions: inattention, impulsivity and motor hyperactivity, each of which contributes to the learning and adaptation problems within the different contexts of life. ADHD children have to focus on three main elements: learning self-control, making and keeping friends, and feeling good about themselves. The BRAVO (Beyond the tReatment of the Attention deficit hyperactiVity disOrder) project aims to realize an immersive therapeutic game context, based on an innovative ICT system, with the aim of improving the relationship between young patients and therapies (administered by means of serious games and gamification). By using wearable equipment and Virtual and Augmented Reality devices, new personalized processes of therapy will be implemented. Such processes will be able to dynamically change in order to follow the patient's evolution and support the therapists in the rehabilitation program management.},
  isbn = {978-3-030-25965-5},
  doi = {10.1007/978-3-030-25965-5_30},
  location = {Santa Maria al Bagno, Italy},
  month = {6},
  days = {24--27}
}
@inproceedings{Covre2019SalentoAVR,
  author = {Covre, Nicola
and Nunnari, Fabrizio
and Fornaser, Alberto
and De Cecco, Mariolino},
  editor = {De Paolis, Lucio Tommaso
and Bourdot, Patrick},
  title = {Generation of Action Recognition Training Data Through Rotoscoping and Augmentation of Synthetic Animations},
  booktitle = {Augmented Reality, Virtual Reality, and Computer Graphics},
  year = {2019},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {23--42},
  abstract = {In this paper, we present a method to synthetically generate the training material needed by machine learning algorithms to perform human action recognition from 2D videos. As a baseline pipeline, we consider a 2D video stream passing through a skeleton extractor (OpenPose), whose 2D joint coordinates are analyzed by a random forest. Such a pipeline is trained and tested using real live videos. As an alternative approach, we propose to train the random forest using automatically generated 3D synthetic videos. For each action, given a single reference live video, we edit a 3D animation (in Blender) using the rotoscoping technique. This prior animation is then used to produce a full training set of synthetic videos via perturbation of the original animation curves. Our tests, performed on live videos, show that our alternative pipeline leads to comparable accuracy, with the advantage of drastically reducing both the human effort and the computing power needed to produce the live training material.},
  isbn = {978-3-030-25999-0},
  doi = {10.1007/978-3-030-25999-0_3},
  location = {Santa Maria al Bagno, Italy},
  month = {6},
  days = {24--27}
}
@incollection{Heloir:2019:HandBookMultimodalInterfaces,
  author = {Heloir, Alexis and Nunnari, Fabrizio and Bachynskyi, Myroslav},
  chapter = {Ergonomics for the Design of Multimodal Interfaces},
  title = {The Handbook of Multimodal-Multisensor Interfaces},
  editor = {Oviatt, Sharon and Schuller, Bj\"{o}rn and Cohen, Philip R. and Sonntag, Daniel and Potamianos, Gerasimos and Kr\"{u}ger, Antonio},
  year = {2019},
  isbn = {978-1-97000-175-4},
  pages = {263--304},
  numpages = {42},
  url = {https://doi.org/10.1145/3233795.3233804},
  doi = {10.1145/3233795.3233804},
  acmid = {3233804},
  publisher = {Association for Computing Machinery and Morgan \& Claypool},
  address = {New York, NY, USA}
}
@article{nunnari:2019:CASA,
  author = {Nunnari, Fabrizio and Heloir, Alexis},
  title = {Yet another low-level agent handler},
  journal = {Computer Animation and Virtual Worlds},
  volume = {30},
  number = {3-4},
  pages = {e1891},
  keywords = {agent framework, character creation pipeline, virtual human},
  doi = {10.1002/cav.1891},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/cav.1891},
  eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/cav.1891},
  abstract = {YALLAH is a framework for the creation of real-time interactive virtual humans. Its production pipeline supports the continuous, parallel development of both the character and the software, and allows users to deploy a new character in a few hours of work. YALLAH is based on freely available software, mostly open-source, and its modular software architecture provides a framework for the seamless integration of new features. Finally, thanks to transpilation, the whole framework is conceived to accommodate multiple game engines.},
  year = {2019}
}
@inproceedings{hirsch18VSI,
  entrysubtype = {workshop},
  author = {Anke Hirsch and Fabrizio Nunnari and Alexis Heloir},
  title = {A Software for Identifying the Facial Features Driving the Perception of Personality Traits},
  booktitle = {Proceedings of the 4th Workshop on Virtual Social Interaction},
  days = {17--18},
  month = {12},
  location = {London, UK},
  year = {2018}
}
@inproceedings{malik2018-3DV,
  author = {J. Malik and A. Elhayek and F. Nunnari and K. Varanasi and K. Tamaddon and A. Heloir and D. Stricker},
  booktitle = {2018 International Conference on 3D Vision (3DV)},
  title = {DeepHPS: End-to-end Estimation of 3D Hand Pose and Shape by Learning from Synthetic Depth},
  year = {2018},
  month = {9},
  days = {5--8},
  pages = {110--119},
  location = {Verona, Italy},
  keywords = {hand pose and shape, synthetic depth, convolutional neural networks},
  doi = {10.1109/3DV.2018.00023},
  issn = {2475-7888},
  publisher = {IEEE}
}
@inproceedings{nunnari2018salentoavr,
  author = {Nunnari, Fabrizio
    and Heloir, Alexis},
  editor = {De Paolis, Lucio Tommaso
    and Bourdot, Patrick},
  title = {Write-Once, Transpile-Everywhere: Re-using Motion Controllers of Virtual Humans Across Multiple Game Engines},
  booktitle = {Augmented Reality, Virtual Reality, and Computer Graphics},
  year = {2018},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {435--446},
  abstract = {Transpilation allows writing code once and re-using it across multiple runtime environments. In this paper, we propose a software development practice to implement the motion controllers of virtual humans once and re-use the implementation in multiple game engines. In a case study, three common human behaviors -- blinking, text-to-speech, and eye-gaze -- were developed in the Haxe programming language and deployed in the free, open-source Blender Game Engine and the commercial Unity engine. Performance tests show that transpiled code executes between 67{\%} faster and 127{\%} slower than an implementation manually written in the game engines' target languages.},
  isbn = {978-3-319-95270-3},
  doi = {10.1007/978-3-319-95270-3_37},
  url = {https://link.springer.com/chapter/10.1007%2F978-3-319-95270-3_37},
  month = {6},
  days = {24--27},
  location = {Otranto (Lecce), Italy}
}
@inproceedings{nunnari2018LREC,
  entrysubtype = {workshop},
  author = {Fabrizio Nunnari and Michael Filhol and Alexis Heloir},
  title = {Animating AZee Descriptions Using Off-the-Shelf {IK} Solvers},
  booktitle = {Proceedings of the 8th LREC Workshop on the Representation and Processing of Sign Languages},
  isbn = {979-10-95546-01-6},
  month = {5},
  year = {2018},
  days = {12},
  location = {Miyazaki, Japan},
  pages = {155--162},
  url = {https://www.sign-lang.uni-hamburg.de/lrec2018/},
  abstract = {We propose to implement a bottom-up animation solution for the AZee system. No low-level AZee animation system exists yet, which hinders its effective implementation as Sign Language avatar input. This bottom-up approach delivers procedurally computed animations and, because of its procedural nature, it is capable of generating the whole possible range of gestures covered by AZee's symbolic description. The goal is not to compete on the ground of naturalness, since movements are bound to look robotic like in all bottom-up systems; rather, it could serve as the missing low-level fallback for an existing top-down system. The proposed animation system is built on top of a freely available 3D authoring tool and takes advantage of the tool's default IK solving routines.}
}
@article{fraedrich18CAVW,
  author = {Laura Fr{\"a}drich and Fabrizio Nunnari and Maria Staudte and Alexis Heloir},
  title = {(Simulated) listener gaze in real-time spoken interaction},
  journal = {Computer Animation and Virtual Worlds},
  year = {2018},
  volume = {29},
  number = {3-4},
  pages = {e1831},
  publisher = {Wiley},
  keywords = {embodied conversational agents, eye-tracking, joint attention, (listener) gaze, speaker behavior},
  doi = {10.1002/cav.1831},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/cav.1831},
  abstract = {Abstract Gaze is an important aspect of social communication. Previous research has concentrated mainly on the role of speaker gaze and listener gaze in isolation, neglecting the effect of the listener's gaze behavior on the speaker's behavior. This paper presents an exploratory eye-tracking study involving an interactive human-like agent following participants' gaze. This study demonstrates that a rather simple gaze-following mechanism convincingly simulates active listening behavior engaging the speaker. The study also highlights how speakers rely on their interlocutors' gaze when establishing common references.}
}
@inproceedings{tilmanne2018enterface15,
  entrysubtype = {workshop},
  author = {Jo\"elle Tilmanne and Nicolas d'Alessandro and Petr Barborka and Furkan Bayansar and Francisco Bernardo and Rebecca Fiebrink and Alexis Heloir and Edgar Hemery and Sohaib Laraba and Alexis Moinet and Fabrizio Nunnari and Thierry Ravet and Lo\"ic Reboursi\`ere and Alvaro Sarasua and Micka{\"e}l Tits and No{\'e} Tits and Fran\c{c}ois Zaj\'ega},
  title = {Prototyping a New Audio-Visual Instrument Based on Extraction of High-Level Features on Full-Body Motion},
  booktitle = {Proceedings of eNTERFACE 2015 Workshop on Intelligent Interfaces},
  location = {Mons, Belgium},
  year = {2018},
  month = {August},
  pages = {7--43},
  url = {https://arxiv.org/abs/1801.06349}
}
@inproceedings{Nunnari2017IVA,
  author = {Nunnari, Fabrizio and Heloir, Alexis},
  title = {Generation of Virtual Characters from Personality Traits},
  booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents},
  series = {IVA 2017},
  location = {Stockholm, Sweden},
  year = {2017},
  month = {8},
  days = {27--30},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {301--314},
  isbn = {978-3-319-67401-8},
  doi = {10.1007/978-3-319-67401-8_39},
  url = {https://doi.org/10.1007/978-3-319-67401-8_39},
  pdf = {2017/2017_IVA.pdf},
  abstract = {We present a method to generate a virtual character whose physical attributes reflect public opinion of a given personality profile. An initial reverse correlation experiment trains a model which explains the perception of personality traits from physical attributes. The reverse model, solved using linear programming, allows for the real-time generation of virtual characters from an input personality. The method has been applied on three personality traits (dominance, trustworthiness, and agreeableness) and 14 physical attributes and verified through both an analytic test and a subjective study.}
}
@inproceedings{Fraedrich2017IVA,
  author = {Fr{\"a}drich, Laura
  and Nunnari, Fabrizio
  and Staudte, Maria
  and Heloir, Alexis},
  title = {Simulating Listener Gaze and Evaluating Its Effect on Human Speakers},
  booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents},
  series = {IVA 2017},
  location = {Stockholm, Sweden},
  year = {2017},
  month = {8},
  days = {27--30},
  publisher = {Springer International Publishing},
  address = {Cham},
  pages = {156--159},
  isbn = {978-3-319-67401-8},
  doi = {10.1007/978-3-319-67401-8_17},
  url = {https://doi.org/10.1007/978-3-319-67401-8_17},
  pdf = {2017/-448363_1_En_17_Chapter_Author.pdf},
  abstract = {This paper presents an agent architecture designed as part of a multidisciplinary collaboration between embodied agents development and psycho-linguistic experimentation. This collaboration will lead to an empirical study involving an interactive human-like avatar following participants' gaze. Instead of adapting existing ``off the shelf'' embodied agents solutions, experimenters and developers collaboratively designed and implemented the experiment's logic and the avatar's real-time behavior from scratch in the Blender environment following an agile methodology. Frequent iterations and short implementation sprints allowed the experimenters to focus on the experiment and test many interaction scenarios in a short time.}
}
@inproceedings{Nunnari:2017:VSI,
  entrysubtype = {workshop},
  author = {Nunnari, Fabrizio and Heloir, Alexis},
  title = {Personality Models to and from Virtual Characters},
  booktitle = {Proceedings of the 3rd Workshop on Virtual Social Interaction},
  year = {2017},
  location = {Bielefeld, Germany},
  series = {VSI '17},
  abstract = {Little work has investigated the relationships between the personality of a virtual agent, its behavior, and its physical appearance. The work that we are conducting in the SLSI group is based on the observation that people very quickly form impressions of the personality of others in zero-acquaintance encounters. To support our investigations, we are developing a set of tools for the generation of virtual characters from a given personality.}
}
@inproceedings{Nunnari:2017:AAMAS,
  author = {Nunnari, Fabrizio and Heloir, Alexis},
  title = {Generating Virtual Characters from Personality Traits via Reverse Correlation and Linear Programming (Extended Abstract)},
  booktitle = {Proceedings of the Sixteenth International Conference on Autonomous Agents and Multiagent Systems},
  year = {2017},
  series = {AAMAS '17},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  keywords = {Virtual character generation, personality traits, physical attributes, reverse correlation, paired comparison},
  location = {S\~{a}o Paulo, Brazil},
  url = {http://dl.acm.org/citation.cfm?id=3091396},
  pages = {1661--1663},
  pdf = {2017/ex437-nunnariA.pdf},
  doi = {10.5555/3091125.3091396}
}
@inproceedings{Nunnari:2016:IPV:2994258.2994278,
  author = {Nunnari, Fabrizio and Bachynskyi, Myroslav and Heloir, Alexis},
  title = {Introducing Postural Variability Improves the Distribution of Muscular Loads During Mid-air Gestural Interaction},
  booktitle = {Proceedings of the 9th International Conference on Motion in Games},
  series = {MIG '16},
  year = {2016},
  isbn = {978-1-4503-4592-7},
  location = {Burlingame, California},
  pages = {155--160},
  numpages = {6},
  url = {http://doi.acm.org/10.1145/2994258.2994278},
  doi = {10.1145/2994258.2994278},
  acmid = {2994278},
  publisher = {ACM},
  address = {New York, NY, USA},
  keywords = {biomechanical simulation, docking task, gorilla arm, mid-air 3D interaction, physical ergonomics}
}
@article{Heloir2016-SLTAT_UAIS,
  author = {Heloir, Alexis
  and Nunnari, Fabrizio},
  title = {Toward an intuitive sign language animation authoring system for the deaf},
  journal = {Universal Access in the Information Society},
  year = {2016},
  volume = {15},
  number = {4},
  pages = {513--523},
  issn = {1615-5297},
  doi = {10.1007/s10209-015-0409-0},
  url = {http://dx.doi.org/10.1007/s10209-015-0409-0},
  publisher = {Springer}
}
@inproceedings{Gena:2016:AVI:2909132.2927470,
  author = {Gena, Cristina and De Carolis, Berardina and Kuflik, Tsvi and Nunnari, Fabrizio},
  title = {Advanced Visual Interfaces for Cultural Heritage},
  booktitle = {Proceedings of the International Working Conference on Advanced Visual Interfaces},
  series = {AVI '16},
  year = {2016},
  isbn = {978-1-4503-4131-8},
  location = {Bari, Italy},
  pages = {360--362},
  numpages = {3},
  url = {http://doi.acm.org/10.1145/2909132.2927470},
  doi = {10.1145/2909132.2927470},
  acmid = {2927470},
  publisher = {ACM},
  address = {New York, NY, USA},
  keywords = {Advanced Visualization, Cultural Heritage, Workshop},
  pdf = {2016/p360-gena.pdf}
}
@inproceedings{Gillies:2016:HML:2851581.2856492,
  author = {Gillies, Marco and Fiebrink, Rebecca and Tanaka, Atau and Garcia, J{\'e}r{\'e}mie and Bevilacqua, Fr{\'e}d{\'e}ric and Heloir, Alexis and Nunnari, Fabrizio and Mackay, Wendy and Amershi, Saleema and Lee, Bongshin and d'Alessandro, Nicolas and Tilmanne, Jo\"{e}lle and Kulesza, Todd and Caramiaux, Baptiste},
  title = {Human-Centred Machine Learning},
  booktitle = {Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
  series = {CHI EA '16},
  year = {2016},
  isbn = {978-1-4503-4082-3},
  location = {Santa Clara, California, USA},
  pages = {3558--3565},
  numpages = {8},
  url = {http://doi.acm.org/10.1145/2851581.2856492},
  doi = {10.1145/2851581.2856492},
  acmid = {2856492},
  publisher = {ACM},
  address = {New York, NY, USA},
  keywords = {data, machine learning, user-centered design},
  pdf = {2016/ea3558-gillies.pdf}
}
@inbook{Heloir2015-PervHealth,
  author = {Heloir, Alexis
  and Nunnari, Fabrizio
  and Haudegond, Sylvain
  and Havrez, Cl{\'e}mentine
  and Lebrun, Yoann
  and Kolski, Christophe},
  chapter = {Design and Evaluation of a Self Adaptive Architecture for Upper-Limb Rehabilitation},
  title = {ICTs for Improving Patients Rehabilitation Research Techniques: Second International Workshop, REHAB 2014, Oldenburg, Germany, May 20-23, 2014, Revised Selected Papers},
  year = {2015},
  publisher = {Springer Berlin Heidelberg},
  address = {Berlin, Heidelberg},
  pages = {196--209},
  isbn = {978-3-662-48645-0},
  doi = {10.1007/978-3-662-48645-0_17},
  url = {http://dx.doi.org/10.1007/978-3-662-48645-0_17},
  pdf = {2015/chp-10.1007-978-3-662-48645-0_17.pdf}
}
@inproceedings{nunnari_deeva_2015,
  entrysubtype = {workshop},
  author = {Fabrizio Nunnari and Alexis Heloir},
  title = {DeEvA, a Depot of Evolving Avatars},
  booktitle = {Proceedings of Workshop and Poster Papers of the European Conference on Ambient Intelligence 2015},
  year = {2015},
  publisher = {CEUR-WS},
  location = {Athens, Greece},
  month = {November},
  issn = {1613-0073},
  url = {http://ceur-ws.org/Vol-1528/},
  pdf = {2015/nunnari15AFFIN.pdf}
}
@article{nunnari2015FIEE,
  author = {Fabrizio Nunnari and Alexis Heloir},
  title = {Exploiting Reverse Correlation for the Generation of Virtual Characters from Personality Traits},
  journal = {EAI Endorsed Transactions on Future Intelligent Educational Environments},
  volume = {15},
  number = {4},
  publisher = {EAI},
  journal_a = {FIEE},
  year = {2015},
  month = {August},
  keywords = {reverse correlation, virtual characters, interactive genetic algorithms, crowdsourcing},
  doi = {10.4108/icst.intetain.2015.259583},
  url = {http://eudl.eu/doi/10.4108/icst.intetain.2015.259583},
  pdf = {2015/icst.intetain.2015.259583.pdf}
}
@article{10.4108/ct.2.3.e4,
  author = {Fabrizio Nunnari and Alexis Heloir},
  title = {Evaluation of a Facial Animation Authoring Pipeline Seamlessly Supporting Performance Capture and Manual Key-pose Editing},
  journal = {EAI Endorsed Transactions on Creative Technologies},
  volume = {15},
  number = {3},
  publisher = {ICST},
  journal_a = {CT},
  year = {2015},
  month = {June},
  keywords = {facial animation, performance capture, retargeting, evaluation, animation authoring},
  doi = {10.4108/ct.2.3.e4},
  pdf = {2015/ct.2.3.e4.pdf}
}
@inproceedings{Heloir-sltat15,
  entrysubtype = {workshop},
  author = {Alexis Heloir and Fabrizio Nunnari},
  title = {Exploring novel interaction methods for authoring sign language animations},
  booktitle = {Proceedings of the Fourth International Symposium on Sign Language Translation and Avatar Technology (SLTAT)},
  year = {2015},
  month = {April},
  days = {9--10},
  location = {Paris, France},
  numpages = {2},
  pdf = {2015/heloir15sltat.pdf}
}
@inproceedings{Heloir:2014:DSA:2686893.2686962,
  entrysubtype = {workshop},
  author = {Heloir, Alexis and Nunnari, Fabrizio and Haudegond, Sylvain and Lebrun, Yoann and Kolski, Christophe},
  title = {Description of a Self-adaptive Architecture for Upper-limb Rehabilitation},
  booktitle = {Proceedings of the 8th International Conference on Pervasive Computing Technologies for Healthcare},
  series = {PervasiveHealth '14},
  year = {2014},
  isbn = {978-1-63190-011-2},
  location = {Oldenburg, Germany},
  pages = {317--320},
  numpages = {4},
  url = {http://dx.doi.org/10.4108/icst.pervasivehealth.2014.255246},
  doi = {10.4108/icst.pervasivehealth.2014.255246},
  acmid = {2686962},
  publisher = {ICST (Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering)},
  address = {Brussels, Belgium},
  keywords = {gesture based interaction, motor rehabilitation, training tools for rehabilitation, virtual rehabilitation},
  pdf = {2014/icst.pervasivehealth.2014.255246.pdf}
}
@inproceedings{Heloir:2014:AHS:2670444.2670456,
  author = {Heloir, Alexis and Nunnari, Fabrizio and Kolski, Christophe},
  title = {Adaptive Hand-tracked System for 3D Authoring},
  booktitle = {Proceedings of the 26th Conference on L'Interaction Homme-Machine},
  series = {IHM '14},
  year = {2014},
  isbn = {978-1-4503-2935-4},
  location = {Villeneuve d'Ascq, France},
  pages = {101--104},
  numpages = {4},
  url = {http://doi.acm.org/10.1145/2670444.2670456},
  doi = {10.1145/2670444.2670456},
  acmid = {2670456},
  publisher = {ACM},
  address = {New York, NY, USA},
  keywords = {3D authoring, gestural input},
  pdf = {2014/heloir-IHM14.pdf}
}
@inproceedings{Nunnari-iva14,
  year = {2014},
  isbn = {978-3-319-09767-1},
  booktitle = {Intelligent Virtual Agents},
  volume = {8637},
  series = {Lecture Notes in Computer Science},
  editor = {Bickmore, Timothy and Marsella, Stacy and Sidner, Candace},
  doi = {10.1007/978-3-319-09767-1_40},
  title = {Mapping Personality to the Appearance of Virtual Characters Using Interactive Genetic Algorithms},
  location = {Boston, USA},
  url = {http://dx.doi.org/10.1007/978-3-319-09767-1_40},
  publisher = {Springer International Publishing},
  address = {Cham},
  keywords = {Character generation; genetic algorithms; avatar; virtual character; personality traits; OCEAN model},
  author = {Nunnari, Fabrizio and Heloir, Alexis},
  pages = {316--319},
  language = {English},
  pdf = {2014/nunnari-IVA2014.pdf}
}
@inproceedings{Nunnari-aisb14,
  entrysubtype = {workshop},
  author = {Fabrizio Nunnari and Alexis Heloir},
  title = {A Self Adaptive Architecture for Hand-Tracked 3D Authoring Interface},
  booktitle = {AISB Workshop on Machine Learning, Expressive Movement, Interaction Design, and Creative Applications},
  year = {2014},
  month = {April},
  days = {1--2},
  location = {London, UK},
  numpages = {4},
  pdf = {2014/2014_AISB_GestureWorkshop_.pdf}
}
@inproceedings{Heloir-sltat13,
  entrysubtype = {workshop},
  author = {Alexis Heloir and Fabrizio Nunnari},
  title = {Towards an Intuitive Sign Language Animation Authoring Environment},
  booktitle = {Proceedings of the Third International Symposium on Sign Language Translation and Avatar Technology (SLTAT)},
  year = {2013},
  month = {October},
  days = {18--19},
  location = {Chicago, USA},
  numpages = {8},
  pdf = {2013/heloir-sltat13.pdf}
}
@article{Damiano2013EntComp,
  title = {Virtual agents for the production of linear animations},
  journal = {Entertainment Computing},
  volume = {4},
  number = {3},
  pages = {187--194},
  year = {2013},
  issn = {1875-9521},
  doi = {10.1016/j.entcom.2013.06.001},
  url = {http://www.sciencedirect.com/science/article/pii/S1875952113000074},
  author = {Rossana Damiano and Vincenzo Lombardo and Fabrizio Nunnari}
}
@inproceedings{Damiano:2013:LWG:2499149.2499157,
  author = {Damiano, Rossana and Gena, Cristina and Lombardo, Vincenzo and Nunnari, Fabrizio},
  title = {Leveraging Web 3D guidance in cultural heritage fruition},
  booktitle = {Proceedings of the Biannual Conference of the Italian Chapter of SIGCHI},
  series = {CHItaly '13},
  year = {2013},
  isbn = {978-1-4503-2061-0},
  location = {Trento, Italy},
  pages = {1:1--1:10},
  articleno = {1},
  numpages = {10},
  url = {http://doi.acm.org/10.1145/2499149.2499157},
  doi = {10.1145/2499149.2499157},
  acmid = {2499157},
  publisher = {ACM},
  address = {New York, NY, USA}
}
@inproceedings{Damiano:2012:GWC:2338714.2338753,
  author = {Damiano, Rossana and Lombardo, Vincenzo and Gena, Cristina and Nunnari, Fabrizio},
  title = {Guidance for web 3D in cultural heritage dissemination},
  booktitle = {Proceedings of the 17th International Conference on 3D Web Technology},
  series = {Web3D '12},
  year = {2012},
  isbn = {978-1-4503-1432-9},
  location = {Los Angeles, California},
  pages = {186},
  numpages = {1},
  url = {http://doi.acm.org/10.1145/2338714.2338753},
  doi = {10.1145/2338714.2338753},
  acmid = {2338753},
  publisher = {ACM},
  address = {New York, NY, USA},
  pdf = {2012/lombardo-Web3D2012-poster.pdf}
}
@inproceedings{Lombardo2012,
  author = {Lombardo, Vincenzo
  and Nunnari, Fabrizio
  and Damiano, Rossana},
  title = {The AnimaTricks System: Animating Intelligent Agents from High-Level Goal Declarations},
  booktitle = {Intelligent Technologies for Interactive Entertainment},
  year = {2012},
  publisher = {Springer Berlin Heidelberg},
  address = {Berlin, Heidelberg},
  pages = {203--208},
  isbn = {978-3-642-30214-5},
  doi = {10.1007/978-3-642-30214-5_22},
  url = {http://dx.doi.org/10.1007/978-3-642-30214-5_22}
}
@inproceedings{Dami1110:150,
  author = {Rossana Damiano and Cristina Gena and Vincenzo Lombardo and Fabrizio Nunnari and Andrea Crevola and Alessandra Suppini},
  title = {150 Digit. Integrating {3D} visit and social functions into a Web {3.0} learning-oriented approach},
  booktitle = {Sixth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA 2011)},
  location = {Barcelona, Spain},
  days = {26},
  month = {October},
  year = {2011},
  publisher = {IEEE Computer Society},
  pdf = {2011/damiano-bwcca11.pdf}
}
@inproceedings{lombardo-jvrc11,
  title = {{MESH} - Mise en scène Helper},
  author = {Vincenzo Lombardo and Fabrizio Nunnari and Davide Di Giannantonio and Jacopo Landi and Paolo Armao and Flavia Confaloni and Shanti May},
  year = {2011},
  publisher = {VTT},
  booktitle = {Proceedings of the 2011 Joint Virtual Reality Conference},
  pages = {27--32},
  location = {Nottingham, UK},
  isbn = {978-951-38-7602-9},
  pdf = {2011/lombardo-jvrc11.pdf}
}
@inproceedings{Lombardo:2011:AII:2055442.2056214,
  author = {Lombardo, Vincenzo and Battaglino, Cristina and Damiano, Rossana and Nunnari, Fabrizio},
  title = {An Avatar-based Interface for the Italian Sign Language},
  booktitle = {Proceedings of the 2011 International Conference on Complex, Intelligent, and Software Intensive Systems},
  series = {CISIS '11},
  year = {2011},
  month = {June},
  isbn = {978-0-7695-4373-4},
  pages = {589--594},
  numpages = {6},
  url = {http://dx.doi.org/10.1109/CISIS.2011.97},
  doi = {10.1109/CISIS.2011.97},
  location = {Korean Bible University (KBU), Seoul, Korea},
  acmid = {2056214},
  publisher = {IEEE Computer Society},
  address = {Washington, DC, USA},
  pdf = {2011/lombardo-cisis11.pdf}
}
@incollection{kersten-scbook11,
  author = {Stefan Kersten and Vincenzo Lombardo and Fabrizio Nunnari and Andrea Valle},
  editor = {Scott Wilson and David Cottle and Nick Collins},
  title = {A Binaural Simulation of {V}arèse's {P}oème électronique},
  booktitle = {The {SuperCollider} {Book}},
  chapter = {19},
  pages = {577--587},
  isbn = {978-0-262-23269-2},
  publisher = {MIT Press},
  address = {Cambridge, Massachusetts},
  year = {2011}
}
@inproceedings{Lombardo:2010:VII:1889075.1889100,
  author = {Lombardo, Vincenzo and Nunnari, Fabrizio and Damiano, Rossana},
  title = {A virtual interpreter for the Italian sign language},
  booktitle = {Proceedings of the 10th International Conference on Intelligent Virtual Agents},
  series = {IVA'10},
  year = {2010},
  isbn = {978-3-642-15891-9},
  location = {Philadelphia, PA},
  pages = {201--207},
  numpages = {7},
  url = {http://dl.acm.org/citation.cfm?id=1889075.1889100},
  acmid = {1889100},
  publisher = {Springer-Verlag},
  address = {Berlin, Heidelberg},
  pdf = {2010/lombardo-iva10.pdf}
}
@inproceedings{bertoldi2010LREC,
  entrysubtype = {workshop},
  title = {On the creation and the annotation of a large-scale Italian-LIS parallel corpus},
  author = {Nicola Bertoldi and Gabriele Tiotto and Paolo Prinetto and Elio Piccolo and Fabrizio Nunnari and Vincenzo Lombardo and Alessandro Mazzei and Rossana Damiano and Leonardo Lesmo and Del Principe, Andrea},
  booktitle = {Proceedings of the 4th LREC Workshop on the Representation and Processing of Sign Languages: Corpora and Sign Language Technologies (CLST-LREC 2010)},
  location = {Malta},
  month = {May},
  pages = {19--22},
  year = {2010},
  url = {https://www.sign-lang.uni-hamburg.de/lrec2010/programme.html}
}
@inproceedings{Lombardo:2009:TE:1631272.1631511,
  author = {Lombardo, Vincenzo and Valle, Andrea and Nunnari, Fabrizio},
  title = {Tabula ex-cambio},
  booktitle = {Proceedings of the 17th ACM International Conference on Multimedia},
  series = {MM '09},
  year = {2009},
  isbn = {978-1-60558-608-3},
  location = {Beijing, China},
  pages = {1053--1062},
  numpages = {10},
  url = {http://doi.acm.org/10.1145/1631272.1631511},
  doi = {10.1145/1631272.1631511},
  acmid = {1631511},
  publisher = {ACM},
  address = {New York, NY, USA},
  pdf = {2009/lombardo-acmmm09.pdf}
}
@inproceedings{1563886,
  author = {Damiano, Rossana and Lombardo, Vincenzo and Nunnari, Fabrizio and Pizzo, Antonio},
  title = {Ontological Domain Coding for Cultural Heritage Mediation},
  booktitle = {Proceedings of the 2008 Conference on Formal Ontologies Meet Industry},
  year = {2008},
  location = {Torino, Italy},
  isbn = {978-1-58603-871-7},
  pages = {88--99},
  publisher = {IOS Press},
  address = {Amsterdam, The Netherlands},
  url = {http://dl.acm.org/citation.cfm?id=1563875.1563886},
  pdf = {2008/damiano-fomi08.pdf}
}
@article{1455883,
  author = {Rossana Damiano and Cristina Gena and Vincenzo Lombardo and Fabrizio Nunnari and Antonio Pizzo},
  title = {A stroll with Carletto: adaptation in drama-based tours with virtual characters},
  journal = {User Modeling and User-Adapted Interaction},
  volume = {18},
  number = {5},
  year = {2008},
  issn = {0924-1868},
  pages = {417--453},
  publisher = {Springer Netherlands},
  doi = {10.1007/s11257-008-9053-1},
  pdf = {2008/damiano-umuai08.pdf}
}
@article{lombardo08canonical,
  author = {Vincenzo Lombardo and Fabrizio Nunnari and Rossana Damiano and Antonio Pizzo and Cristina Gena},
  journal = {Multimedia Systems},
  title = {The canonical processes of a dramatized approach to information presentation},
  volume = {14},
  number = {6},
  pages = {385--393},
  year = {2008},
  issn = {0942-4962},
  publisher = {Springer Berlin / Heidelberg},
  doi = {10.1007/s00530-008-0137-x},
  pdf = {2008/lombardo-canonical08.pdf}
}
@inproceedings{lombardo-acmmm06,
  title = {Archeology of Multimedia},
  author = {Vincenzo Lombardo and Andrea Valle and Fabrizio Nunnari and Francesco Giordana and Andrea Arghinenti},
  booktitle = {ACM Multimedia 2006},
  year = {2006},
  pages = {269--278},
  month = {October},
  location = {Santa Barbara, CA, USA},
  publisher = {ACM},
  isbn = {1-59593-447-2},
  pdf = {2006/lombardo-acmmm06.pdf},
  doi = {10.1145/1180639.1180706}
}
@inproceedings{damiano-ecai06,
  author = {Rossana Damiano and Vincenzo Lombardo and Antonio Pizzo and Fabrizio Nunnari},
  title = {Dramatization Meets Narrative Presentations},
  booktitle = {European Conference on Artificial Intelligence (ECAI)},
  year = {2006},
  pages = {31--35},
  publisher = {IOS Press},
  location = {Riva del Garda, Italy},
  isbn = {1-58603-642-4},
  url = {http://dl.acm.org/citation.cfm?id=1567016.1567029},
  pdf = {2006/damiano-ecai06.pdf}
}
@inproceedings{damiano-ecai06a,
  author = {Rossana Damiano and Cristina Galia and Vincenzo Lombardo and Fabrizio Nunnari},
  title = {Character-based guided tours: the Dramatour project},
  booktitle = {System demonstrations at the European Conference on Artificial Intelligence (ECAI)},
  year = {2006},
  pages = {1--2},
  publisher = {IOS Press},
  location = {Riva del Garda, Italy},
  isbn = {1-58603-642-4},
  pdf = {2006/damiano-ecai06a.pdf}
}
@inproceedings{lombardo-icmc05,
  author = {V. Lombardo and A. Arghinenti and F. Nunnari and A. Valle and H. Vogel and J. Fitch and R. Dobson and J. Padget and K. Tazelaar and S. Weinzierl and S. Benser and S. Kersten and R. Starosolski and W. Birczyk and W. Pytlik and S. Niedbala},
  title = {The Virtual Electronic Poem (VEP) Project},
  booktitle = {International Computer Music Conference (ICMC)},
  location = {Escola Superior de Musica de Catalunya, Barcelona},
  month = {September},
  year = {2005},
  pages = {451--454},
  publisher = {SuviSoft Oy Ltd},
  pdf = {2005/lombardo-icmc05.pdf}
}
@phdthesis{nunnari-phd05,
  author = {Fabrizio Nunnari},
  title = {ThreeDness: a framework for the creation of customizable 3D awareness interfaces},
  school = {Computer Science Department, University of Torino, Italy},
  month = {January},
  year = {2005},
  address = {Dipartimento di Informatica, C.so Svizzera 185, 10149 - Torino, Italy},
  pdf = {2005/nunnari-phd05.pdf}
}
@inproceedings{manzoni-wetice04,
  entrysubtype = {workshop},
  author = {Sara Manzoni and Fabrizio Nunnari and Giuseppe Vizzari},
  title = {Towards a Model for Ubiquitous and Mobile Computing},
  booktitle = {13th IEEE International Workshops on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE)},
  year = {2004},
  issn = {1524-4547},
  location = {University of Modena and Reggio Emilia, Italy},
  month = {June},
  pages = {423--428},
  isbn = {0-7695-2183-5},
  doi = {10.1109/ENABL.2004.71},
  publisher = {IEEE Computer Society},
  pdf = {2004/manzoni-wetice04.pdf}
}
@inproceedings{nunnari-avi04,
  author = {Fabrizio Nunnari and Carla Simone},
  title = {Perceiving awareness information through 3D representations},
  booktitle = {AVI '04: Proceedings of the Working Conference on Advanced Visual Interfaces},
  year = {2004},
  month = {May},
  isbn = {1-58113-867-9},
  pages = {443--446},
  location = {Gallipoli, Italy},
  doi = {10.1145/989863.989947},
  publisher = {ACM Press},
  address = {New York, NY, USA},
  pdf = {2004/nunnari-avi04.pdf}
}
@inproceedings{nunnari-coop04,
  booktitle = {Proceedings of COOP2004},
  publisher = {IOS Press},
  title = {Three{D}map: customizing awareness information},
  pages = {39--54},
  author = {Fabrizio Nunnari},
  location = {Presqu'ile de Gien, Hy\`eres, France},
  month = {May},
  year = {2004},
  isbn = {978-1-58603-422-1},
  pdf = {2004/nunnari-coop04.pdf}
}
@article{bandini-jctw04,
  title = {Supporting the sense of presence in control environments},
  volume = {6},
  author = {S. Bandini and S. Manzoni and F. Nunnari and C. Simone},
  number = {1},
  publisher = {Springer},
  journal = {International Journal of Cognition, Technology and Work (JCTW)},
  pages = {49--52},
  year = {2004},
  doi = {10.1007/s10111-003-0137-8},
  issn = {1435-5558},
  pdf = {2004/bandini-jctw04.pdf}
}
@inproceedings{nunnari-coop02,
  booktitle = {Cooperative systems design: a challenge of the mobility age (COOP2002)},
  isbn = {4-274-90503-9},
  pages = {7--22},
  publisher = {IOS Press},
  title = {Three{D}ness: representing awareness in cooperative applications},
  author = {Fabrizio Nunnari and Carla Simone},
  location = {Saint Rapha\"el , FRANCE},
  month = {June},
  year = {2002},
  pdf = {2002/nunnari-coop02.pdf},
  url = {http://www.iospress.nl/book/cooperative-systems-design-2/}
}

This file was generated by bibtex2html 1.99.
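% For reference, a minimal sketch of how an HTML rendering of this file can
% be produced with the bibtex2html tool named above. The flags below are
% assumptions based on common bibtex2html usage, not taken from this file:
%
%   bibtex2html -d -r -o nunnari nunnari.bib
%
% Here -d sorts entries by date, -r lists the newest first, and -o sets the
% output file prefix, so the command would write nunnari.html.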