#
# Copy/paste the contents of this file and save with extension .bib in order to use elsewhere
# Generated on: 30th September 2024 20:46 on https://www.michelepasin.org
# Clean up: https://flamingtempura.github.io/bibtex-tidy/index.html
#
@book{Pasin21,
author = {Michele Pasin and Enrico Motta and Zdenek Zdrahal},
title = {Semantic Technologies for ELearning: A case study in the philosophical domain: representing Wittgenstein's Tractatus Logico-Philosophicus},
publisher = {LAP LAMBERT Academic Publishing},
month = "08",
year = "2010",
abstract = "What does it mean for a student to come to an understanding of a philosophical standpoint and can the explosion of resources now available on the web support this process, or is it inclined instead to create more confusion? We believe that a possible answer to the problem of finding a means through the morass of information on the web to the philosophical insights it conceals lies in the process of narrative pathway generation. That is, the active linking of resources into a learning path that contextualizes them with respect to one another. This result can be achieved only if the content of the resources is indexed, not just their status as a text document, an image or a video. To this aim, we propose a formal conceptualization of the domain of philosophy, an ontology that would allow the categorization of resources according to a series of pre\u002Dagreed content descriptors. Within an e\u002Dlearning scenario, a teacher could use a tool comprising such an ontology to annotate at various levels of granularity available philosophical materials, and let the students explore this semantic space in an unsupervised manner, according to pre\u002Ddefined narrative pathways.",
}
@article{Pasin536,
author = {Joerg Sixt and Michele Pasin},
title = {Dimensions: Calculating Disruption Indices at Scale},
journal = {Quantitative Science Studies},
month = "09",
year = "2024",
abstract = "Evaluating the disruptive nature of academic ideas is a new area of research evaluation that moves beyond standard citation\u002Dbased metrics by taking into account the broader citation context of publications or patents. The \u0022CD index\u0022 and a number of related indicators have been proposed in order to characterise mathematically the disruptiveness of scientific publications or patents. This research area has generated a lot of attention in recent years, yet there is no general consensus on the significance and reliability of disruption indices. More experimentation and evaluation would be desirable, however is hampered by the fact that these indicators are expensive and time\u002Dconsuming to calculate, especially if done at scale on large citation networks. We present a novel method to calculate disruption indices that leverages the Dimensions cloud\u002Dbased research infrastructure and reduces the computational time taken to produce such indices by an order of magnitude, as well as making available such functionalities within an online environment that requires no set\u002Dup efforts. We explain the novel algorithm and describe how its results align with preexisting implementations of disruption indicators. This method will enable researchers to develop, validate and improve mathematical disruption models more quickly and with more precision, thus contributing to the development of this new research area.",
}
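#
# Note on the “CD index” mentioned in the abstract above: in its commonly cited
# Funk & Owen-Smith formulation (an assumption here, not necessarily the exact
# variant implemented in this paper), the index for a focal work over a citation
# window of t years is, in LaTeX notation:
#
#   CD_t = \frac{1}{n_t} \sum_{i=1}^{n_t} \left( -2 f_{it} b_{it} + f_{it} \right)
#
# where i ranges over the n_t works that cite the focal work and/or its references,
# f_{it} = 1 if work i cites the focal work (0 otherwise), and b_{it} = 1 if work i
# cites at least one of the focal work's references (0 otherwise).
#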
@article{Pasin53,
author = {John Bradley and Michele Pasin},
title = {Fitting Personal Interpretation with the Semantic Web: lessons learned from Pliny},
journal = {Digital Humanities Quarterly},
month = "01",
year = "2017",
abstract = "In this paper we expand Stefan Gradmann’s question at WWW2012 \u0022Thinking in the graph: will Digital Humanists ever do so?\u0022 to consider whether humanists, more generally than just \u0022digital\u0022 ones, might do thinking that is, at least to some useful degree, \u0022in the graph\u0022 too. Drawing on the experience of the Pliny project, and recent work done within that project to explore how Pliny materials might connect with the semantic web, we explore ways in which structured \u0022graph\u002Dlike\u0022 thinking might be revealed in — to \u0022peek out\u0022 from — parts of humanities research that is common to digital and non\u002Ddigital humanists alike. Out of this, we propose a number of different ways that scholars might engage with the Semantic Web, and provide examples – arising from the building of a prototype extension to Pliny – of how these engagements could be dealt with. We also explore the challenge of ambiguity and incompleteness in scholarship, explain how 2D space operates in Pliny to cope, to some degree at least, with these issues, and consider the boundaries between the expressiveness of 2D space and the formal graph model of the Semantic Web. We end by proposing several possible avenues for future work that arise from our work so far.",
}
@incollection{Pasin28,
author = {Sally-Beth MacLean and Tanya Hagen and Michele Pasin},
title = {Moving Early Modern Theatre Online: The Records of Early English Drama introduces the Early Modern London Theatres Website},
booktitle = {New Technologies and Renaissance Studies II},
month = "12",
year = "2014",
abstract = "The Records of Early English Drama project is an interdisciplinary research and editorial project based at the University of Toronto. REED was founded in 1976, its primary purpose being to find, transcribe and edit for publication surviving records of drama, music and popular mimetic entertainment before 1642, when the Puritans closed the public theatres in London. Thanks to the efforts of a dedicated staff and determined editors in Canada, the US and UK, the project is still going after all these years, a hardy veteran of collaborative humanities scholarship. The list of print publications now totals twenty\u002Dseven collections in thirty\u002Dthree volumes, with a landmark collection for the Inns of Court published in 2011, the second of several for the historic city of London and its neighbouring counties (see Map of REED Collections).\u000D\u000A[...]\u000D\u000AEarly Modern London Theatres aspires to provide its users with a major encyclopedic resource on the early London stage, as well as a comprehensive historiographical survey of the field. In compiling EMLoT, we aim to identify, record and assess transcriptions from primary\u002Dsource materials relating to the early London stage, as found in secondary\u002D source print and manuscript documents. Our main criterion in distinguishing between a primary\u002D and secondary\u002Dsource document is chronological: EMLoT’s purview stops with the REED volumes (and the closing of the theatres) at 1642. Under this rubric, a primary source is a document produced before 1642, and a secondary source is one produced after 1642. There are, of course, some exceptions here. We make allowances for works known to have existed in some form before 1642, but for which the earliest surviving witness is a post\u002D1642 document. This applies primarily to play texts: many of Thomas Middleton’s and James Shirley’s works, for example, did not see publication for the first time until the 1650s. There are also a few instances in which later manuscript sources provide us with valuable contemporary evidence concerning the pre\u002D1642 stage. A petition by Elizabeth Heton, William Wintersall, and Mary Young to the Earl of Dorset, filed c 1657\u002D8, speaks of a lease entered into some thirty years ago with the Earl’s father for an old barn standing in Salisbury Court (Wickham, Ingram, and Berry 2007, 654). In such an instance, where the substance of the record clearly relates to an event that took place before 1642 (e.g., the construction of the Salisbury Court theatre) and provides evidence of major import to the history of the early London stage, we have chosen to relax our chronological parameters.",
}
@article{Pasin43,
author = {Michele Pasin and John Bradley},
title = {Factoid-based Prosopography and Computer Ontologies: towards an integrated approach},
journal = {Digital Scholarship in the Humanities},
month = "12",
year = "2014",
abstract = "Structured Prosopography provides a formal model for representing prosopography: a branch of historical research that traditionally has focused on the identification of people that appear in historical sources. Since the 1990s, KCL’s Department of Digital Humanities has been involved in the development of structured prosopographical databases using a general \u0022factoid\u002Doriented\u0022 model of structure that links people to the information about them via spots in primary sources that assert that information. Recent developments, particularly the WWW, and its related technologies around the Semantic Web have promoted the possibility to both interconnecting dispersed data, and allowing it to be queried semantically. To the purpose making available our prosopographical databases on the semantic web, in this article we review the principles behind our established factoid\u002Dbased approach and reformulate it using a more transparent approach, based on knowledge representation principles and formal ontologies. In particular, we are going to focus primarily on a high\u002Dlevel semantic analysis of the factoid notion, on its relation to other cultural heritage standards such as CIDOC\u002DCRM, and on the modularity and extensibility of the proposed solutions.",
}
@incollection{Pasin35,
author = {John Bradley and Michele Pasin},
title = {Structuring that which cannot be structured: A role for formal models in representing aspects of Medieval Scotland},
booktitle = {New Perspectives on Medieval Scotland: 1093-1286},
month = "08",
year = "2013",
abstract = "Computing offers a bit of a paradox when it comes to historical studies. On one hand, one suspects that almost all academic historians in at least Western Europe and North America have a computer both in their office and at home and use it daily for email, word processing and for surfing the World Wide Web. However, in spite of their daily contact with the machine, they view it as having little or nothing to do with the essence of their research. Now, the fact that historians use the computer every day as a part of their research activities, but both hardly notice it and probably don’t often think that it actually affects what they do, turns out to be an interesting phenomenon that is, of course, not restricted to the doing of history. Indeed, the ability of tools such as word processing, email and the WWW to fit into the normal way of doing things so that they are almost invisible, shows an aspect to computing that is significant in its own right. However, this paper presents an example of a more prominent role for the computer in the doing of history. We focus on one of the ways in which computing obviously significantly impacts on the research: representing the product of historical research as highly structured materials in databases, and use the Paradox of Medieval Scotland (PoMS 2010) project as the prime example.",
}
@article{Pasin7,
author = {Michele Pasin and Enrico Motta},
title = {Ontological Requirements for Annotation and Navigation of Philosophical Resources},
journal = {Synthese},
volume = "182",
number = "2",
publisher = {Springer},
month = "01",
year = "2011",
abstract = "In this article, we describe an ontology aimed at the representation of the relevant entities and relations in the philosophical world. We will guide the reader through our modeling choices, so to highlight the ontology’s practical purpose: to enable an annotation of philosophical resources which is capable of supporting pedagogical navigation mechanisms. The ontology covers all the aspects of philosophy, thus including characterizations of entities such as people, events, documents, and ideas. In particular, here we will present a detailed exposition of the entities belonging to the idea branch of the ontology, for they have a crucial role in the world of philosophy. Moreover, as an example of the type of applications made possible by the ontology we will introduce PhiloSurfical, a prototype tool we created to navigate contextually a classic work in twentieth century philosophy, Wittgenstein’s Tractatus Logico\u002DPhilosophicus. We discuss the potential usage of such navigation mechanisms in educational and scholarly contexts, which aim to enhance the learning process through the serendipitous discovery of relevant resources.",
}
@article{Pasin24,
author = {Michele Pasin},
title = {Review of Interontology conference 2010},
journal = {Humana Mente, Journal of Philosophical Studies},
volume = "13",
month = "05",
year = "2010",
abstract = "The third Interdisciplinary Ontology Conference was held in Tokyo, Japan, from February 27 to February 28 2010. Organized by the Japanese Center for Ontological Research (JCOR) and cosponsored by the Japanese Government‟s Ministry of Education and Science (MEXT), the stated goal of this forum is to support the “exchange of ideas and state\u002Dof\u002Dthe\u002Dart technologies for those working in the ontology domain from around the world”. The event has a quite unique flavor, for it gathers researchers from disciplines as disparate as computer science, logic and philosophy, as well as a variety of application domains. The common thread is the discipline of ontology, which has undoubtedly gone a long way since its early days in ancient Greece. We all know that ontology began as a branch of philosophy, studying the types of entities in reality and the relations between them. In the seventies, the early researchers in artificial intelligence borrowed the word from philosophy and applied it to their discipline. Consequently, if ontology used to be intended as a systematic account of Existence, within this new context, what “exists” has become that which can be represented using a computer. Disciplines such as ontology engineering were soon to be born, which investigated (among various other more technical aspects) how to best employ the rich body of theory from philosophical ontology to the purpose of making conceptual distinctions in a systematic and coherent manner. Nowadays ontology has become an established branch of computer science, which offers solutions to problem in areas as disparate as data integration, information retrieval, natural language processing, industrial planning and many others.\u000D\u000AAs already mentioned, it is not uncommon for this conference\u0027s attendees to be almost unable to follow a talk, for it uses the word \u0027ontology\u0027 in a way never heard before. This is, on the contrary, one of the most interesting aspects of the strongly interdisciplinary meeting. In the review that follows we hope to give to the reader a small taste of this feeling, and a better appreciation of the many senses we can talk about ontology in 2010.",
}
@incollection{Pasin9,
author = {Michele Pasin and Enrico Motta},
title = {PhiloSURFical: An Ontological Approach To Support Philosophy Learning},
booktitle = {Semantic Web Technologies for e-Learning},
month = "10",
year = "2009",
abstract = "As the Semantic Web is increasingly becoming a reality, the availability of large quantities of structured data brings forward new challenges. In fact, when the content of resources is indexed, not just their status as a text document, an image or a video, it becomes important to have solid semantic models which avoid as much as possible the generation of ambiguities with relation to the resources’ meaning. Within an educational context, we believe that only thanks to these models it is possible to organize and present resources in a dynamic and contextual manner. This can be achieved through a process of narrative pathway generation, that is, the active linking of resources into a learning path that contextualizes them with respect to one another. We are experimenting this approach in the PhiloSurfical tool, aimed at supporting philosophy students in understanding a text, by presenting them ‘maps’ of relevant learning resources. An ontology describing the multiple aspects of the philosophical world plays a central role in this system. In this chapter we want to discuss some lessons\u002Dlearned during the modeling process, which have been crystallized into a series of reusable patterns. We present three of these patterns, showing how they can support different context\u002Dbased reasoning tasks and allow a formal conceptualization of ambiguities that are primarily philosophy\u002Drelated but can be easily found in other domains too. In particular, we describe a practical use of the ontology in the context of a classic work in twentieth century philosophy, Wittgenstein’s Tractatus Logico\u002DPhilosophicus. ",
}
@article{Pasin8,
author = {Vanessa Lopez and Victoria Uren and Enrico Motta and Michele Pasin},
title = {AquaLog: An ontology-driven question answering system for organizational semantic intranets},
journal = {Journal of Web Semantics},
month = "09",
year = "2007",
abstract = "The semantic web vision is one in which rich, ontology\u002Dbased semantic markup will become widely available. The availability of semantic markup on the web opens the way to novel, sophisticated forms of question answering. AquaLog is a portable question\u002Danswering system which takes queries expressed in natural language and an ontology as input, and returns answers drawn from one or more knowledge bases (KBs). We say that AquaLog is portable because the configuration time required to customize the system for a particular ontology is negligible. AquaLog presents an elegant solution in which different strategies are combined together in a novel way. It makes use of the GATE NLP platform, string metric algorithms, WordNet and a novel ontology\u002Dbased relation similarity service to make sense of user queries with respect to the target KB. Moreover it also includes a learning component, which ensures that the performance of the system improves over the time, in response to the particular community jargon used by end users.",
}
@inproceedings{Pasin511,
author = {Michele Pasin and Richard Abdill},
title = {Generating large-scale network analyses of scientific landscapes in seconds using Dimensions on Google BigQuery},
booktitle = {International Conference on Science, Technology and Innovation Indicators (STI 2022)},
month = "09",
year = "2022",
abstract = "The growth of large, programatically accessible bibliometrics databases presents new opportunities for complex analyses of publication metadata. In addition to providing a wealth of information about authors and institutions, databases such as those provided by Dimensions also provide conceptual information and links to entities such as grants, funders and patents. However, data is not the only challenge in evaluating patterns in scholarly work: These large datasets can be challenging to integrate, particularly for those unfamiliar with the complex schemas necessary for accommodating such heterogeneous information, and those most comfortable with data mining may not be as experienced in data visualisation. Here, we present an open\u002Dsource Python library that streamlines the process accessing and diagramming subsets of the Dimensions on Google BigQuery database and demonstrate its use on the freely available Dimensions COVID\u002D19 dataset. We are optimistic that this tool will expand access to this valuable information by streamlining what would otherwise be multiple complex technical tasks, enabling more researchers to examine patterns in research focus and collaboration over time.",
}
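#
# Illustration only (this is not the open-source library described in the paper):
# a minimal Python sketch of querying the Dimensions COVID-19 dataset on Google
# BigQuery with the standard google-cloud-bigquery client. The dataset path
# `covid-19-dimensions-ai.data.publications`, the `year` field and the project id
# are assumptions; authentication and billing must already be configured.
#
#   from google.cloud import bigquery
#
#   client = bigquery.Client(project="your-gcp-project")  # hypothetical project id
#   sql = """
#       SELECT year, COUNT(*) AS n_publications
#       FROM `covid-19-dimensions-ai.data.publications`    -- assumed dataset path
#       GROUP BY year
#       ORDER BY year
#   """
#   for row in client.query(sql).result():  # run the query and iterate result rows
#       print(row.year, row.n_publications)
#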
@inproceedings{Pasin70,
author = {Beyza Yaman and Michele Pasin and Markus Freudenberg},
title = {Interlinking SciGraph and DBpedia datasets using Link Discovery and Named Entity Recognition Techniques},
booktitle = {Second biennial conference on Language, Data and Knowledge (LDK 2019)},
month = "05",
year = "2019",
abstract = "In recent years we have seen a proliferation of Linked Open Data (LOD) compliant datasets becoming available on the web, leading to an increased number of opportunities for data consumers to build smarter applications which integrate data coming from disparate sources. However, often the integration is not easily achievable since it requires discovering and expressing associations across heterogeneous data sets. The goal of this work is to increase the discoverability and reusability of the scholarly data by integrating them to highly interlinked datasets in the LOD cloud. In order to do so we applied techniques that a) improve the identity resolution across these two sources using Link Discovery for the structured data (i.e. by annotating Springer Nature (SN) SciGraph entities with links to DBpedia entities), and b) enriching SN SciGraph unstructured text content (document abstracts) with links to DBpedia entities using Named Entity Recognition (NER). We published the results of this work using standard vocabularies and provided an interactive exploration tool which presents the discovered links w.r.t. the breadth and depth of the DBpedia classes.",
}
@inproceedings{Pasin54,
author = {Tony Hammond and Michele Pasin and Evangelos Theodoridis},
title = {Data integration and disintegration: Managing Springer Nature SciGraph with SHACL and OWL},
booktitle = {Industry Track, International Semantic Web Conference (ISWC-17)},
month = "10",
year = "2017",
abstract = "We give an overview of the technical challenges involved in building a large\u002Dscale linked data knowledge graph, with a focus on the processes involving the normalization and control of data both entering and leaving the graph. In particular, we discuss how we are leveraging features of the Shapes Constraint Language (SHACL) to combine closed\u002Dworld, constrained views over an enterprise data integration setting with the open\u002Dworld (OWL), unconstrained setting of the global semantic web.",
}
@inproceedings{Pasin55,
author = {Matteo Romanello and Michele Pasin},
title = {Using Linked Open Data to Bootstrap a Knowledge Base of Classical Texts},
booktitle = {WHiSe 2017 - 2nd Workshop on Humanities in the Semantic web (colocated with ISWC17)},
month = "10",
year = "2017",
abstract = "We describe a domain\u002Dspecific knowledge base aimed at sup\u002D porting the extraction of bibliographic references in the domain of Clas\u002D sics. In particular, we deal with references to canonical works of the Greek and Latin literature by providing a model that represents key as\u002D pects of this domain such as names and abbreviations of authors, the canonical structure of classical works, and links to related web resources. Finally, we show how the availability of linked data in the emerging Graph of Ancient World Data has helped bootstrapping the creation of our knowledge base.\u000D\u000A",
}
@inproceedings{Pasin49,
author = {Tony Hammond and Michele Pasin},
title = {Learning how to become a linked data publisher: the nature.com ontologies portal},
booktitle = {5th Workshop on Linked Science 2015, colocated with ISWC 2015},
month = "09",
year = "2015",
abstract = "This paper summarizes work done by Macmillan Science and Education to create a publicly accessible repository of the data models and datasets which underlie its semantic publishing architecture. In particular, we give a brief history of our work with linked data, we describe the nature.com ontologies portal and the data models and datasets we have published, we discuss mappings to external datasets and a mechanism for publishing to different knowledge\u002Dbases, and finally we provide some data handling best practices and conclude with a few hopes for future development.\u000D\u000A",
}
@inproceedings{Pasin48,
author = {Michele Pasin},
title = {ResQuotes.com: Turn your Notes and Highlights into Research Ideas},
booktitle = {Force11 - Research Communications and e-Scholarship conference},
month = "01",
year = "2015",
abstract = "Summary: www.resquotes.com is a personal information management tool that supports the extraction, indexing and reorganization of digital annotations (notes, highlights, text snippets) researchers create while working with electronic texts or other scholarly literature available in digital format. The goal of ResearchQuotes is to make it easier for people to review, compare, combine and share novel ideas as they are found in the texts they read. Different levels of privacy controls and support for collaborative work are being developed, so to incrementally make ResearchQuotes also a platform for the exploration and discovery of trending ideas within research communities.",
}
@inproceedings{Pasin47,
author = {Tony Hammond and Michele Pasin},
title = {Linked data experience at Macmillan: Building discovery services for scientific and scholarly content on top of a semantic data model},
booktitle = {International Semantic Web Conference (ISWC-14)},
month = "10",
year = "2014",
abstract = "This paper presents recent work carried out at Macmillan Science and Education in evolving a traditional XML\u002Dbased, document\u002D centric enterprise publishing platform into a scalable, thing\u002Dcentric and RDF\u002Dbased semantic architecture. Performance and robustness guarantees required by our online products on the one hand, and the need to support legacy architectures on the other, led us to develop a hybrid infrastructure in which the data is modelled throughout in RDF but is replicated and distributed between RDF and XML data stores for efficient retrieval. A recently launched product – dynamic pages for scientific subject terms – is briefly introduced as a result of this semantic publishing architecture.",
}
@inproceedings{Pasin46,
author = {Matteo Romanello and Michele Pasin},
title = {Citations and Annotations in Classics: Old Problems and New Perspectives},
booktitle = {Collaborative Annotation in Shared Environments: Metadata, vocabularies and techniques in the Digital Humanities (workshop co-located with ACM DocEng 2013 Conference)},
month = "09",
year = "2013",
abstract = "Annotations played a major role in Classics since the very beginning of the discipline. Some of the first attested examples of philological work, the so\u002Dcalled scholia, were in fact marginalia, namely comments written at the margins of a text. Over the centuries this kind of scholarship evolved until it became a genre on its own, the classical commentary, thus moving away from the text with the result that philologists had to devise a solution to linking together the commented and the commenting text. The solution to this problem is the system of canonical citations, a special kind of bibliographic references that are at the same time very precise and interoperable.\u000D\u000A\u000D\u000AIn this paper we present HuCit, an ontology which mod\u002D els in depth the semantics of canonical citations. Its main goal is to underpin a Knowledge Base providing the information that an automatic expert system requires in order to read and interpret correctly this type of citations. Finally, we describe how HuCit can be combined together with the OAC ontology to publish in a semantic format a specific kind of machine\u002Dgenerated annotations, that is annotations concerning the canonical citations contained in secondary sources (e.g. journal articles, commentaries, etc.).",
}
@inproceedings{Pasin45,
author = {John Bradley and Michele Pasin},
title = {Fitting Personal Interpretations with the Semantic Web},
booktitle = {Digital Humanities 2013},
month = "07",
year = "2013",
abstract = "",
}
@inproceedings{Pasin40,
author = {Michele Pasin},
title = {Exploring Prosopographical Resources Through Novel Tools and Visualizations: a Preliminary Investigation},
booktitle = {Digital Humanities 2012},
month = "07",
year = "2012",
abstract = "Structured Prosopography provides a formal model for representing prosopography: a branch of historical research that traditionally has focused on the identification of people that appear in historical sources (Verboven et al. 2007). Thanks to computing technologies, structured prosopography has succeeded in providing historians with a mean to enhance their scholarly work and make it available worldwide to a variety of academic and non academic users. Since the 1990s, KCL’s Department of Digital Humanities (DDH) has been involved in the development of structured prosopographical databases, and has had direct involvement in Prosopographies of the Byzantine World (PBE and PBW), Anglo\u002D Saxon England (PASE), Medieval Scotland (PoMS) and now more generally northern Britain (BoB).\u000D\u000A\u000D\u000APre\u002Ddigital print prosopographies presented its materials as narrative articles about the individuals it contains. Structured prosopography instead takes a more database\u002D oriented approach as it focuses on isolating information fragments (usually, in textual form) that are relevant to the task of describing the life\u002Devents of a particular person. As a result, it is possible to quickly recollect such results in manifold ways using the logical query languages database systems make available.\u000D\u000A\u000D\u000AIn particular, DDH has been involved in the development of a general \u0022factoid\u002D oriented\u0022 model of structure that although downplaying or eliminating narratives about people, has to a large extent served the needs of these various projects quite well. The structure formally identifies obvious items of interest: Persons and Sources, and extends to related things like Offices or Places. In our prosopographical model the Factoid is a central idea and represents the spot in a primary source where something is said about one or more persons. In other words, it links people to the information about them via spots in primary sources that assert that information (Bradley \u0026 Short 2003).\u000D\u000A\u000D\u000AIn general, it is fair to say that the issue of representing prosopographical data to the purpose of building large and efficient knowledge bases is no longer a critical problem for digital humanities research to tackle. Thanks to more than twenty years of research in this niche\u002Darea, a number of technical approaches such as the factoid one just mentioned have been discussed extensively and thus can facilitate enormously the initial design and construction of a structured back\u002Dend for a digital prosopographical project.\u000D\u000A\u000D\u000AFor that regards instead the visual rendering and final presentation of the contents of a prosopography, the amount of existing research is considerably smaller. In fact it is quite common to present data using a classic database\u002Dcentric approach: the tabular format. This approach normally boils down to a bibliographical\u002Drecord\u002Dlike table containing all the information available about a specific person: his/her recorded appellations and life dates, plus of course a variable number of rows that refer to the excepts that describe that person in the primary or secondary sources examined. We can see an example of this classic visualization approach in Fig. 
1 (the example can be found online at http://www.poms.ac.uk/db/record/person/251/).\u000D\u000A\u000D\u000AThe tabular format has the advantage of offering a wealth of information in a clean and well\u002Dorganized interface, thus simplifying the task of finding what we are looking for during a search. However, by combining all the information in a single view, this approach also hides some of the key dimensions used by historians in order to make sense of the materials at hand. For example, such dimensions could be deriving from a spatio\u002Dhistorical, genealogical or socio\u002Dpolitical consideration of the data.\u000D\u000A\u000D\u000AIn other words, we acknowledged that although the tabular format succeeds in creating a comprehensive and condensed version of the information relevant for a search, it would also be interesting to examine if we could present the same data in a more piecemeal fashion, according to predefined pathways or views on the dataset that aim at making explicit some of the coherence principles of the historical discourse.\u000D\u000A\u000D\u000AWe believe that this kind of approach could be desirable for both non\u002Dexpert users (e.g., learners) \u002D who could simultaneously access the data and get a feeling for the meaningful relations among them \u002D and for experts alike (e.g., academic scholars) \u002D who could be facilitated in the process of analyzing data within predefined dimensions, so to highlight patterns of interest that would be otherwise hard to spot.\u000D\u000A\u000D\u000AWith these ideas in mind we started to investigate the creation of innovative methods for presenting prosopographical data to users. For the moment these experiments have been developed in the context of a single prosopography, the “Paradox of Medieval Scotland”, but we reckon that they could be easily generalizable to other projects too, due to the intrinsic similarity of the approaches we used.",
}
@inproceedings{Pasin41,
author = {John Bradley and Michele Pasin},
title = {Annotation and Ontology in most Humanities research: accommodating a more informal interpretation context},
booktitle = {NeDiMaH workshop on ontology based annotation, held in conjunction with Digital Humanities 2012},
month = "07",
year = "2012",
abstract = "The emergence of formal ontologies into the World Wide Web has had a profound effect on research in certain fields. In the Life Sciences, for example, key research information has been captured in formal domain ontologies, like those mentioned in the Open Biological and Biomedical Ontologies website (OBOFoundary 2012). In parallel with this has been the development of the AO annotation ontology framework (AO 2012) which formalises annotation to connect ontologies such as those in the OBOFoundary to references to them in the scientific literature: an act sometimes referred to as \u0022semantic annotation\u0022, and tools such as the SWAN annotation system (SWAN 2008) have emerged to support this. We will call the activity of linking references in a domain literature directly to entities in one or more domain ontologies \u0022direct semantic annotation\u0022. We show it in schematic form in figure I. The annotations – shown as heavier lines connecting spots in the literature to the ontologies would be in the AO annotation ontology, or something similar to it.\u000D\u000ACan direct semantic annotation be applied to research in the Humanities? For it to work as it does in the Life Sciences, formal models of humanities materials, such as CIDOC\u002D‐CRM, need to exist and be already used to model material of interest to the humanities. Not much of this has happened at present, although perhaps Linked Data initiatives (Heath 2011) show some promise in that general direction.",
}
@inproceedings{Pasin34,
author = {John Bradley and Michele Pasin},
title = {Prosopography and Computer Ontologies: towards a formal representation of the ‘factoid’ model by means of CIDOC-CRM},
booktitle = {Representing Knowledge in the Digital Humanities},
month = "09",
year = "2011",
abstract = "",
}
@inproceedings{Pasin30,
author = {Michele Pasin},
title = {Browsing highly interconnected humanities databases through multi-result faceted browsers},
booktitle = {Digital Humanities 2011},
month = "06",
year = "2011",
abstract = "",
}
@inproceedings{Pasin29,
author = {Matteo Romanello and Michele Pasin},
title = {An Ontological View of Canonical Citations},
booktitle = {Digital Humanities 2011},
month = "06",
year = "2011",
abstract = "",
}
@inproceedings{Pasin20,
author = {Michele Pasin},
title = {How do philosophers think their own discipline? Reports from a knowledge elicitation experiment},
booktitle = {European Philosophy and Computing conference, ECAP10},
month = "10",
year = "2010",
abstract = "",
}
@inproceedings{Pasin23,
author = {Michele Pasin},
title = {Data integration perspectives from the London Theatres Bibliography project},
booktitle = {Annual Conference of the Canadian Society for Digital Humanities / Société pour l'étude des médias interactifs (SDH-SEMI 2010)},
month = "06",
year = "2010",
abstract = "",
}
@inproceedings{Pasin1,
author = {Michele Pasin and Arianna Ciula},
title = {Laying the Conceptual Foundations for Data Integration in the Humanities},
booktitle = {Proc. of the Digital Humanities Conference (DH09)},
month = "06",
year = "2009",
abstract = "",
}
@inproceedings{Pasin25,
author = {Michele Pasin and John Bradley},
title = {Meaning and Structure in the London Theatres Bibliography},
booktitle = {The Fifty-Fifth Annual Meeting of the Renaissance Society of America},
month = "03",
year = "2009",
abstract = "",
}
@inproceedings{Pasin2,
author = {Michele Pasin and Simon Buckingham-Shum and Enrico Motta},
title = {Formalizing ‘philosophical’ narratives: the tension between form and content},
booktitle = {European Computing and Philosophy Conference (ECAP08)},
month = "06",
year = "2008",
abstract = "",
}
@inproceedings{Pasin3,
author = {Michele Pasin and Enrico Motta and Zdenek Zdrahal},
title = {Capturing Knowledge About Philosophy},
booktitle = {Fourth International Conference on Knowledge Capture (K-CAP07)},
month = "10",
year = "2007",
abstract = "",
}
@inproceedings{Pasin4,
author = {Michele Pasin},
title = {PhiloSURFical: browse Wittgenstein's Tractatus with the Semantic Web},
booktitle = {Wittgenstein and the Philosophy of Information - Proceedings of the 30th International Ludwig Wittgenstein Symposium},
month = "08",
year = "2007",
abstract = "",
}
@inproceedings{Pasin10,
author = {Michele Pasin and Enrico Motta},
title = {Supporting Philosophers’ Work through the Semantic Web: Ontological Issues},
booktitle = {Fifth International Workshop on Ontologies and Semantic Web for E-Learning (SWEL-07), held in conjunction with AIED-07},
month = "07",
year = "2007",
abstract = "",
}
@inproceedings{Pasin5,
author = {Michele Pasin and Enrico Motta},
title = {An ontology for the description and navigation through philosophical resources},
booktitle = {European Conference on Philosophy and Computing (ECAP-06)},
month = "06",
year = "2006",
abstract = "",
}
@inproceedings{Pasin11,
author = {Michele Pasin and Martin Dzbor},
title = {A Task Based Approach to Support Situating Learning for the Semantic Web},
booktitle = {International Workshop on Applications of Semantic Web Technologies for E-Learning (SWEL-06), held in conjunction with Adaptive Hypermedia 2006},
month = "06",
year = "2006",
abstract = "",
}
@inproceedings{Pasin12,
author = {Michele Pasin and Enrico Motta},
title = {Paving the way towards the e-humanities: a Semantic Web approach to support the learning of philosophy},
booktitle = {Poster paper presented at the 3rd European Semantic Web Conference (ESWC-06)},
month = "06",
year = "2006",
abstract = "",
}
@inproceedings{Pasin13,
author = {Michele Pasin and Enrico Motta},
title = {Semantic Learning Narratives},
booktitle = {International Workshop on Applications of Semantic Web Technologies for E-Learning (SWEL-05), held in conjunction with KCAP-05},
month = "10",
year = "2005",
abstract = "",
}
@inproceedings{Pasin6,
author = {Vanessa Lopez and Michele Pasin and Enrico Motta},
title = {AquaLog: An Ontology-portable Question Answering Interface for the Semantic Web},
booktitle = {2nd European Semantic Web Conference (ESWC05)},
month = "05",
year = "2005",
abstract = "",
}