Skip to content

Instantly share code, notes, and snippets.

@holmesw
Created June 6, 2016 12:02
Show Gist options
  • Save holmesw/9aae85601b72e63471b5c4d8b2a92efc to your computer and use it in GitHub Desktop.
Save holmesw/9aae85601b72e63471b5c4d8b2a92efc to your computer and use it in GitHub Desktop.
XML London 2014 Conference Proceedings bibtex
@inproceedings{XMLLondon14.Kay01,
  title     = {Benchmarking {XSLT} Performance},
  author    = {Kay, Michael and Lockett, Debbie},
  crossref  = {ISBN:9780992647117},
  pages     = {10--23},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Kay01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Kay01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,XT-Speedo,Benchmarking,XSLT,Performance},
  abstract  = {This paper presents a new benchmarking framework for XSLT. The project, called XT-Speedo, is open source and we hope that it will attract a community of developers. The tangible deliverable consists of a set of test material, a set of test drivers for various XSLT processors, and tools for analyzing the test results. Underpinning these deliverables is a methodology and set of measurement objectives that influence the design and selection of material for the test suite, which are also described in this paper.},
}
@inproceedings{XMLLondon14.Braaksma01,
  title     = {Streaming Design Patterns or: How {I} Learned to Stop Worrying and Love the Stream},
  author    = {Braaksma, Abel},
  crossref  = {ISBN:9780992647117},
  pages     = {24--52},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Braaksma01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Braaksma01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,XML,XSLT,XPath,streaming,XSLT-30,Exselt},
  abstract  = {XML and streaming, and more specifically, XSLT and streaming, is often avoided by programmers because they think that streaming is hard. They worry that when they have to rewrite their stylesheets to allow streamed processing, that the stylesheets become less maintainable, (much) harder to develop and that following the Rules on Streamability, in the absence of a good tutorial or book on the subject, is excruciatingly hard and arduous when the only reference they can refer to is the Latest Working Draft on XSLT, section 19. This paper goes further where a previous paper by me left off. This previous paper explains ten rules of thumb for streaming, which will be briefly iterated over in this paper, see Section 4, “Brief overview of the Ten Rules of Thumb of streaming”. This paper expands on that by showing streaming refactoring design patterns that turn typical non-streaming XSLT programming scenarios into streaming ones. They can be found in Section 5, “Streaming Design Patterns”, the text being specifically geared towards programmers new to streaming.},
}
@inproceedings{XMLLondon14.Kohl01,
  title     = {From monolithic {XML} for print/web to lean {XML} for data: realising linked data for dictionaries},
  author    = {Kohl, Matt and Cirulli, Sandro and Gooch, Phil},
  crossref  = {ISBN:9780992647117},
  pages     = {53--62},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Kohl01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Kohl01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,XProc,RDF,dictionaries},
  abstract  = {In order to reconcile the need for legacy data compatibility with changing business requirements, proprietary XML schemas inevitably become larger and looser over time. We discuss the transition at Oxford University Press from monolithic XML models designed to capture monolingual and bilingual print dictionaries derived from multiple sources, towards a single, leaner, semantic model. This new model reflects the lexical content units of a traditional dictionary, while maximising human readability and machine interpretability, thus facilitating transformation to Resource Description Framework (RDF) triples as linked data. We describe a modular transformation process based on XProc, XSLT, XSpec and Schematron that maps complex structures and multilingual metadata in the legacy data to the structures and harmonised taxonomy of the new model, making explicit information that is often implicit in the original data. Using the new model in its prototype RDF form, we demonstrate how cross-lingual, cross-domain searches can be performed, and custom data-sets can be constructed, that would be impossible or very timeconsuming to achieve with the original XML content stored at the individual dictionary level.},
}
@inproceedings{XMLLondon14.Narmontas01,
  title     = {{XML} Processing in {Scala}},
  author    = {Narmontas, William and Fancellu, Dino},
  crossref  = {ISBN:9780992647117},
  pages     = {63--75},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Narmontas01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Narmontas01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,Scala,XML,XQuery,XSLT,XQJ,Java,Processing,JVM},
  abstract  = {Scala is an established static- and strongly-typed functional and object-oriented scalable programming language for the JVM with seamless Java interoperation. Scala and its ecosystem are used at LinkedIn, Twitter, Morgan Stanley among many companies demanding remarkable time to market, robustness, high performance and scalability. This paper shows you Scala's strong native XML support, powerful XQuery-like constructs, hybrid processing via XQuery for Scala, and increased XML processing performance. You will learn how you can benefit from Scala’s practicality in a commercial setting, ultimately increasing your productivity.},
}
@inproceedings{XMLLondon14.Bina01,
  title     = {{XML} Authoring On Mobile Devices},
  author    = {Bina, George},
  crossref  = {ISBN:9780992647117},
  pages     = {76--82},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Bina01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Bina01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,XML,authoring,mobile,review,user experience},
  abstract  = {Not too long ago XML-born content was not present in a mobile-friendly form on mobile devices. Now, many of the XML frameworks like DocBook, DITA and TEI provide output formats that are tuned to be used on mobile devices. These are either different electronic book formats (EPUB, Kindle) or different mobile-friendly web formats. Many people find XML authoring difficult on computers, let alone mobile devices. However, due to the constantly increasing number of mobile devices, that made people create mobile-friendly output formats from XML documents, there is clearly a need to provide also direct access to authoring XML content on these devices. I would like to explore the options for providing XML authoring on mobile devices and describe our current work and the technology choices we made to create an authoring solution for mobile devices. Trying to enable people to create XML documents on mobile devices is a very exciting, mainly because the user interaction is completely different on a mobile device: different screen resolutions, different interaction methods (touch, swipe, pinch), etc. See how we imagined XML authoring on an Android phone or on iPad! How about editing XML on a smart TV? Leverage speech recognition/dictation and handwriting recognition technologies that are available on mobile devices to enable completely new ways of interacting with XML documents!},
}
@inproceedings{XMLLondon14.Weingaertner01,
  title     = {Engineering a {XML}-based Content Hub for Enterprise Publishing},
  author    = {Weing{\"a}rtner, Elias and Ludwig, Christoph},
  crossref  = {ISBN:9780992647117},
  pages     = {83--87},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Weingaertner01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Weingaertner01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,publishing},
  abstract  = {Being one of the leading publishing houses in the domains of tax, human resources and law in Germany, delivering large amounts of XML-based content to our customers is a vital part of our business at Haufe Group. We currently make use of several legacy and proprietary systems for this purpose. However, recent business needs such as the requirement for flexible transformation or complex structural queries push these systems to both conceptual and technical limits. Along with new business requirements derived from our company's business strategy, we are currently designing a new service that centrally manages our entire document corpus in XML. We term this service "Content Hub". In this paper, we sketch the architecture of this system, discuss important software architectural challenges and illustrate how we are implementing this system using standard XML technology.},
}
@inproceedings{XMLLondon14.Huang01,
  title     = {A Visual Comparison Approach to Automated Regression Testing},
  author    = {Huang, Celina},
  crossref  = {ISBN:9780992647117},
  pages     = {88--95},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Huang01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Huang01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,regression testing,PDF,XML,XSL-FO,xslt},
  abstract  = {Antenna House Regression Testing System (AHRTS) is an automated solution designed to perform visual regression testing of PDF output (PDF to PDF compare) from the Antenna House Formatter software by converting a set of baseline PDFs and a set of new PDFs to bitmaps, and then comparing the bitmaps pixel by pixel. Several functions of the system make use of XML and the final reports are generated using XML and XSL-FO. This paper addresses the importance of PDF to PDF comparison for regression testing and explains the visual comparison approach taken. We explain the issues of traditional methods such as manual regression testing and why the need for an automated solution. We also look at how AHRTS works and discuss the benefits we’ve seen since using it internally to test new releases of our own software. Given its visual-oriented capabilities, we then explore other possible uses beyond the original design intent.},
}
@inproceedings{XMLLondon14.Pemberton01,
  title     = {Live {XML} Data},
  author    = {Pemberton, Steven},
  crossref  = {ISBN:9780992647117},
  pages     = {96--102},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Pemberton01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Pemberton01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,xforms,maps},
  abstract  = {XML is often thought of in terms of documents, or data being transferred between machines, but there is an aspect of XML often overlooked, and that is as a source of live data, that can be displayed in different ways in real time, and used in interactive applications. In this paper we talk about the use of live XML data, and give some examples of its use.},
}
@inproceedings{XMLLondon14.Fennell01,
  title     = {{Schematron} - More useful than you’d thought},
  author    = {Fennell, Philip},
  crossref  = {ISBN:9780992647117},
  pages     = {103--112},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Fennell01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Fennell01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,Schematron,RDF},
  abstract  = {The Schematron XML validation language has been around for about as long as XML and has been used extensively for validation tasks outside the gamut of what XML Schema 1.0 was designed for. The reference implementation is implemented, with great care, in XSLT, and with extensibility in mind. There are a number of points in the Schematron compilation process that provide opportunities to extend its basic behavior and allow other modes of report output to be generated. This paper looks at one example of extending Schematron to create an XML to RDF Mapping Language for flexible RDF triple construction and built-in source-data validation rules.},
}
@inproceedings{XMLLondon14.Ahmed01,
  title     = {Linked Data in a {.NET} World},
  author    = {Ahmed, Kal},
  crossref  = {ISBN:9780992647117},
  pages     = {113--127},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Ahmed01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Ahmed01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,RDF,.Net},
  abstract  = {This paper discusses two different ways in which .NET applications can access linked data. We start with a discussion of using LINQ to query data from a SPARQL endpoint that will describe how and why you might use LINQ queries against a compile-time data model to query a dynamic, open data set. In the second section we discuss OData - Microsoft's approach to publishing data on the web - and its relationship to RDF and the Linked Data approach, and we show how an OData endpoint can be easily constructed as a type-safe "view" over an RDF data set.},
}
@inproceedings{XMLLondon14.Broersma01,
  title     = {Frameless for {XML} - The Reactive Revolution},
  author    = {Broersma, Robbert and van der Kolk, Yolijn},
  crossref  = {ISBN:9780992647117},
  pages     = {128--132},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Broersma01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Broersma01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,Frameless,xml,xslt,browser,JavaScript},
  abstract  = {What would the web look like with functional reactive templates driven by functional reactive query expressions? Lots of recent innovative developments are significant steps towards faster and more manageable web development, but to really improve our lives by leaps and bounds we must take a step back and consider the requirements for unleashing all this power to front-end developers that aren't fluent in JavaScript. What would happen if we throw Angular expressions, React's virtual DOM and Reactive Extensions (Rx) in a mix? What if we use a declarative syntaxes like XSLT and XPath to compile an instruction set for this engine? What if we can reason about the instructions that make up your website and automatically build minimal and optimized modules? It's uneconomical to obtain optimal performance for most projects you're working on, there are just too many sides to it: asynchronous tasks, web workers, parallel computations, lazily loading modules, reducing file size, splitting HTML/CSS/JS into modules, combining modules again to reduce HTTP requests, minification, atomic DOM updates, only rendering what's visible, only calculating what is being rendered, only re-calculating what has changed... But we must do better, also because performance is very much about economic inclusiveness. Smaller web pages are essential to those using internet in (remote) areas over slow 2.5G mobile networks, where wireless data charges are high and every CPU cycle counts when you're using a $25 dollar smartphone. When we've got a reactive template solution in place we can start thinking about using some of the kilobytes we've saved and some of the CPU cycles to add ubiquitous support for unsexy inclusive technologies such as accessibility, Unicode, localization, and security.},
}
@inproceedings{XMLLondon14.Williams01,
  title     = {Product Usage Schemas},
  author    = {Williams, Jorge},
  crossref  = {ISBN:9780992647117},
  pages     = {133--147},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Williams01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Williams01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,Usage Collection,Usage Validation,ATOM Syndication,XML Schema,WADL,XSLT,Cloud,Utility Computing},
  abstract  = {In this case study we describe the process of collecting, validating, and aggregating usage information in a large public cloud for the purpose of billing. We also describe the Product Usage Schema a simple xml schema language used in-house to describe, version, and validate usage messages as they are emitted by various products across our public cloud.},
}
@inproceedings{XMLLondon14.Goncalves01,
  title     = {An {XML}-based Approach for Data Preprocessing of Multi-Label Classification Problems},
  author    = {Gon{\c{c}}alves, Eduardo Corr{\^e}a and Braganholo, Vanessa},
  crossref  = {ISBN:9780992647117},
  pages     = {148--151},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Goncalves01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Goncalves01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,Data Preprocessing,Text Categorization, Multi-Label Classification},
  abstract  = {Most of the data mining tools are only able to work with data structured either in relational tables or in text files with one record per line. However, both kinds of data representation are not well-suited to certain data mining tasks. One example of such task is multi-label classification, where the goal is to predict the states of a multi-valued target attribute. This paper discusses the use of XML as an alternative to represent datasets for multi-label classification processes, since this language offers flexible means of structuring complex information, thus potentially facilitating the major steps involved in data preprocessing. In order to discuss from a practical point of view, we describe the steps of an experience involving the preprocessing of a real text dataset.},
}
@inproceedings{XMLLondon14.Rzedzicki01,
  title     = {Using Abstract Content Model and Wikis to link {Semantic Web}, {XML}, {HTML}, {JSON} and {CSV}: Using {Semantic Media Wiki} as a mechanism for storing format neutral content model},
  author    = {Rzedzicki, Lech},
  crossref  = {ISBN:9780992647117},
  pages     = {152--156},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Rzedzicki01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Rzedzicki01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,Wiki,Semantic Web,XML,xhtml,HTML,JSON,CSV},
  abstract  = {2013 has been hyped as the year of Big Data, 2014 is still about projects dealing with deluge of data and this trend is going to continue as organisations produce and retain exponentially growing amounts of data, outpacing their capability to utilise the data and gain insight from it. One method of dealing with the data flood is modeling the data - applying rules to ensure it is consistent and predictable where possible, and flexible everywhere else, providing definitions, examples, alternatives and connecting related structures. On one hand of the modeling spectrum is the traditional relational data modeling with conceptual, logical and physical models and levels of normalization. Such a strict approach is definitely working well in some environments, but not in publishing where requirements are in constant flux and are rarely well defined. On the other hand of the spectrum is the 'NOSQL' movement where data is literally dumped to storage as is and any data validation and modelling is kept in the application layer therefore needs software developers to maintain. At the moment NOSQL developers are a scarce minority amongst established publishers and a rare and expensive resource in general. To balance these needs and problems, at Kode1100 Ltd we have designed and developed a modeling system, which to a large extent is resilient to changes in developer fashion and taste and can be maintained by technically savvy and otherwise intelligent folks who do not have to be full time programmers.},
}
@inproceedings{XMLLondon14.Vlist01,
  title     = {{JSON} and {XML}: a new perspective},
  author    = {van der Vlist, Eric},
  crossref  = {ISBN:9780992647117},
  pages     = {157--161},
  booktitle = {XML London 2014 Conference Proceedings},
  year      = {2014},
  doi       = {10.14337/XMLLondon14.Vlist01},
  url       = {http://dx.doi.org/10.14337/XMLLondon14.Vlist01},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,xml,json},
  abstract  = {A lot has already been said about the tumultuous relationship between JSON and XML. A number of binding tools have been proposed. Extensions to the XPath Data Model (XDM) and functions are being considered for XSLT 3.0 and XQuery 3.1 to define maps and arrays, two item types that would facilitate the import of JSON objects. The author of this paper has already published and presented papers proposing an XML serialization for XSLT 3.0 maps and arrays, a detailed comparison between XML and JSON data models and a proposal to extend the XDM to better bridge the gap between these data models. None of these efforts seems to be totally satisfying to eliminate the fundamental impedance mismatch between JSON and XML suggesting that we may not have found the right angle to look at this problem. Rather than proposing yet another conversion methodology, this paper proposes a new perspective to look at the differences between JSON and XML which might be more constructive than the ones which had been adopted so far.},
}
@proceedings{ISBN:9780992647117,
  title     = {{XML} London 2014},
  editor    = {Foster, Charles},
  year      = {2014},
  isbn      = {978-0-9926471-1-7},
  publisher = {XML London},
  booktitle = {XML London 2014 Conference Proceedings},
  url       = {http://xmllondon.com/2014/xmllondon-2014-proceedings.pdf},
  keywords  = {xmllondon,xmllondon14,xmllondon2014,xml},
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment