@article{1033,
  title    = {Making cytological diagnoses on digital images using the {iPath} network},
  journal  = {Acta Cytol},
  volume   = {58},
  year     = {2014},
  pages    = {453--460},
  abstract = {BACKGROUND: The iPath telemedicine platform Basel is mainly used for histological and cytological consultations, but also serves as a valuable learning tool.
    AIM: To study the level of accuracy in making diagnoses based on still images achieved by experienced cytopathologists, to identify limiting factors, and to provide a cytological image series as a learning set.
    METHOD: Images from 167 consecutive cytological specimens of different origin were uploaded on the iPath platform and evaluated by four cytopathologists. Only wet-fixed and well-stained specimens were used. The consultants made specific diagnoses and categorized each as benign, suspicious or malignant.
    RESULTS: For all consultants, specificity and sensitivity regarding categorized diagnoses were 83-92 and 85-93\%, respectively; the overall accuracy was 88-90\%. The interobserver agreement was substantial ($\kappa$ = 0.791). The lowest rate of concordance was achieved in urine and bladder washings and in the identification of benign lesions.
    CONCLUSION: Using a digital image set for diagnostic purposes implies that even under optimal conditions the accuracy rate will not exceed to 80-90\%, mainly because of lacking supportive immunocytochemical or molecular tests. This limitation does not disqualify digital images for teleconsulting or as a learning aid. The series of images used for the study are open to the public at http://pathorama.wordpress.com/extragenital-cytology-2013/.},
  keywords = {Adolescent, Adult, Aged, Aged, 80 and over, Child, Child, Preschool, Computers, Handheld, Cytodiagnosis, Diagnosis, Differential, Female, Humans, Hyperplasia, Infant, Male, Metaplasia, Middle Aged, Neoplasms, Observer Variation, Reproducibility of Results, Sensitivity and Specificity, Telemedicine},
  issn     = {0001-5547},
  doi      = {10.1159/000369241},
  author   = {Dalquen, Peter and Savic Prince, Spasenija and Spieler, Peter and Kunze, Dietmar and Neumann, Heinrich and Eppenberger-Castori, Serenella and Adams, Heiner and Glatz, Katharina and Bubendorf, Lukas}
}

@article{868,
  title    = {The {caBIG}{\textregistered} Life Science Business Architecture Model},
  journal  = {Bioinformatics},
  volume   = {27},
  year     = {2011},
  month    = may,
  pages    = {1429--1435},
  abstract = {MOTIVATION: Business Architecture Models (BAMs) describe what a business does, who performs the activities, where and when activities are performed, how activities are accomplished and which data are present. The purpose of a BAM is to provide a common resource for understanding business functions and requirements and to guide software development. The cancer Biomedical Informatics Grid (caBIG{\textregistered}) Life Science BAM (LS BAM) provides a shared understanding of the vocabulary, goals and processes that are common in the business of LS research.
    RESULTS: LS BAM 1.1 includes 90 goals and 61 people and groups within Use Case and Activity Unified Modeling Language (UML) Diagrams. Here we report on the model{\textquoteright}s current release, LS BAM 1.1, its utility and usage, and plans for future use and continuing development for future releases. Availability and Implementation: The LS BAM is freely available as UML, PDF and HTML (https://wiki.nci.nih.gov/x/OFNyAQ).},
  keywords = {Biomedical Research, Computational Biology, Computer Systems, National Cancer Institute (U.S.), Neoplasms, Software, United States, Vocabulary, Controlled},
  issn     = {1367-4811},
  doi      = {10.1093/bioinformatics/btr141},
  author   = {Boyd, Lauren Becnel and Hunicke-Smith, Scott P and Stafford, Grace A and Freund, Elaine T and Ehlman, Michele and Chandran, Uma and Dennis, Robert and Fernandez, Anna T and Goldstein, Stephen and Steffen, David and Tycko, Benjamin and Klemm, Juli D}
}

@article{871,
  title    = {The {caBIG} annotation and image {Markup} project},
  journal  = {J Digit Imaging},
  volume   = {23},
  year     = {2010},
  month    = apr,
  pages    = {217--225},
  abstract = {Image annotation and markup are at the core of medical interpretation in both the clinical and the research setting. Digital medical images are managed with the DICOM standard format. While DICOM contains a large amount of meta-data about whom, where, and how the image was acquired, DICOM says little about the content or meaning of the pixel data. An image annotation is the explanatory or descriptive information about the pixel data of an image that is generated by a human or machine observer. An image markup is the graphical symbols placed over the image to depict an annotation. While DICOM is the standard for medical image acquisition, manipulation, transmission, storage, and display, there are no standards for image annotation and markup. Many systems expect annotation to be reported verbally, while markups are stored in graphical overlays or proprietary formats. This makes it difficult to extract and compute with both of them. The goal of the Annotation and Image Markup (AIM) project is to develop a mechanism, for modeling, capturing, and serializing image annotation and markup data that can be adopted as a standard by the medical imaging community. The AIM project produces both human- and machine-readable artifacts. This paper describes the AIM information model, schemas, software libraries, and tools so as to prepare researchers and developers for their use of AIM.},
  keywords = {Computational Biology, Computer Communication Networks, Databases, Factual, Diagnostic Imaging, Humans, Interdisciplinary Communication, Medical Records Systems, Computerized, National Cancer Institute (U.S.), National Institutes of Health (U.S.), Neoplasms, Program Evaluation, Quality of Health Care, Radiographic Image Enhancement, Radiology Information Systems, Software, Systems Integration, United States, User-Computer Interface},
  issn     = {1618-727X},
  doi      = {10.1007/s10278-009-9193-9},
  author   = {Channin, David S and Mongkolwat, Pattanasak and Kleper, Vladimir and Sepukar, Kastubh and Rubin, Daniel L}
}