| /******************************************************************************* |
| * Copyright (c) 2006 IBM Corporation and others. |
| * All rights reserved. This program and the accompanying materials |
| * are made available under the terms of the Eclipse Public License v1.0 |
| * which accompanies this distribution, and is available at |
| * http://www.eclipse.org/legal/epl-v10.html |
| * |
| * Contributors: |
| * IBM Corporation - initial API and implementation |
| * |
| *******************************************************************************/ |
| |
| package org.eclipse.europa.tools; |
| |
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import javax.xml.parsers.DocumentBuilder;

import org.eclipse.europa.tools.utils.CommonXML;
import org.w3c.dom.Attr;
import org.w3c.dom.Document;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
| |
/**
 * The purpose of this utility is to take a site.xml file that's automatically
 * created during the mirror command operations, and "merge" it with an
 * authored site.xml file that uses different categories or descriptions than
 * the automated one. The "merge" is only that any new URLs or version
 * numbers are mapped to their corresponding authored versions, so the rest of
 * the authored version is untouched, and can be used as "the" site.xml for
 * the mirrored site.
 */
| |
| public class SiteFileUpdater { |
| |
| public static void main(String[] args) { |
| |
| if (args.length != 2) { |
| System.out.println(); |
| System.out.println(" Usage: file-to-merge-into file-to-merge-from"); |
| System.out.println(" The file-to-merge-into will be overwritten"); |
| System.out.println(); |
| } |
| else { |
| try { |
| String oldFileName = args[0]; |
| String newFileName = args[1]; |
| SiteFileUpdater siteFileUpdater = new SiteFileUpdater(); |
| // old file is the authored file, with old feature versions |
| FileInputStream oldFile; |
| oldFile = new FileInputStream(oldFileName); |
| // new file has correct feature versions and url's from mirror |
| // command |
| FileInputStream newFile = new FileInputStream(newFileName); |
| Document mergedDom = siteFileUpdater.merge(oldFile, newFile); |
| oldFile.close(); |
| newFile.close(); |
| |
| // outfile will contain "merge" xml files |
| FileOutputStream outfile = new FileOutputStream(oldFileName); |
| CommonXML.serialize(mergedDom, outfile); |
| |
| outfile.close(); |
| System.out.println("\tOutput to: " + oldFileName); |
| } |
| catch (IOException e) { |
| e.printStackTrace(); |
| } |
| } |
| |
| } |
| |
| private Document merge(FileInputStream oldFile, FileInputStream newFile) throws IOException { |
| |
| DocumentBuilder documentBuilder = CommonXML.getDocumentBuilder(); |
| Document mergedDom = null; |
| try { |
| Document oldDom = documentBuilder.parse(oldFile); |
| |
| Document newDom = documentBuilder.parse(newFile); |
| mergedDom = merge(oldDom, newDom); |
| } |
| catch (SAXException e) { |
| e.printStackTrace(); |
| } |
| return mergedDom; |
| |
| |
| } |
| |
| private Document merge(Document oldDom, Document newDom) { |
| |
| Document result = oldDom; |
| NodeList oldNodeList = oldDom.getElementsByTagName("feature"); |
| NodeList newNodeList = newDom.getElementsByTagName("feature"); |
| |
| // go through each of the existing features, if there's a new one with |
| // same ID, |
| // then update old one with url and version. |
| int nNodes = oldNodeList.getLength(); |
| |
| for (int i = 0; i < nNodes; i++) { |
| Node node = oldNodeList.item(i); |
| NamedNodeMap attributeMap = node.getAttributes(); |
| Node oldIdAttribute = attributeMap.getNamedItem("id"); |
| |
| Node matchingNode = findMatchingNodeById(newNodeList, oldIdAttribute); |
| if (matchingNode != null) { |
| // so, found match, update url and version |
| Node newurl = matchingNode.getAttributes().getNamedItem("url"); |
| Node newversion = matchingNode.getAttributes().getNamedItem("version"); |
| Attr newurlAttr = node.getOwnerDocument().createAttribute("url"); |
| Attr newversionAttr = node.getOwnerDocument().createAttribute("version"); |
| newurlAttr.setValue(newurl.getNodeValue()); |
| newversionAttr.setNodeValue(newversion.getNodeValue()); |
| attributeMap.setNamedItem(newurlAttr); |
| attributeMap.setNamedItem(newversionAttr); |
| } |
| |
| } |
| |
| return result; |
| } |
| |
| private Node findMatchingNodeById(NodeList nodeList, Node attribute) { |
| Node result = null; |
| ArrayList matches = new ArrayList(); |
| String idValue = attribute.getNodeValue(); |
| |
| for (int i = 0; i < nodeList.getLength(); i++) { |
| Node potentialnode = nodeList.item(i); |
| Node potentialMatch = potentialnode.getAttributes().getNamedItem("id"); |
| if (potentialMatch != null) { |
| String potentialIdValue = potentialMatch.getNodeValue(); |
| if (idValue.equals(potentialIdValue)) { |
| matches.add(potentialnode); |
| } |
| } |
| } |
| if (matches.size() > 1) { |
| System.out.println(" WARNING: there were mulitple matches for " + idValue); |
| System.out.println(" The last one in list was used, but sites(s) should be cleaned up so only newest one included"); |
| for (int i = 0; i < matches.size(); i++) { |
| Node match = (Node) matches.get(i); |
| Node version = match.getAttributes().getNamedItem("version"); |
| |
| System.out.println(" " + version.getNodeValue()); |
| } |
| |
| } |
| if (matches.size() > 0) { |
| result = (Node) matches.get(matches.size() - 1); |
| } |
| return result; |
| } |
| } |