diff --git a/buildpdf.py b/buildpdf.py
index a6014eb..0d987a3 100755
--- a/buildpdf.py
+++ b/buildpdf.py
@@ -22,6 +22,7 @@
 #* *
 #***************************************************************************
 
+from __future__ import print_function
 __title__="buildpdf"
 __author__ = "Yorik van Havre "
 __url__ = "http://www.freecadweb.org"
@@ -363,19 +364,19 @@ def crawl():
             return 1
     elif PDFCONVERTOR == 'htmldoc':
         if os.system('htmldoc --version'):
-            print "Error: Htmldoc not found, exiting."
+            print("Error: Htmldoc not found, exiting.")
             return 1
     try:
         from PyPDF2 import PdfFileReader,PdfFileWriter
     except:
-        print "Error: Python-pypdf2 not installed, exiting."
+        print("Error: Python-pypdf2 not installed, exiting.")
 
 
     # run ########################################################
 
     buildpdffiles()
     joinpdf()
-    if VERBOSE: print "All done!"
+    if VERBOSE: print("All done!")
     return 0
 
 
@@ -389,10 +390,10 @@ def buildpdffiles():
     for i in templist:
         if i[-5:] == '.html':
             fileslist.append(i)
-    print "converting ",len(fileslist)," pages"
+    print("converting ",len(fileslist)," pages")
     i = 1
     for f in fileslist:
-        print i," : ",f
+        print(i," : ",f)
         if PDFCONVERTOR == 'pisa':
             createpdf_pisa(f[:-5])
         elif PDFCONVERTOR == 'wkhtmltopdf':
@@ -421,7 +422,7 @@ def createpdf_pisa(pagename):
     if (not exists(pagename+".pdf",image=True)) or OVERWRTIE:
         infile = open(FOLDER + os.sep + pagename+'.html','ro')
         outfile = open(FOLDER + os.sep + pagename+'.pdf','wb')
-        if VERBOSE: print "Converting " + pagename + " to pdf..."
+        if VERBOSE: print("Converting " + pagename + " to pdf...")
         pdf = pisa.CreatePDF(infile,outfile,FOLDER,link_callback=fetch_resources)
         outfile.close()
         if pdf.err:
@@ -441,7 +442,7 @@ def createpdf_firefox(pagename):
     if os.path.exists(FIREFOXPDFFOLDER + os.sep + pagename + ".pdf"):
         shutil.move(FIREFOXPDFFOLDER+os.sep+pagename+".pdf",outfile)
     else:
-        print "-----------------------------------------> Couldn't find print output!"
+        print("-----------------------------------------> Couldn't find print output!")
 
 
 def createpdf_htmldoc(pagename):
@@ -458,16 +459,16 @@ def createpdf_wkhtmltopdf(pagename):
         infile = FOLDER + os.sep + pagename+'.html'
         outfile = FOLDER + os.sep + pagename+'.pdf'
         cmd = 'wkhtmltopdf -L 5mm --user-style-sheet '+FOLDER+os.sep+'wkhtmltopdf.css '+infile+' '+outfile
-        print cmd
+        print(cmd)
         #return os.system(cmd)
     else:
-        print "skipping"
+        print("skipping")
 
 
 def joinpdf():
     "creates one pdf file from several others, following order from the cover"
     from PyPDF2 import PdfFileReader,PdfFileWriter
-    if VERBOSE: print "Building table of contents..."
+    if VERBOSE: print("Building table of contents...")
     result = PdfFileWriter()
     createCover()
 
@@ -488,7 +489,7 @@ def joinpdf():
         if page == "end":
             parent = False
             continue
-        if VERBOSE: print 'Appending',page, "at position",count
+        if VERBOSE: print('Appending',page, "at position",count)
         title = page.replace("_"," ")
         pdffile = page + ".pdf"
         if exists(pdffile,True):
@@ -504,16 +505,16 @@ def joinpdf():
                 result.addBookmark(title,count,parent)
             count += numpages
         else:
-            print "page",pdffile,"not found, aborting."
+            print("page",pdffile,"not found, aborting.")
             sys.exit()
 
-    if VERBOSE: print "Writing..."
+    if VERBOSE: print("Writing...")
     outputfile = open(FOLDER+os.sep+"freecad.pdf",'wb')
     result.write(outputfile)
     outputfile.close()
     if VERBOSE:
-        print ' '
-        print 'Successfully created '+FOLDER+os.sep+'freecad.pdf'
+        print(' ')
+        print('Successfully created '+FOLDER+os.sep+'freecad.pdf')
 
 
 def local(page,image=False):
@@ -544,7 +545,7 @@ def makeStyleSheet():
 
 def createCover():
     "downloads and creates a cover page"
-    if VERBOSE: print "fetching " + COVER
+    if VERBOSE: print("fetching " + COVER)
     data = (urlopen(COVER).read())
     path = FOLDER + os.sep + "Cover.svg"
     fil = open(path,'wb')
diff --git a/buildqhelp.py b/buildqhelp.py
index 350f543..177d4e1 100755
--- a/buildqhelp.py
+++ b/buildqhelp.py
@@ -22,6 +22,7 @@
 #* *
 #***************************************************************************
 
+from __future__ import print_function
 __title__="wiki2qhelp"
 __author__ = "Yorik van Havre "
 __url__ = "http://www.freecadweb.org"
@@ -52,10 +53,10 @@ def crawl():
 
     # tests ###############################################
     if os.system(QHELPCOMPILER +' -v'):
-        print "Error: QAssistant not fully installed, exiting."
+        print("Error: QAssistant not fully installed, exiting.")
         return 1
     if os.system(QCOLLECTIOMGENERATOR +' -v'):
-        print "Error: QAssistant not fully installed, exiting."
+        print("Error: QAssistant not fully installed, exiting.")
         return 1
 
     # run ########################################################
@@ -64,22 +65,22 @@ def crawl():
     qhcp = createCollProjectFile()
     shutil.copy("freecad-icon-64.png","localwiki/freecad-icon-64.png")
     if generate(qhcp) or compile(qhp):
-        print "Error at compiling"
+        print("Error at compiling")
         return 1
-    if VERBOSE: print "All done!"
+    if VERBOSE: print("All done!")
     i=raw_input("Copy the files to their correct location in the source tree? y/n (default=no) ")
     if i.upper() in ["Y","YES"]:
         shutil.copy("localwiki/freecad.qch","../../Doc/freecad.qch")
         shutil.copy("localwiki/freecad.qhc","../../Doc/freecad.qhc")
     else:
-        print 'Files are in localwiki. Test with "assistant -collectionFile localwiki/freecad.qhc"'
+        print('Files are in localwiki. Test with "assistant -collectionFile localwiki/freecad.qhc"')
     return 0
 
 def compile(qhpfile):
     "compiles the whole html doc with qassistant"
     qchfile = FOLDER + os.sep + "freecad.qch"
     if not os.system(QHELPCOMPILER + ' '+qhpfile+' -o '+qchfile):
-        if VERBOSE: print "Successfully created",qchfile
+        if VERBOSE: print("Successfully created",qchfile)
         return 0
 
 
@@ -93,7 +94,7 @@ def generate(qhcpfile):
     about.close()
     qhcfile = FOLDER + os.sep + "freecad.qhc"
     if not os.system(QCOLLECTIOMGENERATOR+' '+qhcpfile+' -o '+qhcfile):
-        if VERBOSE: print "Successfully created ",qhcfile
+        if VERBOSE: print("Successfully created ",qhcfile)
         return 0
 
 def createCollProjectFile():
@@ -131,12 +132,12 @@ def createCollProjectFile():
 
 '''
 
-    if VERBOSE: print "Building project file..."
+    if VERBOSE: print("Building project file...")
     qfilename = FOLDER + os.sep + "freecad.qhcp"
     f = open(qfilename,'w')
     f.write(qprojectfile)
     f.close()
-    if VERBOSE: print "Done writing qhcp file",qfilename
+    if VERBOSE: print("Done writing qhcp file",qfilename)
     return qfilename
 
 def buildtoc():
@@ -182,7 +183,7 @@ def buildtoc():
         if not link:
             link = 'default.html'
         return title,link
-    if VERBOSE: print "Building table of contents..."
+    if VERBOSE: print("Building table of contents...")
     f = open(FOLDER+os.sep+INDEX+'.html')
     html = ''
     for line in f: html += line
@@ -229,7 +230,7 @@ def buildtoc():
     f = open(qfilename,'wb')
     f.write(qhelpfile)
     f.close()
-    if VERBOSE: print "Done writing qhp file",qfilename
+    if VERBOSE: print("Done writing qhp file",qfilename)
     return qfilename
 
 if __name__ == "__main__":
diff --git a/buildwikiindex.py b/buildwikiindex.py
index 320d47c..072b2bc 100755
--- a/buildwikiindex.py
+++ b/buildwikiindex.py
@@ -22,6 +22,7 @@
 #* *
 #***************************************************************************
 
+from __future__ import print_function
 __title__="buildwikiindex.py"
 __author__ = "Yorik van Havre "
 __url__ = "http://www.freecadweb.org"
@@ -60,15 +61,15 @@ def crawl(pagename=[]):
     else:
         if os.path.exists("wikifiles.txt"):
             f = open("wikifiles.txt","r")
-            if VERBOSE: print "Reading existing list..."
+            if VERBOSE: print("Reading existing list...")
             for l in f.readlines():
                 if l.strip() != "":
-                    if VERBOSE: print "Adding ",l
+                    if VERBOSE: print("Adding ",l)
                     processed.append(l.strip())
             f.close()
         if os.path.exists("todolist.txt"):
             f = open("todolist.txt","r")
-            if VERBOSE: print "Reading existing todo list..."
+            if VERBOSE: print("Reading existing todo list...")
             for l in f.readlines():
                 if l.strip() != "":
                     todolist.append(l.strip())
@@ -79,19 +80,19 @@ def crawl(pagename=[]):
     while todolist:
         targetpage = todolist.pop()
         if (not targetpage in NORETRIEVE):
-            if VERBOSE: print count, ": Scanning ", targetpage
+            if VERBOSE: print(count, ": Scanning ", targetpage)
             pages,images = get(targetpage)
             count += 1
             processed.append(targetpage)
             processed.extend(images)
-            if VERBOSE: print "got",len(pages),"links"
+            if VERBOSE: print("got",len(pages),"links")
             for p in pages:
                 if (not (p in todolist)) and (not (p in processed)):
                     todolist.append(p)
             if WRITETHROUGH:
                 writeList(processed)
                 writeList(todolist,"todolist.txt")
-    if VERBOSE: print "Fetched ", count, " pages"
+    if VERBOSE: print("Fetched ", count, " pages")
     if not WRITETHROUGH:
         writeList(processed)
     if pagename:
@@ -156,7 +157,7 @@ def getlinks(html):
             NORETRIEVE.append(rg)
         if not rg in NORETRIEVE:
             pages.append(rg)
-            print "got link: ",rg
+            print("got link: ",rg)
     return pages
 
 def getimagelinks(html):
@@ -167,7 +168,7 @@ def getimagelinks(html):
 
 def fetchpage(page):
     "retrieves given page from the wiki"
-    print "fetching: ",page
+    print("fetching: ",page)
     failcount = 0
     while failcount < MAXFAIL:
         try:
@@ -175,7 +176,7 @@ def fetchpage(page):
             return html
         except HTTPError:
             failcount += 1
-    print 'Error: unable to fetch page ' + page
+    print('Error: unable to fetch page ' + page)
     sys.exit()
 
 def cleanList(pagelist):
@@ -193,7 +194,7 @@ def writeList(pages,filename="wikifiles.txt"):
     for p in pages:
         f.write(p+"\n")
     f.close()
-    if VERBOSE: print "written ",filename
+    if VERBOSE: print("written ",filename)
 
 if __name__ == "__main__":
     crawl(sys.argv[1:])
diff --git a/downloadwiki.py b/downloadwiki.py
index f48c319..466924b 100755
--- a/downloadwiki.py
+++ b/downloadwiki.py
@@ -22,6 +22,7 @@
 #* *
 #***************************************************************************
 
+from __future__ import print_function
 __title__="downloadwiki"
 __author__ = "Yorik van Havre "
 __url__ = "http://www.freecadweb.org"
@@ -137,7 +138,7 @@ def crawl():
     "downloads an entire wiki site"
     global processed
     processed = []
-    if VERBOSE: print "crawling ", URL, ", saving in ", FOLDER
+    if VERBOSE: print("crawling ", URL, ", saving in ", FOLDER)
     if not os.path.isdir(FOLDER): os.mkdir(FOLDER)
     file = open(FOLDER + os.sep + "wiki.css",'wb')
     file.write(css)
@@ -151,16 +152,16 @@ def crawl():
     for l in lfile: locallist.append(l.replace("\n",""))
     lfile.close()
     todolist = locallist[:]
-    print "getting",len(todolist),"files..."
+    print("getting",len(todolist),"files...")
     count = 1
     indexpages = get(INDEX)
     while todolist:
         targetpage = todolist.pop()
-        if VERBOSE: print count, ": Fetching ", targetpage
+        if VERBOSE: print(count, ": Fetching ", targetpage)
         get(targetpage)
         count += 1
-    if VERBOSE: print "Fetched ", count, " pages"
-    if VERBOSE: print "All done!"
+    if VERBOSE: print("Fetched ", count, " pages")
+    if VERBOSE: print("All done!")
     return 0
 
 def get(page):
@@ -180,7 +181,7 @@ def get(page):
         html = cleanimagelinks(html)
         output(html,page)
     else:
-        if VERBOSE: print " skipping",page
+        if VERBOSE: print(" skipping",page)
 
 def getlinks(html):
     "returns a list of wikipage links in html file"
@@ -268,7 +269,7 @@ def cleanimagelinks(html,links=None):
 
 def fetchpage(page):
     "retrieves given page from the wiki"
-    print " fetching: ",page
+    print(" fetching: ",page)
     failcount = 0
     while failcount < MAXFAIL:
         try:
@@ -276,19 +277,19 @@ def fetchpage(page):
             return html
         except HTTPError:
             failcount += 1
-    print 'Error: unable to fetch page ' + page
+    print('Error: unable to fetch page ' + page)
 
 def fetchimage(imagelink):
     "retrieves given image from the wiki and saves it"
     if imagelink[0:5] == "File:":
-        print "Skipping file page link"
+        print("Skipping file page link")
         return
     filename = re.findall('.*/(.*)',imagelink)[0]
     if not exists(filename,image=True):
         failcount = 0
         while failcount < MAXFAIL:
             try:
-                if VERBOSE: print " fetching " + filename
+                if VERBOSE: print(" fetching " + filename)
                 data = (urlopen(URL + imagelink).read())
                 path = local(filename,image=True)
                 file = open(path,'wb')
@@ -298,11 +299,11 @@ def fetchimage(imagelink):
                 failcount += 1
             else:
                 processed.append(filename)
-                if VERBOSE: print " saving",local(filename,image=True)
+                if VERBOSE: print(" saving",local(filename,image=True))
                 return
-        print 'Error: unable to fetch file ' + filename
+        print('Error: unable to fetch file ' + filename)
     else:
-        if VERBOSE: print " skipping",filename
+        if VERBOSE: print(" skipping",filename)
 
 def local(page,image=False):
     "returns a local path for a given page/image"
@@ -337,7 +338,7 @@ def output(html,page):
     filename = filename.replace("&pagefrom=","+")
     filename = filename.replace("#mw-pages","")
     filename = filename.replace(".html.html",".html")
-    print " saving",filename
+    print(" saving",filename)
     file = open(filename,'wb')
     file.write(html)
     file.close()
diff --git a/update.py b/update.py
index 9f35ed5..75e2625 100755
--- a/update.py
+++ b/update.py
@@ -22,6 +22,7 @@
 #* *
 #***************************************************************************
 
+from __future__ import print_function
 __title__="update.py"
 __author__ = "Yorik van Havre "
 __url__ = "http://www.freecadweb.org"
@@ -58,70 +59,70 @@ def update(pagename=None):
     if not os.path.exists("revisions.txt"):
         # case 1)
         if not os.path.exists("wikifiles.txt"):
-            print "No wikifiles.txt found. Aborting"
+            print("No wikifiles.txt found. Aborting")
             sys.exit()
         pages = []
         f = open("wikifiles.txt","r")
-        if VERBOSE: print "Reading existing list..."
+        if VERBOSE: print("Reading existing list...")
         for l in f.readlines():
             if l.strip() != "":
                 if not "/wiki/" in l:
-                    if VERBOSE: print "Adding ",l.strip()
+                    if VERBOSE: print("Adding ",l.strip())
                     pages.append(l.strip())
         f.close()
-        if VERBOSE: print "Added ",str(len(pages))," entries"
+        if VERBOSE: print("Added ",str(len(pages))," entries")
         i = 1
         revs = []
         for page in pages:
             rev = getRevision(page)
-            if VERBOSE: print str(i)," revision: ",rev
+            if VERBOSE: print(str(i)," revision: ",rev)
             revs.append(page+":"+rev)
             i += 1
         writeList(revs,"revisions.txt")
-        print "All done. Successfully written revisions.txt with ",len(revs)," entries."
+        print("All done. Successfully written revisions.txt with ",len(revs)," entries.")
     elif os.path.exists("revisions.txt") and (not os.path.exists("updates.txt")):
         # case 2)
         f = open("revisions.txt","r")
-        if VERBOSE: print "Reading revisions list..."
+        if VERBOSE: print("Reading revisions list...")
         revisions = {}
         for l in f.readlines():
             if l.strip() != "":
                 r = l.strip().split(":")
                 p = ":".join(r[:-1])
-                if VERBOSE: print "Adding ",p
+                if VERBOSE: print("Adding ",p)
                 revisions[p] = r[1]
         f.close()
-        if VERBOSE: print "Added ",str(len(revisions.keys()))," entries"
+        if VERBOSE: print("Added ",str(len(revisions.keys()))," entries")
        updates = []
         i = 1
         for page in revisions.keys():
             rev = getRevision(page)
             if rev != revisions[page]:
-                if VERBOSE: print str(i),page," has a new revision: ",rev
+                if VERBOSE: print(str(i),page," has a new revision: ",rev)
                 updates.append(page)
             else:
-                if VERBOSE: print str(i),page," is up to date "
+                if VERBOSE: print(str(i),page," is up to date ")
             i += 1
         if updates:
             writeList(updates,"updates.txt")
-            print "All done. Successfully written updates.txt with ",len(updates)," entries."
+            print("All done. Successfully written updates.txt with ",len(updates)," entries.")
         else:
-            print "Everything up to date. Nothing to be done."
+            print("Everything up to date. Nothing to be done.")
     elif os.path.exists("revisions.txt") and os.path.exists("updates.txt"):
         # case 3)
         if not os.path.exists("wikifiles.txt"):
-            print "No wikifiles.txt found. Aborting"
+            print("No wikifiles.txt found. Aborting")
             sys.exit()
         wikifiles = []
         f = open("wikifiles.txt","r")
-        if VERBOSE: print "Reading wikifiles list..."
+        if VERBOSE: print("Reading wikifiles list...")
         for l in f.readlines():
             if l.strip() != "":
                 wikifiles.append(l.strip())
         f.close()
-        if VERBOSE: print "Read ",str(len(wikifiles))," entries"
+        if VERBOSE: print("Read ",str(len(wikifiles))," entries")
         f = open("revisions.txt","r")
-        if VERBOSE: print "Reading revisions list..."
+        if VERBOSE: print("Reading revisions list...")
         revisions = {}
         for l in f.readlines():
             if l.strip() != "":
@@ -131,25 +132,25 @@ def update(pagename=None):
         f.close()
         todo = []
         f = open("updates.txt","r")
-        if VERBOSE: print "Reading updates list..."
+        if VERBOSE: print("Reading updates list...")
         for l in f.readlines():
             if l.strip() != "":
                 todo.append(l.strip())
         f.close()
-        if VERBOSE: print str(len(todo))," pages to scan..."
+        if VERBOSE: print(str(len(todo))," pages to scan...")
         import buildwikiindex
         buildwikiindex.WRITETHROUGH = False
         buildwikiindex.VERBOSE = VERBOSE
         updates = []
         for t in todo:
-            if VERBOSE: print "Scanning ",t
+            if VERBOSE: print("Scanning ",t)
             updates.extend(buildwikiindex.crawl(t))
         updates = [u for u in updates if not u in wikifiles]
-        if VERBOSE: print str(len(updates))," files to download..."
+        if VERBOSE: print(str(len(updates))," files to download...")
         import downloadwiki
         i = 1
         for u in updates:
-            if VERBOSE: print i, ": Fetching ", u
+            if VERBOSE: print(i, ": Fetching ", u)
             downloadwiki.get(u)
             if not "/wiki/" in u:
                 rev = getRevision(u)
@@ -157,26 +158,26 @@ def update(pagename=None):
             if not u in wikifiles:
                 wikifiles.append(u)
             i += 1
-        if VERBOSE: print "Updating wikifiles and revisions..."
+        if VERBOSE: print("Updating wikifiles and revisions...")
         writeList(wikifiles,"wikifiles.txt")
         updatedrevs = []
         for k in revisions.keys():
             updatedrevs.append(k+":"+revisions[k])
         writeList(updatedrevs,"revisions.txt")
         os.remove("updates.txt")
-        if VERBOSE: print "All done!"
+        if VERBOSE: print("All done!")
 
 def getRevision(page):
     html = fetchPage(page)
     revs = re.findall("wgCurRevisionId\"\:(.*?),",html)
     if len(revs) == 1:
         return revs[0]
-    print 'Error: unable to get revision ID of ' + page
+    print('Error: unable to get revision ID of ' + page)
     sys.exit()
 
 def fetchPage(page):
     "retrieves given page from the wiki"
-    print "fetching: ",page
+    print("fetching: ",page)
     failcount = 0
     while failcount < MAXFAIL:
         try:
@@ -184,7 +185,7 @@ def fetchPage(page):
             return html
         except HTTPError:
             failcount += 1
-    print 'Error: unable to fetch page ' + page
+    print('Error: unable to fetch page ' + page)
     sys.exit()
 
 def writeList(pages,filename):
@@ -192,7 +193,7 @@ def writeList(pages,filename):
     for p in pages:
         f.write(p+"\n")
     f.close()
-    if VERBOSE: print "written ",filename
+    if VERBOSE: print("written ",filename)
 
 if __name__ == "__main__":
     update(sys.argv[1:])