From 1e0b319d2bb2c7e5bcf14d795e2ef5f70d6f8621 Mon Sep 17 00:00:00 2001
From: evilhero
Date: Fri, 4 Mar 2016 15:04:19 -0500
Subject: [PATCH] FIX:(#1222) Fixed usage with NZBHydra - will now be able to properly grab the nzb files, as well as proper logging and handling of failed downloads, IMP: Duplicate Directory Dump option available in Configuration GUI. Any duplicates discovered during post-processing will be moved into this directory (if enabled) depending on the dupe constraints, FIX: Better handling of titles with '&' and '-' in the titles (as well as annuals) when adding series, which should mean more accurate results when trying to add a series, FIX:(#1142) If files didn't have the pages field metadata within the comicinfo.xml file, would error out and either fail to display the issue, or fail to scan the issue during an import scan, FIX: When adding/refreshing a series, if the cover image from CV is unable to be retrieved or is not of an adequate size, fall back to a different quality image from CV, FIX: When refreshing/adding a series, annuals will only be checked against once (it was previously running through the entire annual check twice), FIX: During RSS scans/checks, if a title in the results had an encoded & (&amp;), it would be stored as the html entity, which would never match up when doing actual comparison searches, IMP: Fixed usage of feedparser module in rss feeds so that it only uses the retrieved data and doesn't do the actual polling against the url (requests module now does), IMP: Added proper handling of error code 910 with dognzb (max api hits), so that once it hits the 910 error it will disable dognzb as a provider, FIX: When attempting to display issue details on a series detail page (the i icon in the issues table), if the metadata in the .cbz cannot be read or doesn't exist, will now display a graphical warning instead of a 500 error, IMP: Added fork/fork version/tag to comictagger user-agent/version, IMP: Removed configparser dependency from ComicTagger, FIX: When performing searches, improved volume label matching regardless of how the volume label is represented
---
 README.md | 3 +-
 data/interfaces/default/config.html | 17 +-
 .../default/images/symbol_exclamation.png | Bin 0 -> 12241 bytes
 lib/comictaggerlib/comicvinetalker.py | 6 +-
 lib/comictaggerlib/ctversion.py | 4 +-
 lib/comictaggerlib/options.py | 8 +-
 lib/comictaggerlib/readme | 10 +
 lib/comictaggerlib/settings.py | 348 ++++++------
 mylar/Failed.py | 10 +-
 mylar/PostProcessor.py | 61 ++-
 mylar/__init__.py | 9 +-
 mylar/cmtagmylar.py | 17 +-
 mylar/helpers.py | 75 ++-
 mylar/importer.py | 514 +++++++++---------
 mylar/mb.py | 26 +-
 mylar/rsscheck.py | 51 +-
 mylar/search.py | 80 ++-
 mylar/webserve.py | 84 +--
 mylar/weeklypull.py | 4 +-
 19 files changed, 736 insertions(+), 591 deletions(-)
 create mode 100755 data/interfaces/default/images/symbol_exclamation.png
 create mode 100644 lib/comictaggerlib/readme

diff --git a/README.md b/README.md
index 6cdd5e78..6c078058 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,8 @@ Mylar is an automated Comic Book (cbr/cbz) downloader program heavily-based on t
 Yes, it does work, yes there are still bugs, and for that reason I still consider it the definition of an 'Alpha Release'.
-This application requires a version of the 2.7.x Python branch for the best results (3.x is not supported)
+-REQUIREMENTS-
+- at least Python 2.7.9 for proper usage (3.x is not supported).
 ** NEW ** You will need to get your OWN ComicVine API Key for this application to fully work.
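For clarity on the headline NZBHydra fix: Hydra prefixes every result with a composite id that embeds a per-search 'searchid' token, so a failed grab can never be re-matched verbatim. The Failed.py hunk below wildcards that token out and matches with SQL LIKE instead. A minimal sketch of the rewrite, using a made-up id layout (the exact Hydra format is an assumption here, not taken from its docs):

# Hypothetical NZBHydra-style composite id; only the 'searchid:' surgery
# below mirrors the actual logic in mylar/Failed.py.
nzbid = '{indexerguid:abc123,searchid:42,title:Foo}'
st = nzbid.find('searchid:')
end = nzbid.find(',', st)
like_id = '%' + nzbid[:st] + '%' + nzbid[end + 1:len(nzbid) - 1] + '%'
# like_id == '%{indexerguid:abc123,%title:Foo%' -- a LIKE pattern that hits
# the same failed entry no matter which search produced the grab:
#   myDB.selectone('SELECT * FROM failed WHERE ID LIKE ?', [like_id])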
diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 9ef92e41..d73e1144 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -706,16 +706,28 @@ +
+ Duplicate Dump Folder +
+ +
+
+
+ + +
+
+
Failed Download Handling
-
+
- +
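The Duplicate Dump Folder field above feeds the duplicate_process() routine added to PostProcessor.py further down: when post-processing detects a duplicate and the option is enabled, the losing file is moved into the dump folder for manual review rather than deleted. A simplified, self-contained sketch of that move (paths hypothetical; the real code validates the directory via filechecker):

import os
import shutil

def dump_duplicate(path_to_move, dump_dir):
    # Park the losing copy for manual review instead of deleting it outright.
    if not os.path.isdir(dump_dir):
        os.makedirs(dump_dir)
    shutil.move(path_to_move, os.path.join(dump_dir, os.path.basename(path_to_move)))

dump_duplicate('/comics/Series/Issue 001.cbr', '/comics/_dupes')  # hypothetical paths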
@@ -1401,6 +1413,7 @@ initConfigCheckbox("#replace_spaces");
 initConfigCheckbox("#use_minsize");
 initConfigCheckbox("#use_maxsize");
+ initConfigCheckbox("#enable_ddump");
 initConfigCheckbox("#enable_failed");
 initConfigCheckbox("#enable_meta");
 initConfigCheckbox("#zero_level");
diff --git a/data/interfaces/default/images/symbol_exclamation.png b/data/interfaces/default/images/symbol_exclamation.png
new file mode 100755
index 0000000000000000000000000000000000000000..7d45631c21b9d76ebef60254417e3006f0aac1cf
Binary files /dev/null and b/data/interfaces/default/images/symbol_exclamation.png differ
diff --git a/lib/comictaggerlib/comicvinetalker.py b/lib/comictaggerlib/comicvinetalker.py
index fee65723..e82ee8a6 100644
--- a/lib/comictaggerlib/comicvinetalker.py
+++ b/lib/comictaggerlib/comicvinetalker.py
@@ -107,7 +107,7 @@ class ComicVineTalker(QObject):
         else:
             self.api_key = ComicVineTalker.api_key
-        self.cv_headers = {'User-Agent': 'ComicTagger.[ninjas.walk.alone.fork] - UserAgent + CV Rate Limiting / 1.01 - KATANA'}
+        self.cv_headers = {'User-Agent': 'ComicTagger ' + str(ctversion.version) + ' [' + ctversion.fork + ' / ' + ctversion.fork_tag + ']'}
         self.log_func = None

     def setLogFunc( self , log_func ):
@@ -449,8 +449,10 @@ class ComicVineTalker(QObject):
         if settings.use_series_start_as_volume:
             metadata.volume = volume_results['start_year']
-        metadata.notes = "Tagged with ComicTagger {0} using info from Comic Vine on {1}. [Issue ID {2}]".format(
+        metadata.notes = "Tagged with the {1} fork of ComicTagger {0} using info from Comic Vine on {3}. [Issue ID {4}]".format(
             ctversion.version,
+            ctversion.fork,
+            ctversion.fork_tag,
             datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
             issue_results['id'])
         #metadata.notes += issue_results['site_detail_url']
diff --git a/lib/comictaggerlib/ctversion.py b/lib/comictaggerlib/ctversion.py
index 8d86cf7d..d4bc58b5 100644
--- a/lib/comictaggerlib/ctversion.py
+++ b/lib/comictaggerlib/ctversion.py
@@ -1,3 +1,5 @@
 # This file should contan only these comments, and the line below.
# Used by packaging makefiles and app -version="1.1.15-beta" +version="1.20.0" +fork="ninjas.walk.alone" +fork_tag="SHURIKEN" diff --git a/lib/comictaggerlib/options.py b/lib/comictaggerlib/options.py index 5e0dba21..15783511 100644 --- a/lib/comictaggerlib/options.py +++ b/lib/comictaggerlib/options.py @@ -318,13 +318,9 @@ For more help visit the wiki at: http://code.google.com/p/comictagger/ if o == "--only-set-cv-key": self.only_set_key = True if o == "--version": - print "ComicTagger {0}: Copyright (c) 2012-2014 Anthony Beville".format(ctversion.version) + print "ComicTagger {0} [{1} / {2}]".format(ctversion.version, ctversion.fork, ctversion.fork_tag) + print "Modified version of ComicTagger (Copyright (c) 2012-2014 Anthony Beville)" print "Distributed under Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)" - new_version = VersionChecker().getLatestVersion("", False) - if new_version is not None and new_version != ctversion.version: - print "----------------------------------------" - print "New version available online: {0}".format(new_version) - print "----------------------------------------" sys.exit(0) if o in ("-t", "--type"): if a.lower() == "cr": diff --git a/lib/comictaggerlib/readme b/lib/comictaggerlib/readme new file mode 100644 index 00000000..d9a7a33f --- /dev/null +++ b/lib/comictaggerlib/readme @@ -0,0 +1,10 @@ +ComicTagger.[ninjas.walk.alone] + +Fork: Ninjas Walk Alone + + +Modified ComicTagger 1.15.beta to include some patches to allow for better integration with both ComicVine and mylar. These fixes are: +- UserAgent included now to allow for usage with ComicVine +- ComicVine Rate limiting is now on a per api request (1 api request / 2s) +- Removed requirement for configparser +- Changed queries to ComicVine to utilize the requests module diff --git a/lib/comictaggerlib/settings.py b/lib/comictaggerlib/settings.py index 47c9e981..13c59099 100644 --- a/lib/comictaggerlib/settings.py +++ b/lib/comictaggerlib/settings.py @@ -20,13 +20,20 @@ limitations under the License. import os import sys -import configparser import platform import codecs import uuid - import utils +try: + config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..') + if config_path not in sys.path: + sys.path.append(config_path) + from lib.configobj import ConfigObj +except ImportError: + print "Unable to use configobj module. This is a CRITICAL error and ComicTagger cannot proceed. Exiting." 
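Shown in isolation, the pattern that replaces configparser throughout this file: ConfigObj exposes the INI as nested dicts, and the check_setting_* helpers defined below read a value or fall back to (and persist) a default. A standalone sketch, using the same 'settings.ini' name the class below adopts:

from lib.configobj import ConfigObj

cfg = ConfigObj('settings.ini', encoding='utf-8')

def check_setting_str(config, section, item, default):
    # Read-or-default, mirroring the helper on ComicTaggerSettings below.
    try:
        value = config[section][item]
    except KeyError:
        value = default
    if section not in config:
        config[section] = {}
    config[section][item] = value
    return value

rar_exe_path = check_setting_str(cfg, 'settings', 'rar_exe_path', '')
cfg.write()  # persists any defaults that were filled in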
+ + class ComicTaggerSettings: @staticmethod @@ -137,13 +144,14 @@ class ComicTaggerSettings: self.folder = "" self.setDefaultValues() - self.config = configparser.RawConfigParser() + #self.config = configparser.RawConfigParser() self.folder = ComicTaggerSettings.getSettingsFolder() if not os.path.exists( self.folder ): os.makedirs( self.folder ) - self.settings_file = os.path.join( self.folder, "settings") + self.settings_file = os.path.join( self.folder, "settings.ini") + self.CFG = ConfigObj(self.settings_file, encoding='utf-8') # if config file doesn't exist, write one out if not os.path.exists( self.settings_file ): @@ -182,204 +190,176 @@ class ComicTaggerSettings: os.unlink( self.settings_file ) self.__init__() + def CheckSection(self, sec): + """ Check if INI section exists, if not create it """ + try: + self.CFG[sec] + return True + except: + self.CFG[sec] = {} + return False + + ################################################################################ + # Check_setting_int # + ################################################################################ + def check_setting_int(self, config, cfg_name, item_name, def_val): + try: + my_val = int(config[cfg_name][item_name]) + except: + my_val = def_val + try: + config[cfg_name][item_name] = my_val + except: + config[cfg_name] = {} + config[cfg_name][item_name] = my_val + return my_val + + ################################################################################ + # Check_setting_str # + ################################################################################ + def check_setting_str(self, config, cfg_name, item_name, def_val, log=True): + try: + my_val = config[cfg_name][item_name] + except: + my_val = def_val + try: + config[cfg_name][item_name] = my_val + except: + config[cfg_name] = {} + config[cfg_name][item_name] = my_val + + return my_val + def load(self): - def readline_generator(f): - line = f.readline() - while line: - yield line - line = f.readline() + self.rar_exe_path = self.check_setting_str(self.CFG, 'settings', 'rar_exe_path', '') + self.unrar_exe_path = self.check_setting_str(self.CFG, 'settings', 'unurar_exe_path', '') + self.check_for_new_version = bool(self.check_setting_int(self.CFG, 'settings', 'check_for_new_version', 0)) + self.send_usage_stats = bool(self.check_setting_int(self.CFG, 'settings', 'send_usage_stats', 0)) + self.install_id = self.check_setting_str(self.CFG, 'auto', 'install_id', '') + self.last_selected_load_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_load_data_style', '') + self.last_selected_save_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_save_data_style', '') + self.last_selected_save_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_save_data_style', '') + self.last_opened_folder = self.check_setting_str(self.CFG, 'auto', 'last_opened_folder', '') + self.last_main_window_width = self.check_setting_str(self.CFG, 'auto', 'last_main_window_width', '') + self.last_main_window_height = self.check_setting_str(self.CFG, 'auto', 'last_main_window_height', '') + self.last_form_side_width = self.check_setting_str(self.CFG, 'auto', 'last_form_side_width', '') + self.last_list_side_width = self.check_setting_str(self.CFG, 'auto', 'last_list_side_width', '') + self.last_filelist_sorted_column = self.check_setting_str(self.CFG, 'auto', 'last_filelist_sorted_column', '') + self.last_filelist_sorted_order = self.check_setting_str(self.CFG, 'auto', 'last_filelist_sorted_order', '') + self.last_main_window_x = 
self.check_setting_str(self.CFG, 'auto', 'last_main_window_x', '') + self.last_main_window_y = self.check_setting_str(self.CFG, 'auto', 'last_main_window_y','') + self.last_form_side_width = self.check_setting_str(self.CFG, 'auto', 'last_form_side_width','') + self.last_list_side_width = self.check_setting_str(self.CFG, 'auto', 'last_list_side_width','') - #self.config.readfp(codecs.open(self.settings_file, "r", "utf8")) - self.config.read_file(readline_generator(codecs.open(self.settings_file, "r", "utf8"))) - - self.rar_exe_path = self.config.get( 'settings', 'rar_exe_path' ) - self.unrar_exe_path = self.config.get( 'settings', 'unrar_exe_path' ) - if self.config.has_option('settings', 'check_for_new_version'): - self.check_for_new_version = self.config.getboolean( 'settings', 'check_for_new_version' ) - if self.config.has_option('settings', 'send_usage_stats'): - self.send_usage_stats = self.config.getboolean( 'settings', 'send_usage_stats' ) - - if self.config.has_option('auto', 'install_id'): - self.install_id = self.config.get( 'auto', 'install_id' ) - if self.config.has_option('auto', 'last_selected_load_data_style'): - self.last_selected_load_data_style = self.config.getint( 'auto', 'last_selected_load_data_style' ) - if self.config.has_option('auto', 'last_selected_save_data_style'): - self.last_selected_save_data_style = self.config.getint( 'auto', 'last_selected_save_data_style' ) - if self.config.has_option('auto', 'last_opened_folder'): - self.last_opened_folder = self.config.get( 'auto', 'last_opened_folder' ) - if self.config.has_option('auto', 'last_main_window_width'): - self.last_main_window_width = self.config.getint( 'auto', 'last_main_window_width' ) - if self.config.has_option('auto', 'last_main_window_height'): - self.last_main_window_height = self.config.getint( 'auto', 'last_main_window_height' ) - if self.config.has_option('auto', 'last_main_window_x'): - self.last_main_window_x = self.config.getint( 'auto', 'last_main_window_x' ) - if self.config.has_option('auto', 'last_main_window_y'): - self.last_main_window_y = self.config.getint( 'auto', 'last_main_window_y' ) - if self.config.has_option('auto', 'last_form_side_width'): - self.last_form_side_width = self.config.getint( 'auto', 'last_form_side_width' ) - if self.config.has_option('auto', 'last_list_side_width'): - self.last_list_side_width = self.config.getint( 'auto', 'last_list_side_width' ) - if self.config.has_option('auto', 'last_filelist_sorted_column'): - self.last_filelist_sorted_column = self.config.getint( 'auto', 'last_filelist_sorted_column' ) - if self.config.has_option('auto', 'last_filelist_sorted_order'): - self.last_filelist_sorted_order = self.config.getint( 'auto', 'last_filelist_sorted_order' ) + self.id_length_delta_thresh = self.check_setting_str(self.CFG, 'identifier', 'id_length_delta_thresh', '') + self.id_publisher_blacklist = self.check_setting_str(self.CFG, 'identifier', 'id_publisher_blacklist', '') - if self.config.has_option('identifier', 'id_length_delta_thresh'): - self.id_length_delta_thresh = self.config.getint( 'identifier', 'id_length_delta_thresh' ) - if self.config.has_option('identifier', 'id_publisher_blacklist'): - self.id_publisher_blacklist = self.config.get( 'identifier', 'id_publisher_blacklist' ) + self.parse_scan_info = bool(self.check_setting_int(self.CFG, 'filenameparser', 'parse_scan_info', 0)) - if self.config.has_option('filenameparser', 'parse_scan_info'): - self.parse_scan_info = self.config.getboolean( 'filenameparser', 'parse_scan_info' ) + 
self.ask_about_cbi_in_rar = bool(self.check_setting_int(self.CFG, 'dialogflags', 'ask_about_cbi_in_rar', 0)) + self.show_disclaimer = bool(self.check_setting_int(self.CFG, 'dialogflags', 'show_disclaimer', 0)) + self.dont_notify_about_this_version = self.check_setting_str(self.CFG, 'dialogflags', 'dont_notify_about_this_version', '') + self.ask_about_usage_stats = bool(self.check_setting_int(self.CFG, 'dialogflags', 'ask_about_usage_stats', 0)) + self.show_no_unrar_warning = bool(self.check_setting_int(self.CFG, 'dialogflags', 'show_no_unrar_warning', 0)) + + self.use_series_start_as_volume = bool(self.check_setting_int(self.CFG, 'comicvine', 'use_series_start_as_volume', 0)) + self.clear_form_before_populating_from_cv = bool(self.check_setting_int(self.CFG, 'comicvine', 'clear_form_before_populating_from_cv', 0)) + self.remove_html_tables = bool(self.check_setting_int(self.CFG, 'comicvine', 'remove_html_tables', 0)) + self.cv_api_key = self.check_setting_str(self.CFG, 'comicvine', 'cv_api_key', '') - if self.config.has_option('dialogflags', 'ask_about_cbi_in_rar'): - self.ask_about_cbi_in_rar = self.config.getboolean( 'dialogflags', 'ask_about_cbi_in_rar' ) - if self.config.has_option('dialogflags', 'show_disclaimer'): - self.show_disclaimer = self.config.getboolean( 'dialogflags', 'show_disclaimer' ) - if self.config.has_option('dialogflags', 'dont_notify_about_this_version'): - self.dont_notify_about_this_version = self.config.get( 'dialogflags', 'dont_notify_about_this_version' ) - if self.config.has_option('dialogflags', 'ask_about_usage_stats'): - self.ask_about_usage_stats = self.config.getboolean( 'dialogflags', 'ask_about_usage_stats' ) - if self.config.has_option('dialogflags', 'show_no_unrar_warning'): - self.show_no_unrar_warning = self.config.getboolean( 'dialogflags', 'show_no_unrar_warning' ) - - if self.config.has_option('comicvine', 'use_series_start_as_volume'): - self.use_series_start_as_volume = self.config.getboolean( 'comicvine', 'use_series_start_as_volume' ) - if self.config.has_option('comicvine', 'clear_form_before_populating_from_cv'): - self.clear_form_before_populating_from_cv = self.config.getboolean( 'comicvine', 'clear_form_before_populating_from_cv' ) - if self.config.has_option('comicvine', 'remove_html_tables'): - self.remove_html_tables = self.config.getboolean( 'comicvine', 'remove_html_tables' ) - if self.config.has_option('comicvine', 'cv_api_key'): - self.cv_api_key = self.config.get( 'comicvine', 'cv_api_key' ) - - if self.config.has_option('cbl_transform', 'assume_lone_credit_is_primary'): - self.assume_lone_credit_is_primary = self.config.getboolean( 'cbl_transform', 'assume_lone_credit_is_primary' ) - if self.config.has_option('cbl_transform', 'copy_characters_to_tags'): - self.copy_characters_to_tags = self.config.getboolean( 'cbl_transform', 'copy_characters_to_tags' ) - if self.config.has_option('cbl_transform', 'copy_teams_to_tags'): - self.copy_teams_to_tags = self.config.getboolean( 'cbl_transform', 'copy_teams_to_tags' ) - if self.config.has_option('cbl_transform', 'copy_locations_to_tags'): - self.copy_locations_to_tags = self.config.getboolean( 'cbl_transform', 'copy_locations_to_tags' ) - if self.config.has_option('cbl_transform', 'copy_notes_to_comments'): - self.copy_notes_to_comments = self.config.getboolean( 'cbl_transform', 'copy_notes_to_comments' ) - if self.config.has_option('cbl_transform', 'copy_storyarcs_to_tags'): - self.copy_storyarcs_to_tags = self.config.getboolean( 'cbl_transform', 'copy_storyarcs_to_tags' ) - if 
self.config.has_option('cbl_transform', 'copy_weblink_to_comments'): - self.copy_weblink_to_comments = self.config.getboolean( 'cbl_transform', 'copy_weblink_to_comments' ) - if self.config.has_option('cbl_transform', 'apply_cbl_transform_on_cv_import'): - self.apply_cbl_transform_on_cv_import = self.config.getboolean( 'cbl_transform', 'apply_cbl_transform_on_cv_import' ) - if self.config.has_option('cbl_transform', 'apply_cbl_transform_on_bulk_operation'): - self.apply_cbl_transform_on_bulk_operation = self.config.getboolean( 'cbl_transform', 'apply_cbl_transform_on_bulk_operation' ) + self.assume_lone_credit_is_primary = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'assume_lone_credit_is_primary', 0)) + self.copy_characters_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_characters_to_tags', 0)) + self.copy_teams_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_teams_to_tags', 0)) + self.copy_locations_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_locations_to_tags', 0)) + self.copy_notes_to_comments = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_notes_to_comments', 0)) + self.copy_storyarcs_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_storyarcs_to_tags', 0)) + self.copy_weblink_to_comments = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_weblink_to_comments', 0)) + self.apply_cbl_transform_on_cv_import = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'apply_cbl_transform_on_cv_import', 0)) + self.apply_cbl_transform_on_bulk_operation = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'apply_cbl_transform_on_bulk_operation', 0)) - if self.config.has_option('rename', 'rename_template'): - self.rename_template = self.config.get( 'rename', 'rename_template' ) - if self.config.has_option('rename', 'rename_issue_number_padding'): - self.rename_issue_number_padding = self.config.getint( 'rename', 'rename_issue_number_padding' ) - if self.config.has_option('rename', 'rename_use_smart_string_cleanup'): - self.rename_use_smart_string_cleanup = self.config.getboolean( 'rename', 'rename_use_smart_string_cleanup' ) - if self.config.has_option('rename', 'rename_extension_based_on_archive'): - self.rename_extension_based_on_archive = self.config.getboolean( 'rename', 'rename_extension_based_on_archive' ) + self.rename_template = bool(self.check_setting_int(self.CFG, 'rename', 'rename_template', 0)) + self.rename_issue_number_padding = self.check_setting_str(self.CFG, 'rename', 'rename_issue_number_padding', '') + self.rename_use_smart_string_cleanup = bool(self.check_setting_int(self.CFG, 'rename', 'rename_use_smart_string_cleanup', 0)) + self.rename_extension_based_on_archive = bool(self.check_setting_int(self.CFG, 'rename', 'rename_extension_based_on_archive', 0)) - if self.config.has_option('autotag', 'save_on_low_confidence'): - self.save_on_low_confidence = self.config.getboolean( 'autotag', 'save_on_low_confidence' ) - if self.config.has_option('autotag', 'dont_use_year_when_identifying'): - self.dont_use_year_when_identifying = self.config.getboolean( 'autotag', 'dont_use_year_when_identifying' ) - if self.config.has_option('autotag', 'assume_1_if_no_issue_num'): - self.assume_1_if_no_issue_num = self.config.getboolean( 'autotag', 'assume_1_if_no_issue_num' ) - if self.config.has_option('autotag', 'ignore_leading_numbers_in_filename'): - self.ignore_leading_numbers_in_filename = self.config.getboolean( 'autotag', 
'ignore_leading_numbers_in_filename' ) - if self.config.has_option('autotag', 'remove_archive_after_successful_match'): - self.remove_archive_after_successful_match = self.config.getboolean( 'autotag', 'remove_archive_after_successful_match' ) - if self.config.has_option('autotag', 'wait_and_retry_on_rate_limit'): - self.wait_and_retry_on_rate_limit = self.config.getboolean( 'autotag', 'wait_and_retry_on_rate_limit' ) + self.save_on_low_confidence = bool(self.check_setting_int(self.CFG, 'autotag', 'save_on_low_confidence', 0)) + self.dont_use_year_when_identifying = bool(self.check_setting_int(self.CFG, 'autotag', 'dont_use_year_when_identifying', 0)) + self.assume_1_if_no_issue_num = bool(self.check_setting_int(self.CFG, 'autotag', 'assume_1_if_no_issue_num', 0)) + self.ignore_leading_numbers_in_filename = bool(self.check_setting_int(self.CFG, 'autotag', 'ignore_leading_numbers_in_filename', 0)) + self.remove_archive_after_successful_match = bool(self.check_setting_int(self.CFG, 'autotag', 'remove_archive_after_successful_match', 0)) + self.wait_and_retry_on_rate_limit = bool(self.check_setting_int(self.CFG, 'autotag', 'wait_and_retry_on_rate_limit', 0)) def save( self ): + new_config = ConfigObj() + new_config.filename = self.settings_file - if not self.config.has_section( 'settings' ): - self.config.add_section( 'settings' ) + new_config.encoding = 'UTF8' + new_config['settings'] = {} + new_config['settings']['check_for_new_version'] = self.check_for_new_version + new_config['settings']['rar_exe_path'] = self.rar_exe_path + new_config['settings']['unrar_exe_path'] = self.unrar_exe_path + new_config['settings']['send_usage_stats'] = self.send_usage_stats - self.config.set( 'settings', 'check_for_new_version', self.check_for_new_version ) - self.config.set( 'settings', 'rar_exe_path', self.rar_exe_path ) - self.config.set( 'settings', 'unrar_exe_path', self.unrar_exe_path ) - self.config.set( 'settings', 'send_usage_stats', self.send_usage_stats ) + new_config.write() + new_config['auto'] = {} + new_config['auto']['install_id'] = self.install_id + new_config['auto']['last_selected_load_data_style'] = self.last_selected_load_data_style + new_config['auto']['last_selected_save_data_style'] = self.last_selected_save_data_style + new_config['auto']['last_opened_folder'] = self.last_opened_folder + new_config['auto']['last_main_window_width'] = self.last_main_window_width + new_config['auto']['last_main_window_height'] = self.last_main_window_height + new_config['auto']['last_main_window_x'] = self.last_main_window_x + new_config['auto']['last_main_window_y'] = self.last_main_window_y + new_config['auto']['last_form_side_width'] = self.last_form_side_width + new_config['auto']['last_list_side_width'] = self.last_list_side_width + new_config['auto']['last_filelist_sorted_column'] = self.last_filelist_sorted_column + new_config['auto']['last_filelist_sorted_order'] = self.last_filelist_sorted_order - if not self.config.has_section( 'auto' ): - self.config.add_section( 'auto' ) + new_config['identifier'] = {} + new_config['identifier']['id_length_delta_thresh'] = self.id_length_delta_thresh + new_config['identifier']['id_publisher_blacklist'] = self.id_publisher_blacklist - self.config.set( 'auto', 'install_id', self.install_id ) - self.config.set( 'auto', 'last_selected_load_data_style', self.last_selected_load_data_style ) - self.config.set( 'auto', 'last_selected_save_data_style', self.last_selected_save_data_style ) - self.config.set( 'auto', 'last_opened_folder', self.last_opened_folder ) - 
self.config.set( 'auto', 'last_main_window_width', self.last_main_window_width ) - self.config.set( 'auto', 'last_main_window_height', self.last_main_window_height ) - self.config.set( 'auto', 'last_main_window_x', self.last_main_window_x ) - self.config.set( 'auto', 'last_main_window_y', self.last_main_window_y ) - self.config.set( 'auto', 'last_form_side_width', self.last_form_side_width ) - self.config.set( 'auto', 'last_list_side_width', self.last_list_side_width ) - self.config.set( 'auto', 'last_filelist_sorted_column', self.last_filelist_sorted_column ) - self.config.set( 'auto', 'last_filelist_sorted_order', self.last_filelist_sorted_order ) + new_config['dialogflags'] = {} + new_config['dialogflags']['ask_about_cbi_in_rar'] = self.ask_about_cbi_in_rar + new_config['dialogflags']['show_disclaimer'] = self.show_disclaimer + new_config['dialogflags']['dont_notify_about_this_version'] = self.dont_notify_about_this_version + new_config['dialogflags']['ask_about_usage_stats'] = self.ask_about_usage_stats + new_config['dialogflags']['show_no_unrar_warning'] = self.show_no_unrar_warning - if not self.config.has_section( 'identifier' ): - self.config.add_section( 'identifier' ) - - self.config.set( 'identifier', 'id_length_delta_thresh', self.id_length_delta_thresh ) - self.config.set( 'identifier', 'id_publisher_blacklist', self.id_publisher_blacklist ) - - if not self.config.has_section( 'dialogflags' ): - self.config.add_section( 'dialogflags' ) - - self.config.set( 'dialogflags', 'ask_about_cbi_in_rar', self.ask_about_cbi_in_rar ) - self.config.set( 'dialogflags', 'show_disclaimer', self.show_disclaimer ) - self.config.set( 'dialogflags', 'dont_notify_about_this_version', self.dont_notify_about_this_version ) - self.config.set( 'dialogflags', 'ask_about_usage_stats', self.ask_about_usage_stats ) - self.config.set( 'dialogflags', 'show_no_unrar_warning', self.show_no_unrar_warning ) - - if not self.config.has_section( 'filenameparser' ): - self.config.add_section( 'filenameparser' ) + new_config['filenameparser'] = {} + new_config['filenameparser']['parse_scan_info'] = self.parse_scan_info - self.config.set( 'filenameparser', 'parse_scan_info', self.parse_scan_info ) - - if not self.config.has_section( 'comicvine' ): - self.config.add_section( 'comicvine' ) - - self.config.set( 'comicvine', 'use_series_start_as_volume', self.use_series_start_as_volume ) - self.config.set( 'comicvine', 'clear_form_before_populating_from_cv', self.clear_form_before_populating_from_cv ) - self.config.set( 'comicvine', 'remove_html_tables', self.remove_html_tables ) - self.config.set( 'comicvine', 'cv_api_key', self.cv_api_key ) + new_config['comicvine'] = {} + new_config['comicvine']['use_series_start_as_volume'] = self.use_series_start_as_volume + new_config['comicvine']['clear_form_before_populating_from_cv'] = self.clear_form_before_populating_from_cv + new_config['comicvine']['remove_html_tables'] = self.remove_html_tables + new_config['comicvine']['cv_api_key'] = self.cv_api_key - if not self.config.has_section( 'cbl_transform' ): - self.config.add_section( 'cbl_transform' ) + new_config['cbl_transform'] = {} + new_config['cbl_transform']['assume_lone_credit_is_primary'] = self.assume_lone_credit_is_primary + new_config['cbl_transform']['copy_characters_to_tags'] = self.copy_characters_to_tags + new_config['cbl_transform']['copy_teams_to_tags'] = self.copy_teams_to_tags + new_config['cbl_transform']['copy_locations_to_tags'] = self.copy_locations_to_tags + 
new_config['cbl_transform']['copy_storyarcs_to_tags'] = self.copy_storyarcs_to_tags + new_config['cbl_transform']['copy_notes_to_comments'] = self.copy_notes_to_comments + new_config['cbl_transform']['copy_weblink_to_comments'] = self.copy_weblink_to_comments + new_config['cbl_transform']['apply_cbl_transform_on_cv_import'] = self.apply_cbl_transform_on_cv_import + new_config['cbl_transform']['apply_cbl_transform_on_bulk_operation'] = self.apply_cbl_transform_on_bulk_operation - self.config.set( 'cbl_transform', 'assume_lone_credit_is_primary', self.assume_lone_credit_is_primary ) - self.config.set( 'cbl_transform', 'copy_characters_to_tags', self.copy_characters_to_tags ) - self.config.set( 'cbl_transform', 'copy_teams_to_tags', self.copy_teams_to_tags ) - self.config.set( 'cbl_transform', 'copy_locations_to_tags', self.copy_locations_to_tags ) - self.config.set( 'cbl_transform', 'copy_storyarcs_to_tags', self.copy_storyarcs_to_tags ) - self.config.set( 'cbl_transform', 'copy_notes_to_comments', self.copy_notes_to_comments ) - self.config.set( 'cbl_transform', 'copy_weblink_to_comments', self.copy_weblink_to_comments ) - self.config.set( 'cbl_transform', 'apply_cbl_transform_on_cv_import', self.apply_cbl_transform_on_cv_import ) - self.config.set( 'cbl_transform', 'apply_cbl_transform_on_bulk_operation', self.apply_cbl_transform_on_bulk_operation ) - - if not self.config.has_section( 'rename' ): - self.config.add_section( 'rename' ) - - self.config.set( 'rename', 'rename_template', self.rename_template ) - self.config.set( 'rename', 'rename_issue_number_padding', self.rename_issue_number_padding ) - self.config.set( 'rename', 'rename_use_smart_string_cleanup', self.rename_use_smart_string_cleanup ) - self.config.set( 'rename', 'rename_extension_based_on_archive', self.rename_extension_based_on_archive ) - - if not self.config.has_section( 'autotag' ): - self.config.add_section( 'autotag' ) - self.config.set( 'autotag', 'save_on_low_confidence', self.save_on_low_confidence ) - self.config.set( 'autotag', 'dont_use_year_when_identifying', self.dont_use_year_when_identifying ) - self.config.set( 'autotag', 'assume_1_if_no_issue_num', self.assume_1_if_no_issue_num ) - self.config.set( 'autotag', 'ignore_leading_numbers_in_filename', self.ignore_leading_numbers_in_filename ) - self.config.set( 'autotag', 'remove_archive_after_successful_match', self.remove_archive_after_successful_match ) - self.config.set( 'autotag', 'wait_and_retry_on_rate_limit', self.wait_and_retry_on_rate_limit ) - - with codecs.open( self.settings_file, 'wb', 'utf8') as configfile: - self.config.write(configfile) + new_config['rename'] = {} + new_config['rename']['rename_template'] = self.rename_template + new_config['rename']['rename_issue_number_padding'] = self.rename_issue_number_padding + new_config['rename']['rename_use_smart_string_cleanup'] = self.rename_use_smart_string_cleanup + new_config['rename']['rename_extension_based_on_archive'] = self.rename_extension_based_on_archive + new_config['autotag'] = {} + new_config['autotag']['save_on_low_confidence'] = self.save_on_low_confidence + new_config['autotag']['dont_use_year_when_identifying'] = self.dont_use_year_when_identifying + new_config['autotag']['assume_1_if_no_issue_num'] = self.assume_1_if_no_issue_num + new_config['autotag']['ignore_leading_numbers_in_filename'] = self.ignore_leading_numbers_in_filename + new_config['autotag']['remove_archive_after_successful_match'] = self.remove_archive_after_successful_match + 
new_config['autotag']['wait_and_retry_on_rate_limit'] = self.wait_and_retry_on_rate_limit + #make sure the basedir is cached, in case we're on windows running a script from frozen binary ComicTaggerSettings.baseDir() diff --git a/mylar/Failed.py b/mylar/Failed.py index 94a6d0c1..b4b406cd 100644 --- a/mylar/Failed.py +++ b/mylar/Failed.py @@ -242,7 +242,15 @@ class FailedProcessor(object): # Perhaps later improvement might be to break it down by provider so that Mylar will attempt to # download same issues on different providers (albeit it shouldn't matter, if it's broke it's broke). logger.info('prov : ' + str(self.prov) + '[' + str(self.id) + ']') - chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID=?', [self.id]).fetchone() + # if this is from nzbhydra, we need to rejig the id line so that the searchid is removed since it's always unique to the search. + if 'indexerguid' in self.id: + st = self.id.find('searchid:') + end = self.id.find(',',st) + self.id = '%' + self.id[:st] + '%' + self.id[end+1:len(self.id)-1] + '%' + chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID LIKE ?', [self.id]).fetchone() + else: + chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID=?', [self.id]).fetchone() + if chk_fail is None: logger.info(module + ' Successfully marked this download as Good for downloadable content') return 'Good' diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 4136c995..6868b271 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -163,6 +163,29 @@ class PostProcessor(object): self._log(u"Unable to run extra_script: " + str(script_cmd)) + def duplicate_process(self, dupeinfo): + #path to move 'should' be the entire path to the given file + path_to_move = dupeinfo[0]['to_dupe'] + file_to_move = os.path.split(path_to_move)[1] + + if dupeinfo[0]['action'] == 'dupe_src': + logger.info('[DUPLICATE-CLEANUP] New File will be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.') + else: + logger.info('[DUPLICATE-CLEANUP] New File will not be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.') + + #check to make sure duplicate_dump directory exists: + checkdirectory = filechecker.validateAndCreateDirectory(mylar.DUPLICATE_DUMP, True, module='[DUPLICATE-CLEANUP]') + + #this gets tricky depending on if it's the new filename or the existing filename, and whether or not 'copy' or 'move' has been selected. + try: + shutil.move(path_to_move, os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) + except (OSError, IOError): + logger.warn('[DUPLICATE-CLEANUP] Failed to move ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) + return False + + logger.warn('[DUPLICATE-CLEANUP] Successfully moved ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) + return True + def Process(self): module = self.module self._log("nzb name: " + self.nzb_name) @@ -930,16 +953,22 @@ class PostProcessor(object): break dupthis = helpers.duplicate_filecheck(ml['ComicLocation'], ComicID=comicid, IssueID=issueid) - if dupthis == "write": + if dupthis[0]['action'] == 'dupe_src' or dupthis[0]['action'] == 'dupe_file': + #check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention. 
+ #'dupe_file' - do not write new file as existing file is better quality + #'dupe_src' - write new file, as existing file is a lesser quality (dupe) + if mylar.DUPLICATE_DUMP: + dupchkit = self.duplicate_process(dupthis) + if dupchkit == False: + logger.warn('Unable to move duplicate file - skipping post-processing of this file.') + continue + + + if dupthis[0]['action'] == "write" or dupthis[0]['action'] == 'dupe_src': stat = ' [' + str(i) + '/' + str(len(manual_list)) + ']' self.Process_next(comicid, issueid, issuenumOG, ml, stat) dupthis = None - else: - pass - #check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention. - #if mylar.DUPLICATE_DUMP: - # if dupthis == 'dupe_src': - # + logger.info(module + ' Manual post-processing completed for ' + str(i) + ' issues.') return else: @@ -947,7 +976,22 @@ class PostProcessor(object): issuenumOG = issuenzb['Issue_Number'] #the self.nzb_folder should contain only the existing filename dupthis = helpers.duplicate_filecheck(self.nzb_folder, ComicID=comicid, IssueID=issueid) - if dupthis == "write": + if dupthis[0]['action'] == 'dupe_src' or dupthis[0]['action'] == 'dupe_file': + #check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention. + #'dupe_file' - do not write new file as existing file is better quality + #'dupe_src' - write new file, as existing file is a lesser quality (dupe) + if mylar.DUPLICATE_DUMP: + dupchkit = self.duplicate_process(dupthis) + if dupchkit == False: + logger.warn('Unable to move duplicate file - skipping post-processing of this file.') + self.valreturn.append({"self.log": self.log, + "mode": 'stop', + "issueid": issueid, + "comicid": comicid}) + + return self.queue.put(self.valreturn) + + if dupthis[0]['action'] == "write" or dupthis[0]['action'] == 'dupe_src': return self.Process_next(comicid, issueid, issuenumOG) else: self.valreturn.append({"self.log": self.log, @@ -957,7 +1001,6 @@ class PostProcessor(object): return self.queue.put(self.valreturn) - def Process_next(self, comicid, issueid, issuenumOG, ml=None, stat=None): if stat is None: stat = ' [1/1]' module = self.module diff --git a/mylar/__init__.py b/mylar/__init__.py index a4b4b243..ecde01cb 100755 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -165,6 +165,8 @@ CHECK_FOLDER = None ENABLE_CHECK_FOLDER = False INTERFACE = None DUPECONSTRAINT = None +DDUMP = 0 +DUPLICATE_DUMP = None PREFERRED_QUALITY = 0 CORRECT_METADATA = False MOVE_FILES = False @@ -415,7 +417,7 @@ def initialize(): global __INITIALIZED__, DBCHOICE, DBUSER, DBPASS, DBNAME, COMICVINE_API, DEFAULT_CVAPI, CVAPI_RATE, CV_HEADERS, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, UPCOMING_SNATCHED, COMICSORT, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, MAX_LOGSIZE, OLDCONFIG_VERSION, OS_DETECT, \ queue, LOCAL_IP, EXT_IP, HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, HTTPS_FORCE_ON, HOST_RETURN, API_ENABLED, API_KEY, DOWNLOAD_APIKEY, LAUNCH_BROWSER, GIT_PATH, SAFESTART, AUTO_UPDATE, \ CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, GIT_USER, GIT_BRANCH, USER_AGENT, DESTINATION_DIR, MULTIPLE_DEST_DIRS, CREATE_FOLDERS, DELETE_REMOVE_DIR, \ - DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, DUPECONSTRAINT, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, HIGHCOUNT, \ + DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, 
NZB_STARTUP_SEARCH, INTERFACE, DUPECONSTRAINT, DDUMP, DUPLICATE_DUMP, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, HIGHCOUNT, \ DOWNLOAD_SCAN_INTERVAL, FOLDER_SCAN_LOG_VERBOSE, IMPORTLOCK, NZB_DOWNLOADER, USE_SABNZBD, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_TO_MYLAR, SAB_DIRECTORY, USE_BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \ USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBGET_DIRECTORY, NZBSU, NZBSU_UID, NZBSU_APIKEY, NZBSU_VERIFY, DOGNZB, DOGNZB_APIKEY, DOGNZB_VERIFY, \ NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_VERIFY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \ @@ -515,6 +517,8 @@ def initialize(): ENABLE_CHECK_FOLDER = bool(check_setting_int(CFG, 'General', 'enable_check_folder', 0)) INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default') DUPECONSTRAINT = check_setting_str(CFG, 'General', 'dupeconstraint', 'filesize') + DDUMP = bool(check_setting_int(CFG, 'General', 'ddump', 0)) + DUPLICATE_DUMP = check_setting_str(CFG, 'General', 'duplicate_dump', '') AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0)) AUTOWANT_UPCOMING = bool(check_setting_int(CFG, 'General', 'autowant_upcoming', 1)) COMIC_COVER_LOCAL = bool(check_setting_int(CFG, 'General', 'comic_cover_local', 0)) @@ -1256,6 +1260,8 @@ def config_write(): new_config['General']['check_folder'] = CHECK_FOLDER new_config['General']['interface'] = INTERFACE new_config['General']['dupeconstraint'] = DUPECONSTRAINT + new_config['General']['ddump'] = DDUMP + new_config['General']['duplicate_dump'] = DUPLICATE_DUMP new_config['General']['autowant_all'] = int(AUTOWANT_ALL) new_config['General']['autowant_upcoming'] = int(AUTOWANT_UPCOMING) new_config['General']['preferred_quality'] = int(PREFERRED_QUALITY) @@ -1937,7 +1943,6 @@ def dbcheck(): logger.info('Correcting Null entries that make the main page break on startup.') c.execute("UPDATE Comics SET LatestDate='Unknown' WHERE LatestDate='None' or LatestDate is NULL") - conn.commit() c.close() diff --git a/mylar/cmtagmylar.py b/mylar/cmtagmylar.py index bcd33bca..ea3addd7 100644 --- a/mylar/cmtagmylar.py +++ b/mylar/cmtagmylar.py @@ -71,14 +71,6 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen logger.fdebug(module + ' UNRAR path set to : ' + unrar_cmd) - #check for dependencies here - configparser - try: - import configparser - except ImportError: - logger.warn(module + ' configparser not found on system. 
Please install manually in order to write metadata') - logger.warn(module + ' continuing with PostProcessing, but I am not using metadata.') - return "fail" - if not os.path.exists(unrar_cmd): logger.fdebug(module + ' WARNING: cannot find the unrar command.') @@ -144,10 +136,10 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen ctversion = subprocess.check_output([sys.executable, comictagger_cmd, "--version"], stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: #logger.warn(module + "[WARNING] "command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output)) - logger.warn(module + '[WARNING] Make sure that you have configparser installed.') + logger.warn(module + '[WARNING] Make sure that you are using the comictagger included with Mylar.') return "fail" - ctend = ctversion.find(':') + ctend = ctversion.find('\]') ctcheck = re.sub("[^0-9]", "", ctversion[:ctend]) ctcheck = re.sub('\.', '', ctcheck).strip() if int(ctcheck) >= int('1115'): # (v1.1.15) @@ -159,7 +151,7 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen use_cvapi = "True" tagoptions.extend(["--cv-api-key", mylar.COMICVINE_API]) else: - logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - personal ComicVine API key not supported in this version. Good luck.') + logger.fdebug(module + ' ' + ctversion[:ctend+1] + ' being used - personal ComicVine API key not supported in this version. Good luck.') use_cvapi = "False" i = 1 @@ -231,7 +223,8 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen try: p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, err = p.communicate() - + logger.info(out) + logger.info(err) if initial_ctrun and 'exported successfully' in out: logger.fdebug(module + '[COMIC-TAGGER] : ' +str(out)) #Archive exported successfully to: X-Men v4 008 (2014) (Digital) (Nahga-Empire).cbz (Original deleted) diff --git a/mylar/helpers.py b/mylar/helpers.py index 53eed092..998959fd 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -1301,13 +1301,13 @@ def IssueDetails(filelocation, IssueID=None): #print str(data) issuetag = 'xml' #looks for the first page and assumes it's the cover. (Alternate covers handled later on) - elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions): + elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions) and cover == "notfound": logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.') local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") local_file.write(inzipfile.read(infile)) local_file.close cover = "found" - elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions): + elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions) and cover == "notfound": logger.fdebug('Found Alternate cover - ' + infile + ' . 
Extracting.') altlist = ('00a', '00b', '00c', '00d', '00e') for alt in altlist: @@ -1433,17 +1433,25 @@ def IssueDetails(filelocation, IssueID=None): pagecount = 0 logger.fdebug("number of pages I counted: " + str(pagecount)) i = 0 - while (i < int(pagecount)): - pageinfo = result.getElementsByTagName('Page')[i].attributes - attrib = pageinfo.getNamedItem('Image') - logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value)) - att = pageinfo.getNamedItem('Type') - logger.fdebug('pageinfo: ' + str(pageinfo)) - if att.value == 'FrontCover': - logger.fdebug('FrontCover detected. Extracting.') - break - i+=1 - else: + + try: + pageinfo = result.getElementsByTagName('Page')[0].attributes + if pageinfo: pageinfo_test == True + except: + pageinfo_test = False + + if pageinfo_test: + while (i < int(pagecount)): + pageinfo = result.getElementsByTagName('Page')[i].attributes + attrib = pageinfo.getNamedItem('Image') + logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value)) + att = pageinfo.getNamedItem('Type') + logger.fdebug('pageinfo: ' + str(pageinfo)) + if att.value == 'FrontCover': + logger.fdebug('FrontCover detected. Extracting.') + break + i+=1 + elif issuetag == 'comment': stripline = 'Archive: ' + dstlocation data = re.sub(stripline, '', data.encode("utf-8")).strip() if data is None or data == '': @@ -1515,6 +1523,10 @@ def IssueDetails(filelocation, IssueID=None): except: pagecount = "None" + else: + logger.warn('Unable to locate any metadata within cbz file. Tag this file and try again if necessary.') + return + issuedetails.append({"title": issue_title, "series": series_title, "volume": series_volume, @@ -1609,17 +1621,21 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None): logger.info('[DUPECHECK] Unable to find corresponding Issue within the DB. Do you still have the series on your watchlist?') return + series = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [dupchk['ComicID']]).fetchone() + #if it's a retry and the file was already snatched, the status is Snatched and won't hit the dupecheck. #rtnval will be one of 3: #'write' - write new file #'dupe_file' - do not write new file as existing file is better quality #'dupe_src' - write new file, as existing file is a lesser quality (dupe) + rtnval = [] if dupchk['Status'] == 'Downloaded' or dupchk['Status'] == 'Archived': try: dupsize = dupchk['ComicSize'] except: logger.info('[DUPECHECK] Duplication detection returned no hits as this is a new Snatch. This is not a duplicate.') - rtnval = "write" + rtnval.append({'action': "write"}) + logger.info('[DUPECHECK] Existing Status already set to ' + dupchk['Status']) cid = [] if dupsize is None: @@ -1635,11 +1651,13 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None): return duplicate_filecheck(filename, ComicID, IssueID, StoryArcID) else: #not sure if this one is correct - should never actually get to this point. 
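# A sketch of the return shape introduced in this hunk: duplicate_filecheck now
# returns a one-element list of dicts instead of a bare string, so callers get
# both the verdict and the file destined for the dump folder, e.g.:
#   [{'action': 'dupe_src',
#     'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}]
# where 'action' is one of 'write', 'dupe_file' or 'dupe_src', and 'to_dupe'
# is present only on the two dupe verdicts.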
- rtnval = "dupe_file" + rtnval.append({'action': "dupe_file", + 'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}) else: - rtnval = "dupe_file" + rtnval.append({'action': "dupe_file", + 'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}) else: - logger.info('[DUPECHECK] Existing file :' + dupchk['Location'] + ' has a filesize of : ' + str(dupsize) + ' bytes.') + logger.info('[DUPECHECK] Existing file within db :' + dupchk['Location'] + ' has a filesize of : ' + str(dupsize) + ' bytes.') #keywords to force keep / delete #this will be eventually user-controlled via the GUI once the options are enabled. @@ -1648,7 +1666,8 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None): logger.info('[DUPECHECK] Existing filesize is 0 as I cannot locate the original entry.') if dupchk['Status'] == 'Archived': logger.info('[DUPECHECK] Assuming issue is Archived.') - rtnval = "dupe_file" + rtnval.append({'action': "dupe_file", + 'to_dupe': filename}) return rtnval else: logger.info('[DUPECHECK] Assuming 0-byte file - this one is gonna get hammered.') @@ -1660,33 +1679,39 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None): if dupchk['Location'].endswith('.cbz'): #keep dupechk['Location'] logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in file : ' + dupchk['Location']) - rtnval = "dupe_file" + rtnval.append({'action': "dupe_file", + 'to_dupe': filename}) else: #keep filename logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in file : ' + filename) - rtnval = "dupe_src" + rtnval.append({'action': "dupe_src", + 'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}) elif 'cbz' in mylar.DUPECONSTRAINT: if dupchk['Location'].endswith('.cbr'): #keep dupchk['Location'] logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location']) - rtnval = "dupe_file" + rtnval.append({'action': "dupe_file", + 'to_dupe': filename}) else: #keep filename logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename) - rtnval = "dupe_src" + rtnval.append({'action': "dupe_src", + 'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}) if mylar.DUPECONSTRAINT == 'filesize': if filesz <= int(dupsize) and int(dupsize) != 0: logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location']) - rtnval = "dupe_file" + rtnval.append({'action': "dupe_file", + 'to_dupe': filename}) else: logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename) - rtnval = "dupe_src" + rtnval.append({'action': "dupe_src", + 'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}) else: logger.info('[DUPECHECK] Duplication detection returned no hits. 
This is not a duplicate of anything that I have scanned in as of yet.') - rtnval = "write" + rtnval.append({'action': "write"}) return rtnval def create_https_certificates(ssl_cert, ssl_key): diff --git a/mylar/importer.py b/mylar/importer.py index 515c3049..004ef82d 100755 --- a/mylar/importer.py +++ b/mylar/importer.py @@ -28,6 +28,9 @@ import shutil import imghdr import sqlite3 import cherrypy +import lib.requests as requests +import gzip +from StringIO import StringIO import mylar from mylar import logger, helpers, db, mb, cv, parseit, filechecker, search, updater, moveit, comicbookdb @@ -169,152 +172,152 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No #since the weekly issue check could return either annuals or issues, let's initialize it here so it carries through properly. weeklyissue_check = [] - #let's do the Annual check here. - if mylar.ANNUALS_ON: - #we need to check first to see if there are pre-existing annuals that have been manually added, or else they'll get - #wiped out. - annualids = [] #to be used to make sure an ID isn't double-loaded - - if annload is None: - pass - else: - for manchk in annload: - if manchk['ReleaseComicID'] is not None or manchk['ReleaseComicID'] is not None: #if it exists, then it's a pre-existing add. - #print str(manchk['ReleaseComicID']), comic['ComicName'], str(SeriesYear), str(comicid) - manualAnnual(manchk['ReleaseComicID'], comic['ComicName'], SeriesYear, comicid) - annualids.append(manchk['ReleaseComicID']) - - annualcomicname = re.sub('[\,\:]', '', comic['ComicName']) - -#----- CBDB (outdated) -# annuals = comicbookdb.cbdb(annualcomicname, SeriesYear) -# print ("Number of Annuals returned: " + str(annuals['totalissues'])) -# nb = 0 -# while (nb <= int(annuals['totalissues'])): -# try: -# annualval = annuals['annualslist'][nb] -# except IndexError: -# break -#---- - #this issueid doesn't exist at this point since we got the data from cbdb...let's try and figure out - #the issueID for CV based on what we know so we can use that ID (and thereby the metadata too) - - #other inherit issue - results below will return the ID for the Series of Annuals, not the series itself. - #sr['comicid'] not the same as comicid for series. - annComicName = annualcomicname + ' annual' - mode = 'series' - #if annuals['totalissues'] is None: - # annissues = 0 - #else: - # annissues = annuals['totalissues'] - #print "annissues :" + str(annissues) - - # annuals happen once / year. determine how many. - annualyear = SeriesYear # no matter what, the year won't be less than this. 
- #if annualval['AnnualYear'] is None: - # sresults = mb.findComic(annComicName, mode, issue=annissues) - #else: - #sresults = mb.findComic(annComicName, mode, issue=annissues, limityear=annualval['AnnualYear']) - #print "annualyear: " + str(annualval['AnnualYear']) - annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'} - - logger.fdebug('[IMPORTER-ANNUAL] - Annual Year:' + str(annualyear)) - sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True) - type='comic' - - if len(sresults) == 1: - logger.fdebug('[IMPORTER-ANNUAL] - 1 result') - if len(sresults) > 0: - logger.fdebug('[IMPORTER-ANNUAL] - there are ' + str(len(sresults)) + ' results.') - num_res = 0 - while (num_res < len(sresults)): - sr = sresults[num_res] - logger.fdebug("description:" + sr['description']) - if any(x in sr['description'].lower() for x in annual_types_ignore): - logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid'])) - else: - if comicid in sr['description']: - logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.') - issueid = sr['comicid'] - logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual') - if issueid in annualids: - logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.') - num_res+=1 # need to manually increment since not a for-next loop - continue - issued = cv.getComic(issueid, 'issue') - if len(issued) is None or len(issued) == 0: - logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...') - pass - else: - n = 0 - if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1: - sr_issues = 1 - else: - sr_issues = sr['issues'] - logger.fdebug('[IMPORTER-ANNUAL (MAIN)] - There are ' + str(sr_issues) + ' annuals in this series.') - while (n < int(sr_issues)): - try: - firstval = issued['issuechoice'][n] - except IndexError: - break - try: - cleanname = helpers.cleanName(firstval['Issue_Name']) - except: - cleanname = 'None' - issid = str(firstval['Issue_ID']) - issnum = str(firstval['Issue_Number']) - issname = cleanname - issdate = str(firstval['Issue_Date']) - stdate = str(firstval['Store_Date']) - int_issnum = helpers.issuedigits(issnum) - newCtrl = {"IssueID": issid} - newVals = {"Issue_Number": issnum, - "Int_IssueNumber": int_issnum, - "IssueDate": issdate, - "ReleaseDate": stdate, - "IssueName": issname, - "ComicID": comicid, - "ComicName": comic['ComicName'], - "ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(), - "ReleaseComicName": sr['name'], - "Status": "Skipped"} - myDB.upsert("annuals", newVals, newCtrl) - - if issuechk is not None and issuetype == 'annual': - logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum)) - if issuechk == int_issnum: - weeklyissue_check.append({"Int_IssueNumber": int_issnum, - "Issue_Number": issnum, - "IssueDate": issdate, - "ReleaseDate": stdate}) - - n+=1 - num_res+=1 - - elif len(sresults) == 0 or len(sresults) is None: - logger.fdebug('[IMPORTER-ANNUAL] - No results, removing the year from the agenda and re-querying.') - sresults, explicit = mb.findComic(annComicName, mode, issue=None)#, explicit=True) - if len(sresults) == 1: - sr = sresults[0] - logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. 
Assuming it is part of the greater collection.') - else: - resultset = 0 - else: - logger.fdebug('[IMPORTER-ANNUAL] - Returning results to screen - more than one possibility') - for sr in sresults: - if annualyear < sr['comicyear']: - logger.fdebug('[IMPORTER-ANNUAL] - ' + str(annualyear) + ' is less than ' + str(sr['comicyear'])) - if int(sr['issues']) > (2013 - int(sr['comicyear'])): - logger.fdebug('[IMPORTER-ANNUAL] - Issue count is wrong') - - #newCtrl = {"IssueID": issueid} - #newVals = {"Issue_Number": annualval['AnnualIssue'], - # "IssueDate": annualval['AnnualDate'], - # "IssueName": annualval['AnnualTitle'], - # "ComicID": comicid, - # "Status": "Skipped"} - #myDB.upsert("annuals", newVals, newCtrl) - #nb+=1 +# #let's do the Annual check here. +# if mylar.ANNUALS_ON: +# #we need to check first to see if there are pre-existing annuals that have been manually added, or else they'll get +# #wiped out. +# annualids = [] #to be used to make sure an ID isn't double-loaded +# +# if annload is None: +# pass +# else: +# for manchk in annload: +# if manchk['ReleaseComicID'] is not None or manchk['ReleaseComicID'] is not None: #if it exists, then it's a pre-existing add. +# #print str(manchk['ReleaseComicID']), comic['ComicName'], str(SeriesYear), str(comicid) +# manualAnnual(manchk['ReleaseComicID'], comic['ComicName'], SeriesYear, comicid) +# annualids.append(manchk['ReleaseComicID']) +# +# annualcomicname = re.sub('[\,\:]', '', comic['ComicName']) +# +##----- CBDB (outdated) +## annuals = comicbookdb.cbdb(annualcomicname, SeriesYear) +## print ("Number of Annuals returned: " + str(annuals['totalissues'])) +## nb = 0 +## while (nb <= int(annuals['totalissues'])): +## try: +## annualval = annuals['annualslist'][nb] +## except IndexError: +## break +##---- +# #this issueid doesn't exist at this point since we got the data from cbdb...let's try and figure out +# #the issueID for CV based on what we know so we can use that ID (and thereby the metadata too) +# +# #other inherit issue - results below will return the ID for the Series of Annuals, not the series itself. +# #sr['comicid'] not the same as comicid for series. +# annComicName = annualcomicname + ' annual' +# mode = 'series' +# #if annuals['totalissues'] is None: +# # annissues = 0 +# #else: +# # annissues = annuals['totalissues'] +# #print "annissues :" + str(annissues) +# +# # annuals happen once / year. determine how many. +# annualyear = SeriesYear # no matter what, the year won't be less than this. 
+# #if annualval['AnnualYear'] is None: +# # sresults = mb.findComic(annComicName, mode, issue=annissues) +# #else: +# #sresults = mb.findComic(annComicName, mode, issue=annissues, limityear=annualval['AnnualYear']) +# #print "annualyear: " + str(annualval['AnnualYear']) +# annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'} +# +# logger.fdebug('[IMPORTER-ANNUAL] - Annual Year:' + str(annualyear)) +# sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True) +# type='comic' +# +# if len(sresults) == 1: +# logger.fdebug('[IMPORTER-ANNUAL] - 1 result') +# if len(sresults) > 0: +# logger.fdebug('[IMPORTER-ANNUAL] - there are ' + str(len(sresults)) + ' results.') +# num_res = 0 +# while (num_res < len(sresults)): +# sr = sresults[num_res] +# #logger.fdebug("description:" + sr['description']) +# if any(x in sr['description'].lower() for x in annual_types_ignore): +# logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid'])) +# else: +# if comicid in sr['description']: +# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.') +# issueid = sr['comicid'] +# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual') +# if issueid in annualids: +# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.') +# num_res+=1 # need to manually increment since not a for-next loop +# continue +# issued = cv.getComic(issueid, 'issue') +# if len(issued) is None or len(issued) == 0: +# logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...') +# pass +# else: +# n = 0 +# if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1: +# sr_issues = 1 +# else: +# sr_issues = sr['issues'] +# logger.fdebug('[IMPORTER-ANNUAL (MAIN)] - There are ' + str(sr_issues) + ' annuals in this series.') +# while (n < int(sr_issues)): +# try: +# firstval = issued['issuechoice'][n] +# except IndexError: +# break +# try: +# cleanname = helpers.cleanName(firstval['Issue_Name']) +# except: +# cleanname = 'None' +# issid = str(firstval['Issue_ID']) +# issnum = str(firstval['Issue_Number']) +# issname = cleanname +# issdate = str(firstval['Issue_Date']) +# stdate = str(firstval['Store_Date']) +# int_issnum = helpers.issuedigits(issnum) +# newCtrl = {"IssueID": issid} +# newVals = {"Issue_Number": issnum, +# "Int_IssueNumber": int_issnum, +# "IssueDate": issdate, +# "ReleaseDate": stdate, +# "IssueName": issname, +# "ComicID": comicid, +# "ComicName": comic['ComicName'], +# "ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(), +# "ReleaseComicName": sr['name'], +# "Status": "Skipped"} +# myDB.upsert("annuals", newVals, newCtrl) +# +# if issuechk is not None and issuetype == 'annual': +# logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. 
' + str(int_issnum)) +# if issuechk == int_issnum: +# weeklyissue_check.append({"Int_IssueNumber": int_issnum, +# "Issue_Number": issnum, +# "IssueDate": issdate, +# "ReleaseDate": stdate}) +# +# n+=1 +# num_res+=1 +# +# elif len(sresults) == 0 or len(sresults) is None: +# logger.fdebug('[IMPORTER-ANNUAL] - No results, removing the year from the agenda and re-querying.') +# sresults, explicit = mb.findComic(annComicName, mode, issue=None)#, explicit=True) +# if len(sresults) == 1: +# sr = sresults[0] +# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.') +# else: +# resultset = 0 +# else: +# logger.fdebug('[IMPORTER-ANNUAL] - Returning results to screen - more than one possibility') +# for sr in sresults: +# if annualyear < sr['comicyear']: +# logger.fdebug('[IMPORTER-ANNUAL] - ' + str(annualyear) + ' is less than ' + str(sr['comicyear'])) +# if int(sr['issues']) > (2013 - int(sr['comicyear'])): +# logger.fdebug('[IMPORTER-ANNUAL] - Issue count is wrong') +# +# #newCtrl = {"IssueID": issueid} +# #newVals = {"Issue_Number": annualval['AnnualIssue'], +# # "IssueDate": annualval['AnnualDate'], +# # "IssueName": annualval['AnnualTitle'], +# # "ComicID": comicid, +# # "Status": "Skipped"} +# #myDB.upsert("annuals", newVals, newCtrl) +# #nb+=1 #parseit.annualCheck(gcomicid=gcdinfo['GCDComicID'], comicid=comicid, comicname=comic['ComicName'], comicyear=SeriesYear) #comic book location on machine @@ -410,9 +413,6 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No coverfile = os.path.join(mylar.CACHE_DIR, str(comicid) + ".jpg") #if cover has '+' in url it's malformed, we need to replace '+' with '%20' to retreive properly. - #thisci = urllib.quote_plus(str(comic['ComicImage'])) - - #urllib.urlretrieve(str(thisci), str(coverfile)) #new CV API restriction - one api request / second.(probably unecessary here, but it doesn't hurt) if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2: @@ -420,45 +420,63 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No else: time.sleep(mylar.CVAPI_RATE) + logger.info('Attempting to retrieve the comic image for series') try: - cimage = re.sub('[\+]', '%20', comic['ComicImage']) - request = urllib2.Request(cimage)#, headers={'Content-Type': 'application/x-www-form-urlencoded'}) - #request.add_header('User-Agent', str(mylar.USER_AGENT)) - - response = urllib2.urlopen(request) - - com_image = response.read() - - with open(coverfile, 'wb') as the_file: - the_file.write(com_image) - - try: - logger.info('Image header check: ' + imghdr.what(coverfile)) - except: - logger.info('image is corrupted.') - raise Exception - logger.info('Successfully retrieved cover for ' + comic['ComicName']) + r = requests.get(comic['ComicImage'], params=None, stream=True, headers=mylar.CV_HEADERS) except Exception, e: - logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImage'])) - logger.info('Attempting to use alternate image size to get cover.') - #new CV API restriction - one api request / second. 
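The hunk below replaces the urllib2 cover fetch with the bundled lib.requests using stream=True. Reduced to a minimal sketch (names taken from this patch, error handling simplified; this is illustrative, not the literal patch code):

    import lib.requests as requests

    try:
        r = requests.get(comic['ComicImage'], stream=True, headers=mylar.CV_HEADERS)
    except Exception, e:
        r = None  # the request itself failed; no response object exists

    if r is not None and r.status_code == 200:
        with open(coverfile, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive chunks
                    f.write(chunk)

One design note: requests normally decompresses a gzipped Content-Encoding transparently for content/iter_content, so an explicit gzip.GzipFile pass over the body shouldn't be needed here.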
-        if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
-            time.sleep(2)
+        logger.warn('Unable to download image from CV URL link: ' + comic['ComicImage'] + ' [Error returned: ' + str(e) + ']')
+        r = None
+
+    if r is None or str(r.status_code) != '200':
+        logger.warn('Unable to download image from CV URL link: ' + comic['ComicImage'])
+        coversize = 0
+    else:
+        logger.fdebug('comic image retrieval status code: ' + str(r.status_code))
+        with open(coverfile, 'wb') as f:
+            for chunk in r.iter_content(chunk_size=1024):
+                if chunk: # filter out keep-alive new chunks
+                    f.write(chunk)
+                    f.flush()
+
+        statinfo = os.stat(coverfile)
+        coversize = statinfo.st_size
+
+    if int(coversize) < 35000:
+        if r is None or str(r.status_code) != '200':
+            logger.info('Trying to grab an alternate cover due to problems trying to retrieve the main cover image.')
         else:
-            time.sleep(mylar.CVAPI_RATE)
+            logger.info('Image size invalid [' + str(coversize) + ' bytes] - trying to get alternate cover image.')
+            logger.fdebug('invalid image link is here: ' + comic['ComicImage'])
+        if os.path.isfile(coverfile):
+            os.remove(coverfile)
+        logger.info('Attempting to retrieve alternate comic image for the series.')
         try:
-            cimage = re.sub('[\+]', '%20', comic['ComicImageALT'])
-            request = urllib2.Request(cimage)
-            response = urllib2.urlopen(request)
-            com_image = response.read()
-            with open(coverfile, 'wb') as the_file:
-                the_file.write(com_image)
+            r = requests.get(comic['ComicImageALT'], params=None, stream=True, headers=mylar.CV_HEADERS)
-            logger.info('Successfully retrieved cover for ' + comic['ComicName'])
         except Exception, e:
-            logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImageALT']))
+            logger.warn('Unable to download image from CV URL link: ' + comic['ComicImageALT'] + ' [Error returned: ' + str(e) + ']')
+            r = None
+
+        if r is None or str(r.status_code) != '200':
+            logger.warn('Unable to download image from CV URL link: ' + comic['ComicImageALT'])
+        else:
+            logger.fdebug('comic image retrieval status code: ' + str(r.status_code))
+            with open(coverfile, 'wb') as f:
+                for chunk in r.iter_content(chunk_size=1024):
+                    if chunk: # filter out keep-alive new chunks
+                        f.write(chunk)
+                        f.flush()
     PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
     ComicImage = helpers.replacetheslash(PRComicImage)
@@ -1486,7 +1504,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
     sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True)
     type='comic'
-    annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'}
+    annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected edition', 'print edition', 'tpb', 'available in print', 'collects'}
     if len(sresults) == 1:
         logger.fdebug('[IMPORTER-ANNUAL] - 1 result')
@@ -1495,79 +1513,83 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
         num_res = 0
         while (num_res < len(sresults)):
             sr = sresults[num_res]
-            logger.fdebug("description:" + sr['description'])
-            if any(x in sr['description'].lower() for x in annual_types_ignore):
-                logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid']))
-            else:
-                if comicid in sr['description']:
-                    logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
-                    issueid = sr['comicid']
-                    logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual')
-                    if issueid in annualids:
-                        logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
-                        num_res+=1 # need to manually increment since not a for-next loop
+            #logger.fdebug("description:" + sr['description'])
+            skip_series = False
+            for x in annual_types_ignore:
+                if x in sr['description'].lower():
+                    test_id_position = sr['description'].find(comicid)
+                    if test_id_position >= sr['description'].lower().find(x) or test_id_position == -1:
+                        logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid']))
+                        skip_series = True
+                        break
-                    issued = cv.getComic(issueid, 'issue')
-                    if len(issued) is None or len(issued) == 0:
-                        logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
-                        pass
+
+            if skip_series is False and comicid in sr['description']:
+                logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
+                issueid = sr['comicid']
+                logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual')
+                if issueid in annualids:
+                    logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
+                    num_res+=1 # need to manually increment since not a for-next loop
+                    continue
+                issued = cv.getComic(issueid, 'issue')
+                if len(issued) is None or len(issued) == 0:
+                    logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
+                    pass
+                else:
+                    n = 0
+                    if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
+                        sr_issues = 1
                     else:
-                        n = 0
-                        if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
-                            sr_issues = 1
-                        else:
-                            sr_issues = sr['issues']
-                        logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr_issues) + ' annuals in this series.')
-                        while (n < int(sr_issues)):
-                            try:
-                                firstval = issued['issuechoice'][n]
-                            except IndexError:
-                                break
-                            try:
-                                cleanname = helpers.cleanName(firstval['Issue_Name'])
-                            except:
-                                cleanname = 'None'
-                            issid = str(firstval['Issue_ID'])
-                            issnum = str(firstval['Issue_Number'])
-                            issname = cleanname
-                            issdate = str(firstval['Issue_Date'])
-                            stdate = str(firstval['Store_Date'])
-                            int_issnum = helpers.issuedigits(issnum)
+                        sr_issues = sr['issues']
+                    logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr_issues) + ' annuals in this series.')
+                    while (n < int(sr_issues)):
+                        try:
+                            firstval = issued['issuechoice'][n]
+                        except IndexError:
+                            break
+                        try:
+                            cleanname = helpers.cleanName(firstval['Issue_Name'])
+                        except:
+                            cleanname = 'None'
+                        issid = str(firstval['Issue_ID'])
+                        issnum = str(firstval['Issue_Number'])
+                        issname = cleanname
+                        issdate = str(firstval['Issue_Date'])
+                        stdate = str(firstval['Store_Date'])
+                        int_issnum = helpers.issuedigits(issnum)
-                            newVals = {"Issue_Number": issnum,
-                                       "Int_IssueNumber": int_issnum,
-                                       "IssueDate": issdate,
-                                       "ReleaseDate": stdate,
-                                       "IssueName": issname,
-                                       "ComicID": comicid,
-                                       "ComicName": ComicName,
-                                       "ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
-                                       "ReleaseComicName": sr['name']}
+                        newVals = {"Issue_Number": issnum,
+                                   "Int_IssueNumber": int_issnum,
+                                   "IssueDate": issdate,
+                                   "ReleaseDate": stdate,
+                                   "IssueName": issname,
+                                   "ComicID": comicid,
+                                   "ComicName": ComicName,
+                                   "ReleaseComicID": re.sub('4050-', '',
firstval['Comic_ID']).strip(), + "ReleaseComicName": sr['name']} - iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone() - if iss_exists is None: - datechk = re.sub('-', '', issdate).strip() # converts date to 20140718 format - if mylar.AUTOWANT_ALL: - newVals['Status'] = "Wanted" - elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING: - newVals['Status'] = "Wanted" - else: - newVals['Status'] = "Skipped" + iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone() + if iss_exists is None: + datechk = re.sub('-', '', issdate).strip() # converts date to 20140718 format + if mylar.AUTOWANT_ALL: + newVals['Status'] = "Wanted" + elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING: + newVals['Status'] = "Wanted" else: - newVals['Status'] = iss_exists['Status'] + newVals['Status'] = "Skipped" + else: + newVals['Status'] = iss_exists['Status'] - newCtrl = {"IssueID": issid} - myDB.upsert("annuals", newVals, newCtrl) + newCtrl = {"IssueID": issid} + myDB.upsert("annuals", newVals, newCtrl) - if issuechk is not None and issuetype == 'annual': - #logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum)) - if issuechk == int_issnum: - weeklyissue_check.append({"Int_IssueNumber": int_issnum, - "Issue_Number": issnum, - "IssueDate": issdate, - "ReleaseDate": stdate}) + if issuechk is not None and issuetype == 'annual': + #logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum)) + if issuechk == int_issnum: + weeklyissue_check.append({"Int_IssueNumber": int_issnum, + "Issue_Number": issnum, + "IssueDate": issdate, + "ReleaseDate": stdate}) - n+=1 + n+=1 num_res+=1 elif len(sresults) == 0 or len(sresults) is None: diff --git a/mylar/mb.py b/mylar/mb.py index b5d31d1d..3f6523e4 100755 --- a/mylar/mb.py +++ b/mylar/mb.py @@ -47,9 +47,6 @@ if platform.python_version() == '2.7.6': def pullsearch(comicapi, comicquery, offset, explicit, type): u_comicquery = urllib.quote(comicquery.encode('utf-8').strip()) u_comicquery = u_comicquery.replace(" ", "%20") - if '-' in u_comicquery: - #cause titles like A-Force will return 16,000+ results otherwise - u_comicquery = '%22' + u_comicquery + '%22' if explicit == 'all' or explicit == 'loose': PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=' + str(type) + '&query=' + u_comicquery + '&field_list=id,name,start_year,first_issue,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&page=' + str(offset) @@ -78,17 +75,7 @@ def pullsearch(comicapi, comicquery, offset, explicit, type): except Exception, e: logger.warn('Error fetching data from ComicVine: %s' % (e)) return -# try: -# file = urllib2.urlopen(PULLURL) -# except urllib2.HTTPError, err: -# logger.error('err : ' + str(err)) -# logger.error("There was a major problem retrieving data from ComicVine - on their end. 
You'll have to try again later most likely.")
-#        return
-#    #convert to string:
-#    data = file.read()
-#    #close file because we dont need it anymore:
-#    file.close()
-#    #parse the xml you downloaded
+
     dom = parseString(r.content) #(data)
     return dom
@@ -100,8 +87,8 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
     comiclist = []
     arcinfolist = []
-    chars = set('!?*')
-    if any((c in chars) for c in name):
+    chars = set('!?*&-')
+    if any((c in chars) for c in name) or 'annual' in name:
         name = '"' +name +'"'
     #print ("limityear: " + str(limityear))
@@ -116,10 +103,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
         explicit = 'all'
     #OR
-    if ' and ' in comicquery.lower() or ' & ' in comicquery:
+    if ' and ' in comicquery.lower():
         logger.fdebug('Enforcing exact naming match due to operator in title (and)')
         explicit = 'all'
-    elif explicit == 'loose':
+
+    if explicit == 'loose':
         logger.fdebug('Changing to loose mode - this will match ANY of the search words')
         comicquery = name.replace(" ", " OR ")
     elif explicit == 'explicit':
@@ -127,7 +115,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
         comicquery=name.replace(" ", " AND ")
     else:
         logger.fdebug('Default search mode - this will match on ALL search words')
-        comicquery = name.replace(" ", " AND ")
+        #comicquery = name.replace(" ", " AND ")
         explicit = 'all'
diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py
index 08fc4812..7cc7e664 100755
--- a/mylar/rsscheck.py
+++ b/mylar/rsscheck.py
@@ -181,6 +181,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
                     #logger.fdebug('publisher: ' + re.sub("'",'', pub).strip()) #publisher sometimes is given within quotes for some reason, strip 'em.
                     vol_find = feedme.entries[i].title.find('vol.')
                     series = feedme.entries[i].title[st_end +1:vol_find].strip()
+                    series = re.sub('&amp;', '&', series).strip()
                     #logger.fdebug('series title: ' + series)
                     iss_st = feedme.entries[i].title.find(' - ', vol_find)
                     vol = re.sub('\.', '', feedme.entries[i].title[vol_find:iss_st]).strip()
@@ -266,9 +267,19 @@ def nzbs(provider=None, forcerss=False):
     feedthis = []
-    def _parse_feed(site, url):
+    def _parse_feed(site, url, verify):
         logger.fdebug('[RSS] Fetching items from ' + site)
-        feedme = feedparser.parse(url, agent=str(mylar.USER_AGENT))
+        payload = None
+        headers = {'User-Agent': str(mylar.USER_AGENT)}
+
+        try:
+            r = requests.get(url, params=payload, verify=verify, headers=headers)
+        except Exception, e:
+            logger.warn('Error fetching RSS Feed Data from %s: %s' % (site, e))
+            return
+
+        feedme = feedparser.parse(r.content)
+
         feedthis.append({"site": site, "feed": feedme})
@@ -276,8 +287,8 @@ def nzbs(provider=None, forcerss=False):
     if mylar.NEWZNAB == 1:
         for newznab_host in mylar.EXTRA_NEWZNABS:
-            logger.fdebug('[RSS] newznab name: ' + str(newznab_host[0]) + ' - enabled: ' + str(newznab_host[4]))
-            if str(newznab_host[4]) == '1':
+            logger.fdebug('[RSS] newznab name: ' + str(newznab_host[0]) + ' - enabled: ' + str(newznab_host[5]))
+            if str(newznab_host[5]) == '1':
                 newznab_hosts.append(newznab_host)
     providercount = len(newznab_hosts) + int(mylar.EXPERIMENTAL == 1) + int(mylar.NZBSU == 1) + int(mylar.DOGNZB == 1)
@@ -285,24 +296,24 @@
     if mylar.EXPERIMENTAL == 1:
         max_entries = "250" if forcerss else "50"
-        _parse_feed('experimental', 'http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&max=' + max_entries + '&more=1')
+        _parse_feed('experimental', 'http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&max=' + max_entries + '&more=1', False)
     if mylar.NZBSU == 1:
         num_items = "&num=100" if forcerss else ""  # default is 25
-        _parse_feed('nzb.su', 'http://api.nzb.su/rss?t=7030&dl=1&i=' + (mylar.NZBSU_UID or '1') + '&r=' + mylar.NZBSU_APIKEY + num_items)
+        _parse_feed('nzb.su', 'http://api.nzb.su/rss?t=7030&dl=1&i=' + (mylar.NZBSU_UID or '1') + '&r=' + mylar.NZBSU_APIKEY + num_items, bool(mylar.NZBSU_VERIFY))
     if mylar.DOGNZB == 1:
         num_items = "&num=100" if forcerss else ""  # default is 25
-        _parse_feed('dognzb', 'https://dognzb.cr/rss.cfm?r=' + mylar.DOGNZB_APIKEY + '&t=7030' + num_items)
+        _parse_feed('dognzb', 'https://dognzb.cr/rss.cfm?r=' + mylar.DOGNZB_APIKEY + '&t=7030' + num_items, bool(mylar.DOGNZB_VERIFY))
     for newznab_host in newznab_hosts:
         site = newznab_host[0].rstrip()
-        (newznabuid, _, newznabcat) = (newznab_host[3] or '').partition('#')
+        (newznabuid, _, newznabcat) = (newznab_host[4] or '').partition('#')
         newznabuid = newznabuid or '1'
         newznabcat = newznabcat or '7030'
         # 11-21-2014: added &num=100 to return 100 results (or maximum) - unsure of cross-reliability
-        _parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[2].rstrip())
+        _parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[3].rstrip(), bool(newznab_host[2]))
     feeddata = []
@@ -478,19 +489,23 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
     torinfo = {}
     for tor in tresults:
-        torsplit = tor['Title'].split('/')
+        #&amp; have been brought into the title field incorrectly occasionally - patched now, but to include those entries already in the
+        #cache db that have the incorrect entry, we'll adjust.
+        torTITLE = re.sub('&amp;', '&', tor['Title']).strip()
+
+        torsplit = torTITLE.split('/')
         if mylar.PREFERRED_QUALITY == 1:
-            if 'cbr' in tor['Title']:
+            if 'cbr' in torTITLE:
                 logger.fdebug('Quality restriction enforced [ cbr only ]. Accepting result.')
             else:
                 logger.fdebug('Quality restriction enforced [ cbr only ]. Rejecting result.')
        elif mylar.PREFERRED_QUALITY == 2:
-            if 'cbz' in tor['Title']:
+            if 'cbz' in torTITLE:
                 logger.fdebug('Quality restriction enforced [ cbz only ]. Accepting result.')
             else:
                 logger.fdebug('Quality restriction enforced [ cbz only ]. 
Rejecting result.') - logger.fdebug('tor-Title: ' + tor['Title']) + logger.fdebug('tor-Title: ' + torTITLE) logger.fdebug('there are ' + str(len(torsplit)) + ' sections in this title') i=0 if nzbprov is not None: @@ -542,7 +557,7 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None): logger.fdebug(str(len(formatrem_seriesname)) + ' - formatrem_seriesname :' + formatrem_seriesname.lower()) if formatrem_seriesname.lower() in formatrem_torsplit.lower() or any(x.lower() in formatrem_torsplit.lower() for x in AS_Alt): - logger.fdebug('matched to : ' + tor['Title']) + logger.fdebug('matched to : ' + torTITLE) logger.fdebug('matched on series title: ' + seriesname) titleend = formatrem_torsplit[len(formatrem_seriesname):] titleend = re.sub('\-', '', titleend) #remove the '-' which is unnecessary @@ -556,15 +571,15 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None): extra = '' #the title on 32P has a mix-mash of crap...ignore everything after cbz/cbr to cleanit - ctitle = tor['Title'].find('cbr') + ctitle = torTITLE.find('cbr') if ctitle == 0: - ctitle = tor['Title'].find('cbz') + ctitle = torTITLE.find('cbz') if ctitle == 0: - ctitle = tor['Title'].find('none') + ctitle = torTITLE.find('none') if ctitle == 0: logger.fdebug('cannot determine title properly - ignoring for now.') continue - cttitle = tor['Title'][:ctitle] + cttitle = torTITLE[:ctitle] if tor['Site'] == '32P': st_pub = rebuiltline.find('(') diff --git a/mylar/search.py b/mylar/search.py index 9a58a432..7674a73a 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -223,6 +223,11 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD newznab_host = None searchprov = prov_order[prov_count].lower() + if searchprov == 'dognzb' and mylar.DOGNZB == 0: + #since dognzb could hit the 50 daily api limit during the middle of a search run, check here on each pass to make + #sure it's not disabled (it gets auto-disabled on maxing out the API hits) + prov_count+=1 + continue if searchmode == 'rss': findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName) if findit == 'yes': @@ -451,6 +456,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa while (findloop < findcount): #logger.fdebug('findloop: ' + str(findloop) + ' / findcount: ' + str(findcount)) comsrc = comsearch + if nzbprov == 'dognzb' and not mylar.DOGNZB: + foundc = "no" + done = True + break while (cmloopit >= 1): #if issue_except is None: issue_exc = '' #else: issue_exc = issue_except @@ -568,14 +577,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa headers = {'User-Agent': str(mylar.USER_AGENT)} payload = None - if findurl.startswith('https'): + if findurl.startswith('https:') and verify == False: try: from lib.requests.packages.urllib3 import disable_warnings disable_warnings() except: logger.warn('Unable to disable https warnings. 
Expect some spam if using https nzb providers.')
-            else:
+            elif findurl.startswith('http:') and verify == True:
                 verify = False
             #logger.fdebug('[SSL: ' + str(verify) + '] Search URL: ' + findurl)
@@ -591,10 +600,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                         break
                     data = False
-                    logger.info(r.content)
+                    logger.info('status code: ' + str(r.status_code))
                     if str(r.status_code) != '200':
-                        logger.warn('Unable to download torrent from ' + nzbprov + ' [Status Code returned: ' + str(r.status_code) + ']')
+                        logger.warn('Unable to retrieve search results from ' + tmpprov + ' [Status Code returned: ' + str(r.status_code) + ']')
+                        data = False
                     else:
                         data = r.content
@@ -602,11 +612,17 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                         bb = feedparser.parse(data)
                     else:
                         bb = "no results"
+                    #logger.info('Search results:' + str(bb))
                     try:
                         if bb['feed']['error']:
                             logger.error('[ERROR CODE: ' + str(bb['feed']['error']['code']) + '] ' + str(bb['feed']['error']['description']))
-                            bb = "no results"
+                            if bb['feed']['error']['code'] == '910':
+                                logger.warn('DAILY API limit reached. Disabling provider usage until 12:01am')
+                                mylar.DOGNZB = 0
+                                foundc = "no"
+                                done = True
+                                break
                     except:
                         #logger.info('no errors on data retrieval...proceeding')
                         pass
@@ -829,18 +845,28 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                             ComVersChk = 0
                         ctchk = cleantitle.split()
+                        ctchk_indexes = []
                         volfound = False
-                        vol_label = None
+                        vol_nono = []
+                        new_cleantitle = []
+                        fndcomicversion = None
                         for ct in ctchk:
                             if any([ct.lower().startswith('v') and ct[1:].isdigit(), ct.lower()[:3] == 'vol', volfound == True]):
                                 if volfound == True:
-                                    logger.fdebug('Split Volume label detected - ie. Vol 4. Attempting to adust.')
+                                    logger.fdebug('Split Volume label detected [' + ct + '] - ie. Vol 4. Attempting to adjust.')
                                     if ct.isdigit():
-                                        vol_label = vol_label + ' ' + str(ct)
+                                        vol_nono.append(ctchk.index(ct))
+                                        #recreate the cleantitle, with the volume label completely removed (but stored for comparison later)
                                         ct = 'v' + str(ct)
+                                        ctchk_indexes.extend(range(0, len(ctchk)))
+                                        logger.fdebug('ctchk_indexes: ' + str(ctchk_indexes))
+                                        for i in ctchk_indexes:
+                                            if i not in vol_nono:
+                                                new_cleantitle.append(ctchk[i])
+                                        cleantitle = ' '.join(new_cleantitle)
+                                        logger.fdebug('Reformed cleantitle (with NO volume label): ' + cleantitle)
                                         volfound = False
-                                        cleantitle = re.sub(vol_label, ct, cleantitle).strip()
                                 tmpsplit = ct
                                 if tmpsplit.lower().startswith('vol'):
                                     logger.fdebug('volume detected - stripping and re-analyzing for volume label.')
@@ -850,7 +876,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                                     #if vol label set as 'Vol 4' it will obliterate the Vol, but pass over the '4' - set
                                     #volfound to True so that it can loop back around.
                                     if not tmpsplit.isdigit():
-                                        vol_label = ct #store the wording of how the Vol is defined so we can skip it later on.
+ vol_nono.append(ctchk.index(ct)) volfound = True continue @@ -1251,26 +1278,26 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa # instead of the Series they belong to (V2012 vs V2013) if annualize == "true" and int(ComicYear) == int(F_ComicVersion): logger.fdebug("We matched on versions for annuals " + str(fndcomicversion)) - scount+=1 - cvers = "true" + #scount+=1 + #cvers = "true" elif int(F_ComicVersion) == int(D_ComicVersion) or int(F_ComicVersion) == int(S_ComicVersion): logger.fdebug("We matched on versions..." + str(fndcomicversion)) - scount+=1 - cvers = "true" + #scount+=1 + #cvers = "true" else: logger.fdebug("Versions wrong. Ignoring possible match.") - scount = 0 - cvers = "false" + #scount = 0 + #cvers = "false" - if cvers == "true": + #if cvers == "true": #since we matched on versions, let's remove it entirely to improve matching. - logger.fdebug('Removing versioning [' + fndcomicversion + '] from nzb filename to improve matching algorithims.') - cissb4vers = re.sub(fndcomicversion, "", comic_iss_b4).strip() - logger.fdebug('New b4split : ' + str(cissb4vers)) - splitit = cissb4vers.split(None) - splitst -=1 + #logger.fdebug('Removing versioning [' + fndcomicversion + '] from nzb filename to improve matching algorithims.') + #cissb4vers = re.sub(fndcomicversion, "", comic_iss_b4).strip() + #logger.fdebug('New b4split : ' + str(cissb4vers)) + #splitit = cissb4vers.split(None) + #splitst -=1 #do an initial check initialchk = 'ok' @@ -1800,8 +1827,9 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc else: host_newznab_fix = host_newznab - if 'warp?x=' in link: - logger.fdebug('NZBMegaSearch url detected. Adjusting...') + #account for nzbmegasearch & nzbhydra + if 'warp?x=' in link or 'indexerguid' in link: + logger.fdebug('NZBMegaSearch / NZBHydra url detected. Adjusting...') nzbmega = True else: apikey = newznab[3].rstrip() @@ -2395,7 +2423,7 @@ def generate_id(nzbprov, link): url_parts = urlparse.urlparse(link) path_parts = url_parts[2].rpartition('/') nzbid = path_parts[0].rsplit('/', 1)[1] - elif nzbprov == 'newznab': + elif nzbprov == 'newznab': #if in format of http://newznab/getnzb/.nzb&i=1&r=apikey tmpid = urlparse.urlparse(link)[4] #param 4 is the query string from the url. if 'warp' in urlparse.urlparse(link)[2] and 'x=' in tmpid: @@ -2406,6 +2434,8 @@ def generate_id(nzbprov, link): # for the geek in all of us... 
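+            # Worked example of the id extraction below (URLs are illustrative, not from this patch):
+            #   /getnzb/api?t=get&id=abc123&apikey=k -> tmpid is 't=get&id=abc123&apikey=k';
+            #     '&id' is found and the following '&' terminates it -> nzbid 'abc123'
+            #   /api?t=get&apikey=k&id=abc123 (NZBHydra-style, id as the final parameter) ->
+            #     find('&', st +1) returns -1, so the new guard below sets end = len(tmpid)
+            #     and the full id is recovered instead of being truncated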
st = tmpid.find('&id') end = tmpid.find('&', st +1) + if end == -1: + end = len(tmpid) nzbid = re.sub('&id=', '', tmpid[st:end]).strip() elif nzbprov == 'Torznab': if mylar.TORZNAB_HOST.endswith('/'): diff --git a/mylar/webserve.py b/mylar/webserve.py index 8776ba7b..dc0f218e 100755 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -3425,6 +3425,8 @@ class WebInterface(object): "maxsize": mylar.MAXSIZE, "interface_list": interface_list, "dupeconstraint": mylar.DUPECONSTRAINT, + "ddump": helpers.checked(mylar.DDUMP), + "duplicate_dump": mylar.DUPLICATE_DUMP, "autowant_all": helpers.checked(mylar.AUTOWANT_ALL), "autowant_upcoming": helpers.checked(mylar.AUTOWANT_UPCOMING), "comic_cover_local": helpers.checked(mylar.COMIC_COVER_LOCAL), @@ -3689,7 +3691,7 @@ class WebInterface(object): prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, boxcar_enabled=0, boxcar_onsnatch=0, boxcar_token=None, pushbullet_enabled=0, pushbullet_apikey=None, pushbullet_deviceid=None, pushbullet_onsnatch=0, preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, file_opts=None, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660, chowner=None, chgroup=None, - tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, dupeconstraint=None, **kwargs): + tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, dupeconstraint=None, ddump=0, duplicate_dump=None, **kwargs): mylar.COMICVINE_API = comicvine_api mylar.HTTP_HOST = http_host mylar.HTTP_PORT = http_port @@ -3815,6 +3817,8 @@ class WebInterface(object): mylar.COMIC_COVER_LOCAL = comic_cover_local mylar.INTERFACE = interface mylar.DUPECONSTRAINT = dupeconstraint + mylar.DDUMP = ddump + mylar.DUPLICATE_DUMP = duplicate_dump mylar.ENABLE_EXTRA_SCRIPTS = enable_extra_scripts mylar.EXTRA_SCRIPTS = extra_scripts mylar.ENABLE_PRE_SCRIPTS = enable_pre_scripts @@ -4082,43 +4086,51 @@ class WebInterface(object): filelocation = filelocation.encode('ASCII') filelocation = urllib.unquote_plus(filelocation).decode('utf8') issuedetails = helpers.IssueDetails(filelocation) - #print str(issuedetails) - issueinfo = '' - #issueinfo += '' - if len(issuedetails[0]['summary']) > 1000: - issuesumm = issuedetails[0]['summary'][:1000] + '...' - else: - issuesumm = issuedetails[0]['summary'] - issueinfo += '' - issueinfo += '
' - issueinfo += '' - issueinfo += '

' + issuedetails[0]['series'] + '
[#' + issuedetails[0]['issue_number'] + ']

' - issueinfo += '
"' + issuedetails[0]['title'] + '"

' - issueinfo += '

' + str(issuedetails[0]['pagecount']) + ' pages

' - if issuedetails[0]['day'] is None: - issueinfo += '

(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + ')


' + if issuedetails: + #print str(issuedetails) + issueinfo = '' + #issueinfo += '' + if len(issuedetails[0]['summary']) > 1000: + issuesumm = issuedetails[0]['summary'][:1000] + '...' + else: + issuesumm = issuedetails[0]['summary'] + issueinfo += '' + issueinfo += '
' + issueinfo += '' + issueinfo += '

' + issuedetails[0]['series'] + '
[#' + issuedetails[0]['issue_number'] + ']

' + issueinfo += '
"' + issuedetails[0]['title'] + '"

' + issueinfo += '

' + str(issuedetails[0]['pagecount']) + ' pages

' + if issuedetails[0]['day'] is None: + issueinfo += '

(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + ')


' + else: + issueinfo += '

(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + '-' + str(issuedetails[0]['day']) + ')


' + if not issuedetails[0]['writer'] == 'None': + issueinfo += 'Writer: ' + issuedetails[0]['writer'] + '
' + if not issuedetails[0]['penciller'] == 'None': + issueinfo += 'Penciller: ' + issuedetails[0]['penciller'] + '
' + if not issuedetails[0]['inker'] == 'None': + issueinfo += 'Inker: ' + issuedetails[0]['inker'] + '
' + if not issuedetails[0]['colorist'] == 'None': + issueinfo += 'Colorist: ' + issuedetails[0]['colorist'] + '
' + if not issuedetails[0]['letterer'] == 'None': + issueinfo += 'Letterer: ' + issuedetails[0]['letterer'] + '
' + if not issuedetails[0]['editor'] == 'None': + issueinfo += 'Editor: ' + issuedetails[0]['editor'] + '
' + issueinfo += '
Summary: ' + issuesumm + '
' + os.path.split(filelocation)[1] + '
' + issueinfo += '
' + else: - issueinfo += '

(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + '-' + str(issuedetails[0]['day']) + ')


' - if not issuedetails[0]['writer'] == 'None': - issueinfo += 'Writer: ' + issuedetails[0]['writer'] + '
' - if not issuedetails[0]['penciller'] == 'None': - issueinfo += 'Penciller: ' + issuedetails[0]['penciller'] + '
' - if not issuedetails[0]['inker'] == 'None': - issueinfo += 'Inker: ' + issuedetails[0]['inker'] + '
' - if not issuedetails[0]['colorist'] == 'None': - issueinfo += 'Colorist: ' + issuedetails[0]['colorist'] + '
' - if not issuedetails[0]['letterer'] == 'None': - issueinfo += 'Letterer: ' + issuedetails[0]['letterer'] + '
' - if not issuedetails[0]['editor'] == 'None': - issueinfo += 'Editor: ' + issuedetails[0]['editor'] + '
' - issueinfo += '
Summary: ' + issuesumm + '
' + os.path.split(filelocation)[1] + '
' - issueinfo += '
' + ErrorPNG = 'interfaces/default/images/symbol_exclamation.png' + issueinfo = '
' + issueinfo += '' + issueinfo += '

ERROR


' + issueinfo += '
Unable to retrieve metadata from within cbz file

' + issueinfo += '
Maybe you should try and tag the file again?

' + issueinfo += '
' + os.path.split(filelocation)[1] + '
' + issueinfo += '
' + return issueinfo - #import json - #json_dump = json.dumps(issuedetails) - #json_dump = json_dump.replace("\\","\\\\") - #print 'json_dump:' + str(json_dump) - #return json_dump + IssueInfo.exposed = True def manual_metatag(self, dirName, issueid, filename, comicid, comversion): diff --git a/mylar/weeklypull.py b/mylar/weeklypull.py index 15cc55b6..7b16f2a3 100755 --- a/mylar/weeklypull.py +++ b/mylar/weeklypull.py @@ -754,8 +754,8 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep break else: - #logger.fdebug('issuedate:' + str(datevalues[0]['issuedate'])) - #logger.fdebug('status:' + str(datevalues[0]['status'])) + logger.fdebug('issuedate:' + str(datevalues[0]['issuedate'])) + logger.fdebug('status:' + str(datevalues[0]['status'])) datestatus = datevalues[0]['status'] validcheck = checkthis(datevalues[0]['issuedate'], datestatus, usedate) if validcheck == True: